From 769048e19937c507aa88fe1bb7f22b30407bd52b Mon Sep 17 00:00:00 2001 From: luoyaoming Date: Wed, 24 Apr 2024 09:25:13 +0800 Subject: [PATCH] Import Upstream version 1.6.24~ds1 --- .github/ISSUE_TEMPLATE/bug_report.md | 73 - .github/ISSUE_TEMPLATE/bug_report.yaml | 70 + .github/ISSUE_TEMPLATE/feature_request.md | 16 - .github/ISSUE_TEMPLATE/feature_request.yaml | 25 + .github/workflows/build-test-images.yml | 167 + .github/workflows/ci.yml | 370 +- .github/workflows/codeql.yml | 47 +- .github/workflows/images.yml | 77 + .github/workflows/nightly.yml | 47 +- .github/workflows/release.yml | 234 +- .github/workflows/release/Dockerfile | 62 + .../workflows/windows-periodic-trigger.yml | 32 + .github/workflows/windows-periodic.yml | 256 + .golangci.yml | 44 +- .mailmap | 22 + .zuul.yaml | 35 - .../containerd-build/integration-test.yaml | 96 - .zuul/playbooks/containerd-build/run.yaml | 22 - .../playbooks/containerd-build/unit-test.yaml | 20 - ADOPTERS.md | 14 +- BUILDING.md | 92 +- Makefile | 133 +- Makefile.linux | 6 +- Makefile.windows | 6 +- Protobuild.toml | 17 - README.md | 31 +- RELEASES.md | 62 +- Vagrantfile | 68 +- .../Microsoft/hcsshim => api}/Protobuild.toml | 37 +- .../cgroups/stats/v1 => api}/doc.go | 2 +- api/next.pb.txt | 8 + api/services/containers/v1/containers.pb.go | 2 +- api/services/containers/v1/containers.proto | 2 +- .../services/containers/v1/doc.go | 4 +- api/services/content/v1/content.pb.go | 2 +- api/services/content/v1/content.proto | 2 +- .../services/content/v1/doc.go | 8 +- .../v2/stats => api/services/diff/v1}/doc.go | 2 +- api/services/images/v1/images.pb.go | 2 +- api/services/images/v1/images.proto | 2 +- .../introspection/v1/introspection.pb.go | 2 +- .../introspection/v1/introspection.proto | 2 +- api/services/namespaces/v1/doc.go | 17 + api/services/snapshots/v1/doc.go | 17 + api/services/snapshots/v1/snapshots.pb.go | 2 +- api/services/snapshots/v1/snapshots.proto | 2 +- api/services/tasks/v1/doc.go | 17 + 
api/services/tasks/v1/tasks.pb.go | 219 +- api/services/tasks/v1/tasks.proto | 2 + api/services/ttrpc/events/v1/doc.go | 2 +- api/services/version/v1/doc.go | 18 + api/types/task/doc.go | 18 + archive/compression/compression.go | 54 +- archive/compression/compression_test.go | 64 +- archive/issues_test.go | 7 +- archive/tar.go | 107 +- archive/tar_freebsd.go | 2 - archive/tar_linux_test.go | 44 +- archive/tar_mostunix.go | 1 + archive/tar_opts_linux.go | 2 - archive/tar_opts_windows.go | 6 +- archive/tar_test.go | 177 +- archive/tar_unix.go | 37 +- archive/tar_windows.go | 6 +- archive/tarheader/tarheader.go | 82 + archive/tarheader/tarheader_unix.go | 59 + archive/time_unix.go | 6 +- cio/io_test.go | 24 +- cio/io_unix.go | 12 +- cio/io_unix_test.go | 10 +- cio/io_windows.go | 7 +- cio/io_windows_test.go | 2 - client.go | 51 +- client_opts.go | 9 + cmd/containerd-shim-runc-v1/main.go | 1 + cmd/containerd-shim-runc-v2/main.go | 8 +- cmd/containerd-shim/main_unix.go | 14 +- cmd/containerd-shim/shim_darwin.go | 2 - cmd/containerd-shim/shim_freebsd.go | 2 - cmd/containerd-stress/density.go | 11 +- cmd/containerd-stress/exec_worker.go | 6 + cmd/containerd-stress/main.go | 20 +- cmd/containerd-stress/rlimit_freebsd.go | 2 - cmd/containerd-stress/rlimit_unix.go | 1 + cmd/containerd-stress/rlimit_windows.go | 2 - cmd/containerd/builtins.go | 3 + cmd/containerd/builtins_aufs_linux.go | 1 + cmd/containerd/builtins_btrfs_linux.go | 1 + cmd/containerd/builtins_cri.go | 1 + cmd/containerd/builtins_devmapper_linux.go | 1 + cmd/containerd/builtins_linux.go | 1 - cmd/containerd/builtins_unix.go | 1 + cmd/containerd/builtins_windows.go | 3 - cmd/containerd/builtins_zfs_linux.go | 1 + cmd/containerd/command/config_unsupported.go | 1 + cmd/containerd/command/main.go | 143 +- cmd/containerd/command/main_unix.go | 10 +- cmd/containerd/command/main_windows.go | 10 +- cmd/containerd/command/notify_linux.go | 2 - cmd/containerd/command/notify_unsupported.go | 1 + 
cmd/containerd/command/publish.go | 14 +- cmd/containerd/command/service_unsupported.go | 1 + cmd/containerd/command/service_windows.go | 64 +- cmd/containerd/main.go | 3 +- cmd/ctr/app/main.go | 4 +- cmd/ctr/app/main_unix.go | 1 + cmd/ctr/commands/commands.go | 46 +- cmd/ctr/commands/commands_unix.go | 4 + cmd/ctr/commands/commands_windows.go | 2 - cmd/ctr/commands/containers/checkpoint.go | 4 +- cmd/ctr/commands/containers/containers.go | 17 +- cmd/ctr/commands/containers/restore.go | 3 +- cmd/ctr/commands/content/content.go | 7 +- cmd/ctr/commands/content/fetch.go | 2 +- cmd/ctr/commands/images/convert.go | 4 +- cmd/ctr/commands/images/export.go | 7 +- cmd/ctr/commands/images/images.go | 16 +- cmd/ctr/commands/images/import.go | 35 +- cmd/ctr/commands/images/mount.go | 5 +- cmd/ctr/commands/images/pull.go | 5 +- cmd/ctr/commands/images/push.go | 24 +- cmd/ctr/commands/images/unmount.go | 5 +- cmd/ctr/commands/leases/leases.go | 5 +- cmd/ctr/commands/namespaces/namespaces.go | 8 +- .../commands/namespaces/namespaces_other.go | 1 + cmd/ctr/commands/oci/oci.go | 5 +- cmd/ctr/commands/pprof/pprof.go | 3 +- cmd/ctr/commands/pprof/pprof_unix.go | 1 + cmd/ctr/commands/resolver.go | 18 +- cmd/ctr/commands/run/run.go | 32 +- cmd/ctr/commands/run/run_unix.go | 93 +- cmd/ctr/commands/run/run_windows.go | 21 +- cmd/ctr/commands/shim/io_unix.go | 1 + cmd/ctr/commands/shim/shim.go | 7 +- cmd/ctr/commands/signals_notlinux.go | 3 +- cmd/ctr/commands/snapshots/snapshots.go | 11 +- cmd/ctr/commands/tasks/checkpoint.go | 2 +- cmd/ctr/commands/tasks/delete.go | 2 +- cmd/ctr/commands/tasks/exec.go | 75 +- cmd/ctr/commands/tasks/kill.go | 12 +- cmd/ctr/commands/tasks/metrics.go | 2 - cmd/ctr/commands/tasks/ps.go | 2 +- cmd/ctr/commands/tasks/start.go | 3 +- cmd/ctr/commands/tasks/tasks_unix.go | 3 +- cmd/ctr/commands/tasks/tasks_windows.go | 2 +- cmd/ctr/main.go | 3 +- cmd/gen-manpages/main.go | 4 +- container.go | 13 +- container_checkpoint_opts.go | 2 +- container_opts.go | 10 +- 
container_opts_unix.go | 1 + container_restore_opts.go | 9 +- containerstore.go | 1 + content/adaptor_test.go | 89 + content/helpers.go | 101 +- content/helpers_test.go | 114 +- content/local/locks.go | 10 +- content/local/locks_test.go | 2 +- content/local/readerat.go | 7 +- content/local/store.go | 85 +- content/local/store_bsd.go | 3 +- content/local/store_openbsd.go | 3 +- content/local/store_test.go | 39 +- content/local/store_unix.go | 3 +- content/local/writer.go | 27 +- content/proxy/content_writer.go | 12 +- content/testsuite/testsuite.go | 65 +- contrib/Dockerfile.test | 103 +- .../cri-in-userns/docker-entrypoint.sh | 51 + .../cri-in-userns/etc_containerd_config.toml | 10 + contrib/ansible/README.md | 4 +- contrib/ansible/cri-containerd.yaml | 4 +- contrib/ansible/tasks/k8s.yaml | 6 +- contrib/ansible/vars/vars.yaml | 2 +- contrib/apparmor/apparmor.go | 8 +- contrib/apparmor/apparmor_test.go | 1 + contrib/apparmor/apparmor_unsupported.go | 3 +- contrib/apparmor/template.go | 16 +- contrib/apparmor/template_test.go | 1 + contrib/fuzz/archive_fuzzer.go | 76 + .../fuzz/cap_fuzzer.go | 17 +- contrib/fuzz/container_fuzzer.go | 453 + contrib/fuzz/containerd_import_fuzzer.go | 87 + contrib/fuzz/content_fuzzer.go | 169 + .../utils.go => contrib/fuzz/cri_fuzzer.go | 36 +- contrib/fuzz/docker_fuzzer.go | 86 + contrib/fuzz/filters_fuzzers.go | 1 + contrib/fuzz/metadata_fuzzer.go | 405 + contrib/fuzz/oss_fuzz_build.sh | 108 + contrib/fuzz/platforms_fuzzers.go | 1 + contrib/gce/cloud-init/master.yaml | 3 +- contrib/gce/cloud-init/node.yaml | 3 +- contrib/gce/cni.template | 2 +- contrib/gce/configure.sh | 5 +- contrib/nvidia/nvidia.go | 12 +- contrib/seccomp/kernelversion/kernel_linux.go | 92 + .../kernelversion/kernel_linux_test.go | 141 + contrib/seccomp/seccomp.go | 4 +- contrib/seccomp/seccomp_default.go | 62 +- .../seccomp/seccomp_default_unsupported.go | 1 + debian/README.Debian | 8 - debian/changelog | 35 - debian/clean | 3 - debian/compat | 1 - 
debian/containerd.docs | 3 - debian/containerd.install | 2 - debian/containerd.service | 1 - debian/control | 42 - debian/copyright | 11777 ----- debian/docs | 2 - ...g-github-containerd-containerd-dev.install | 1 - debian/rules | 76 - debian/source/format | 1 - debian/tests/basic-smoke | 15 - debian/tests/control | 3 - debian/watch | 5 - defaults/defaults_darwin.go | 37 + defaults/defaults_unix.go | 3 +- defaults/defaults_windows.go | 2 - diff.go | 3 + diff/apply/apply.go | 11 +- diff/apply/apply_darwin.go | 40 + diff/apply/apply_linux.go | 14 +- diff/apply/apply_linux_test.go | 2 - diff/apply/apply_other.go | 3 +- diff/diff.go | 15 + diff/lcow/lcow.go | 24 +- diff/stream.go | 2 +- diff/stream_unix.go | 5 +- diff/stream_windows.go | 9 +- diff/walking/differ.go | 77 +- diff/windows/windows.go | 47 +- docs/PLUGINS.md | 60 +- docs/RUNC.md | 46 +- docs/cri/architecture.md | 4 +- docs/cri/config.md | 67 +- docs/cri/crictl.md | 26 +- docs/cri/decryption.md | 4 +- docs/cri/proposal.md | 6 +- docs/garbage-collection.md | 3 +- docs/getting-started.md | 16 +- docs/hosts.md | 68 +- docs/man/containerd-config.toml.5.md | 168 +- docs/managed-opt.md | 4 +- docs/ops.md | 14 +- docs/stream_processors.md | 2 + docs/tracing.md | 88 + errdefs/errors.go | 5 +- errdefs/grpc.go | 10 +- errdefs/grpc_test.go | 14 +- events/exchange/exchange.go | 24 +- events/exchange/exchange_test.go | 2 +- events/plugin/plugin.go | 32 + filters/filter.go | 1 - filters/parser.go | 10 +- filters/quote.go | 11 +- gc/gc.go | 2 + gc/scheduler/scheduler.go | 4 +- go.mod | 164 +- go.sum | 891 +- identifiers/validate.go | 8 +- image.go | 25 +- image_store.go | 1 + images/archive/exporter.go | 67 +- images/archive/importer.go | 122 +- images/archive/reference.go | 6 +- images/converter/default.go | 53 +- images/converter/uncompress/uncompress.go | 8 +- images/handlers.go | 42 +- images/image.go | 21 +- images/mediatypes.go | 4 +- import.go | 46 +- install.go | 8 +- integration/addition_gids_test.go | 142 +- 
integration/client/client_test.go | 15 +- integration/client/client_ttrpc_test.go | 9 + integration/client/client_unix_test.go | 8 +- integration/client/client_windows_test.go | 21 +- .../client/container_checkpoint_test.go | 10 +- integration/client/container_linux_test.go | 685 +- integration/client/container_test.go | 882 +- integration/client/content_test.go | 3 +- integration/client/convert_test.go | 6 +- .../client/daemon_config_linux_test.go | 94 +- integration/client/daemon_test.go | 18 +- integration/client/export_test.go | 107 +- integration/client/go.mod | 57 +- integration/client/go.sum | 643 +- integration/client/helpers_unix_test.go | 14 + integration/client/helpers_windows_test.go | 23 +- integration/client/image_test.go | 10 +- integration/client/import_test.go | 84 +- integration/client/lease_test.go | 16 +- .../client/restart_monitor_linux_test.go | 116 - integration/client/restart_monitor_test.go | 234 + integration/client/task_opts_unix_test.go | 1 + integration/common.go | 49 +- integration/container_log_test.go | 49 +- integration/container_restart_test.go | 54 +- integration/container_stats_test.go | 132 +- integration/container_stop_test.go | 32 +- .../container_update_resources_test.go | 260 +- integration/container_volume_test.go | 140 + .../container_without_image_ref_test.go | 20 +- integration/containerd_image_test.go | 61 +- .../cri-api/pkg/apis/services.go | 75 +- integration/duplicate_name_test.go | 15 +- .../failpoint/cmd/cni-bridge-fp/README.md | 159 + .../failpoint/cmd/cni-bridge-fp/main_linux.go | 202 + .../containerd-shim-runc-fp-v1/main_linux.go | 14 +- .../plugin_linux.go | 141 + integration/image_list.sample.toml | 7 +- integration/image_load_test.go | 28 +- integration/imagefs_info_test.go | 28 +- integration/images/README.md | 127 + integration/images/volume-copy-up/Dockerfile | 4 +- .../images/volume-copy-up/Dockerfile_windows | 37 + integration/images/volume-copy-up/Makefile | 97 +- .../volume-ownership/Dockerfile_windows 
| 39 + integration/images/volume-ownership/Makefile | 103 +- .../tools/get_owner_windows.go | 45 +- integration/issue7496_linux_test.go | 180 + integration/main_test.go | 267 +- integration/no_metadata_test.go | 15 +- integration/pod_dualstack_test.go | 49 +- integration/pod_hostname_test.go | 37 +- integration/remote/remote_image.go | 31 +- integration/remote/remote_runtime.go | 102 +- integration/remote/util/util_unix.go | 4 +- integration/remote/util/util_unsupported.go | 1 + integration/remote/util/util_windows.go | 2 - integration/remote/utils.go | 2 +- integration/restart_test.go | 85 +- integration/runtime_handler_test.go | 14 +- integration/sandbox_clean_remove_test.go | 8 +- .../sandbox_clean_remove_windows_test.go | 179 + integration/sandbox_run_rollback_test.go | 396 + integration/shim_dial_unix_test.go | 177 + integration/truncindex_test.go | 29 +- integration/util/boottime_util_darwin.go | 2 - integration/util/boottime_util_linux.go | 1 + integration/util/util_unix.go | 4 +- integration/util/util_unsupported.go | 1 + integration/util/util_windows.go | 2 - integration/volume_copy_up_test.go | 114 +- .../volume_copy_up_unix_test.go | 21 +- .../volume_copy_up_windows_test.go | 26 +- integration/windows_hostprocess_test.go | 191 + labels/validate.go | 5 +- leases/id.go | 2 +- leases/lease.go | 9 +- leases/lease_test.go | 83 + log/context.go | 170 +- log/context_test.go | 43 +- log/logtest/context.go | 4 +- metadata/bolt.go | 4 +- metadata/boltutil/helpers.go | 7 +- metadata/buckets.go | 137 +- metadata/containers.go | 50 +- metadata/containers_test.go | 26 +- metadata/content.go | 41 +- metadata/content_test.go | 10 +- metadata/db.go | 10 +- metadata/db_test.go | 46 +- metadata/gc.go | 10 +- metadata/gc_test.go | 17 +- metadata/images.go | 38 +- metadata/images_test.go | 7 +- metadata/leases.go | 50 +- metadata/leases_test.go | 5 +- metadata/namespaces.go | 18 +- metadata/namespaces_test.go | 14 +- metadata/snapshot.go | 70 +- metadata/snapshot_test.go | 6 
+- metrics/cgroups/cgroups.go | 15 +- .../cgroups/common/type.go | 17 +- metrics/cgroups/metrics_test.go | 158 + metrics/cgroups/v1/blkio.go | 1 + metrics/cgroups/v1/cgroups.go | 7 +- metrics/cgroups/v1/cpu.go | 1 + metrics/cgroups/v1/hugetlb.go | 1 + metrics/cgroups/v1/memory.go | 1 + metrics/cgroups/v1/metric.go | 1 + metrics/cgroups/v1/metrics.go | 83 +- metrics/cgroups/v1/oom.go | 1 + metrics/cgroups/v1/pids.go | 1 + metrics/cgroups/v2/cgroups.go | 7 +- metrics/cgroups/v2/cpu.go | 1 + metrics/cgroups/v2/io.go | 1 + metrics/cgroups/v2/memory.go | 1 + metrics/cgroups/v2/metric.go | 1 + metrics/cgroups/v2/metrics.go | 81 +- metrics/cgroups/v2/pids.go | 1 + metrics/metrics.go | 29 + metrics/types/v1/types.go | 1 + metrics/types/v2/types.go | 1 + sys/mount_linux.go => mount/fmountat_linux.go | 16 +- .../fmountat_linux_test.go | 23 +- mount/lookup_linux_test.go | 38 +- mount/lookup_unix.go | 7 +- mount/lookup_unsupported.go | 1 + mount/losetup_linux.go | 20 +- mount/losetup_linux_test.go | 5 +- mount/mount.go | 47 + mount/mount_freebsd.go | 13 +- mount/mount_linux.go | 24 +- mount/mount_linux_test.go | 15 +- mount/mount_test.go | 150 + mount/mount_unix.go | 3 +- mount/mount_windows.go | 32 +- {sys => mount}/subprocess_unsafe_linux.go | 2 +- {sys => mount}/subprocess_unsafe_linux.s | 0 mount/temp.go | 27 +- mount/temp_unix.go | 3 +- mount/temp_unsupported.go | 3 +- namespaces/context.go | 6 +- namespaces/context_test.go | 6 +- namespaces/store.go | 2 - oci/mounts.go | 71 + .../command_other.go => oci/mounts_freebsd.go | 29 +- oci/spec.go | 45 +- oci/spec_opts.go | 339 +- oci/spec_opts_linux.go | 20 +- oci/spec_opts_linux_test.go | 278 +- oci/spec_opts_nonlinux.go | 20 +- oci/spec_opts_test.go | 113 +- oci/spec_opts_unix.go | 7 +- oci/spec_opts_unix_test.go | 1 + oci/spec_opts_windows.go | 41 +- oci/spec_opts_windows_test.go | 418 +- oci/utils_unix.go | 98 +- oci/utils_unix_go116_test.go | 55 + .../utils_unix_go117_test.go | 11 +- oci/utils_unix_test.go | 165 + 
pkg/apparmor/apparmor.go | 11 +- pkg/apparmor/apparmor_linux.go | 11 +- pkg/apparmor/apparmor_unsupported.go | 1 + pkg/atomicfile/file.go | 148 + pkg/atomicfile/file_test.go | 77 + pkg/cap/cap_linux.go | 8 +- pkg/cri/annotations/annotations.go | 18 + pkg/cri/config/config.go | 62 +- pkg/cri/config/config_unix.go | 3 +- pkg/cri/config/config_windows.go | 4 +- pkg/cri/constants/constants.go | 8 +- pkg/cri/cri.go | 65 +- pkg/cri/io/helpers.go | 2 +- pkg/cri/io/helpers_unix.go | 1 + pkg/cri/io/helpers_windows.go | 8 +- pkg/cri/io/logger.go | 13 +- pkg/cri/io/logger_test.go | 6 +- pkg/cri/io/metrics.go | 42 + .../state.go => pkg/cri/labels/labels.go | 20 +- pkg/cri/opts/container.go | 85 +- pkg/cri/opts/spec.go | 12 +- pkg/cri/opts/spec_linux.go | 127 +- pkg/cri/opts/spec_test.go | 2 +- pkg/cri/opts/spec_windows.go | 201 +- pkg/cri/opts/spec_windows_test.go | 54 + pkg/cri/server/bandwidth/linux.go | 1 + pkg/cri/server/bandwidth/unsupported.go | 1 + pkg/cri/server/cni_conf_syncer.go | 16 +- pkg/cri/server/container_attach.go | 14 +- pkg/cri/server/container_checkpoint.go | 29 + pkg/cri/server/container_create.go | 59 +- pkg/cri/server/container_create_linux.go | 50 +- pkg/cri/server/container_create_linux_test.go | 169 +- pkg/cri/server/container_create_other.go | 3 +- pkg/cri/server/container_create_other_test.go | 3 +- pkg/cri/server/container_create_test.go | 5 +- pkg/cri/server/container_create_windows.go | 22 +- .../server/container_create_windows_test.go | 187 +- .../cri/server/container_events.go | 15 +- pkg/cri/server/container_exec.go | 9 +- pkg/cri/server/container_execsync.go | 31 +- pkg/cri/server/container_list.go | 7 +- pkg/cri/server/container_list_test.go | 2 +- pkg/cri/server/container_log_reopen.go | 8 +- pkg/cri/server/container_remove.go | 52 +- pkg/cri/server/container_start.go | 49 +- pkg/cri/server/container_stats.go | 13 +- pkg/cri/server/container_stats_list.go | 15 +- pkg/cri/server/container_stats_list_linux.go | 225 +- 
.../server/container_stats_list_linux_test.go | 275 + pkg/cri/server/container_stats_list_other.go | 8 +- .../server/container_stats_list_windows.go | 10 +- pkg/cri/server/container_status.go | 30 +- pkg/cri/server/container_status_test.go | 2 +- pkg/cri/server/container_stop.go | 49 +- pkg/cri/server/container_update_resources.go | 136 + .../container_update_resources_linux.go | 123 +- .../container_update_resources_linux_test.go | 145 +- .../container_update_resources_other.go | 12 +- .../container_update_resources_windows.go | 36 +- pkg/cri/server/events.go | 169 +- pkg/cri/server/events_test.go | 4 +- pkg/cri/server/helpers.go | 130 +- pkg/cri/server/helpers_linux.go | 9 +- pkg/cri/server/helpers_linux_test.go | 13 +- pkg/cri/server/helpers_other.go | 1 + pkg/cri/server/helpers_selinux_linux_test.go | 2 +- pkg/cri/server/helpers_test.go | 21 +- pkg/cri/server/helpers_windows.go | 2 - pkg/cri/server/image_list.go | 2 +- pkg/cri/server/image_list_test.go | 2 +- pkg/cri/server/image_pull.go | 184 +- pkg/cri/server/image_pull_test.go | 75 +- pkg/cri/server/image_remove.go | 16 +- pkg/cri/server/image_status.go | 21 +- pkg/cri/server/image_status_test.go | 2 +- pkg/cri/server/imagefs_info.go | 2 +- pkg/cri/server/imagefs_info_test.go | 2 +- pkg/cri/server/instrumented_service.go | 1192 +- pkg/cri/server/metrics.go | 74 + pkg/cri/server/rdt_linux.go | 51 + .../cri/server/rdt_stub_linux.go | 11 +- pkg/cri/server/restart.go | 68 +- pkg/cri/server/sandbox_list.go | 13 +- pkg/cri/server/sandbox_list_test.go | 2 +- pkg/cri/server/sandbox_portforward.go | 8 +- pkg/cri/server/sandbox_portforward_linux.go | 15 +- pkg/cri/server/sandbox_portforward_other.go | 7 +- pkg/cri/server/sandbox_portforward_windows.go | 19 +- pkg/cri/server/sandbox_remove.go | 48 +- pkg/cri/server/sandbox_run.go | 345 +- pkg/cri/server/sandbox_run_linux.go | 69 +- pkg/cri/server/sandbox_run_linux_test.go | 85 +- pkg/cri/server/sandbox_run_other.go | 6 +- pkg/cri/server/sandbox_run_other_test.go | 3 +- 
pkg/cri/server/sandbox_run_test.go | 19 +- pkg/cri/server/sandbox_run_windows.go | 35 +- pkg/cri/server/sandbox_run_windows_test.go | 25 +- pkg/cri/server/sandbox_stats.go | 47 + pkg/cri/server/sandbox_stats_linux.go | 177 + pkg/cri/server/sandbox_stats_list.go | 80 + pkg/cri/server/sandbox_stats_other.go | 38 + pkg/cri/server/sandbox_stats_windows.go | 35 + pkg/cri/server/sandbox_status.go | 41 +- pkg/cri/server/sandbox_status_test.go | 2 +- pkg/cri/server/sandbox_stop.go | 56 +- pkg/cri/server/service.go | 122 +- pkg/cri/server/service_linux.go | 48 +- pkg/cri/server/service_other.go | 1 + pkg/cri/server/service_test.go | 11 +- pkg/cri/server/service_windows.go | 47 +- pkg/cri/server/snapshots.go | 4 +- pkg/cri/server/status.go | 41 +- pkg/cri/server/streaming.go | 27 +- pkg/cri/server/testing/fake_cni_plugin.go | 10 + pkg/cri/server/update_runtime_config.go | 75 +- pkg/cri/server/update_runtime_config_test.go | 21 +- pkg/cri/server/version.go | 13 +- pkg/cri/store/container/container.go | 47 +- pkg/cri/store/container/container_test.go | 46 +- pkg/cri/store/container/metadata.go | 9 +- pkg/cri/store/container/metadata_test.go | 2 +- pkg/cri/store/container/status.go | 78 +- pkg/cri/store/container/status_test.go | 22 +- pkg/cri/store/errors_test.go | 8 +- pkg/cri/store/image/fake_image.go | 4 +- pkg/cri/store/image/image.go | 36 +- pkg/cri/store/image/image_test.go | 8 +- pkg/cri/store/image/sort_test.go | 3 +- pkg/cri/store/sandbox/metadata.go | 9 +- pkg/cri/store/sandbox/metadata_test.go | 2 +- pkg/cri/store/sandbox/sandbox.go | 38 +- pkg/cri/store/sandbox/sandbox_test.go | 43 +- pkg/cri/store/sandbox/status.go | 38 +- pkg/cri/store/sandbox/status_test.go | 6 +- pkg/cri/store/snapshot/snapshot.go | 5 +- pkg/cri/store/snapshot/snapshot_test.go | 6 +- pkg/cri/store/stats/stats.go | 27 + pkg/cri/streaming/portforward/httpstream.go | 48 +- pkg/cri/streaming/request_cache.go | 2 +- pkg/cri/streaming/server.go | 6 +- pkg/cri/util/deep_copy.go | 8 +- 
pkg/cri/util/id.go | 2 +- pkg/dialer/dialer.go | 5 +- pkg/dialer/dialer_unix.go | 1 + pkg/failpoint/fail.go | 310 + pkg/failpoint/fail_test.go | 134 + pkg/ioutil/write_closer.go | 8 +- pkg/ioutil/write_closer_test.go | 5 +- pkg/kmutex/kmutex.go | 105 + pkg/kmutex/kmutex_test.go | 170 + .../btrfs/ioctl.go => pkg/kmutex/noop.go | 20 +- pkg/netns/netns_linux.go | 39 +- pkg/netns/netns_other.go | 3 +- pkg/netns/netns_windows.go | 2 - pkg/oom/oom.go | 1 + pkg/oom/v1/v1.go | 5 +- pkg/oom/v2/v2.go | 22 +- pkg/os/mount_other.go | 1 + pkg/os/mount_unix.go | 1 + pkg/os/os.go | 5 +- pkg/os/os_unix.go | 1 + pkg/os/os_windows.go | 10 +- pkg/os/os_windows_test.go | 3 +- pkg/os/testing/fake_os_unix.go | 1 + pkg/process/deleted_state.go | 22 +- pkg/process/exec.go | 22 +- pkg/process/exec_state.go | 18 +- pkg/process/init.go | 24 +- pkg/process/init_state.go | 66 +- pkg/process/io.go | 36 +- pkg/process/io_test.go | 5 +- pkg/process/io_util.go | 3 +- pkg/process/utils.go | 10 +- pkg/progress/escape.go | 2 +- pkg/randutil/randutil.go | 48 + pkg/registrar/registrar.go | 9 +- .../btrfs => pkg/runtimeoptions/v1}/doc.go | 3 +- pkg/schedcore/prctl_linux.go | 49 + pkg/seccomp/seccomp_unsupported.go | 1 + pkg/seed/seed.go | 5 + pkg/seed/seed_other.go | 1 + pkg/shutdown/shutdown.go | 109 + pkg/snapshotters/annotations.go | 97 + pkg/snapshotters/annotations_test.go | 74 + pkg/testutil/helpers.go | 3 +- pkg/testutil/helpers_unix.go | 1 + pkg/testutil/mount_other.go | 1 + pkg/timeout/timeout.go | 10 +- pkg/ttrpcutil/client.go | 5 +- pkg/userns/userns_unsupported.go | 1 + platforms/compare.go | 12 +- platforms/compare_test.go | 20 + platforms/cpuinfo.go | 98 +- platforms/cpuinfo_linux.go | 161 + platforms/cpuinfo_linux_test.go | 141 + platforms/cpuinfo_other.go | 60 + platforms/database.go | 17 +- platforms/defaults.go | 16 - platforms/defaults_darwin.go | 45 + platforms/defaults_unix.go | 19 +- ...defaults_test.go => defaults_unix_test.go} | 3 + platforms/defaults_windows.go | 48 +- 
platforms/defaults_windows_test.go | 47 +- platforms/platforms.go | 69 +- platforms/platforms_test.go | 19 +- ...info_test.go => platforms_windows_test.go} | 35 +- plugin/context.go | 52 +- plugin/plugin.go | 36 +- plugin/plugin_go18.go | 1 + plugin/plugin_other.go | 1 + plugin/plugin_test.go | 385 + process.go | 10 +- protobuf/plugin/fieldpath/fieldpath.go | 3 +- protobuf/plugin/{ => fieldpath}/helpers.go | 9 +- pull.go | 15 +- reference/docker/reference.go | 16 +- releases/README.md | 2 +- releases/cri-containerd.DEPRECATED.txt | 12 + releases/v1.0.0.toml | 8 +- releases/v1.1.0.toml | 2 +- releases/v1.2.0.toml | 6 +- releases/v1.5.1.toml | 23 - releases/v1.5.3.toml | 26 - releases/v1.5.4.toml | 14 - releases/v1.5.5.toml | 23 - releases/v1.5.6.toml | 26 - releases/v1.5.8.toml | 27 - releases/v1.5.9.toml | 20 - releases/v1.6.0.toml | 78 + releases/v1.6.1.toml | 23 + releases/v1.6.10.toml | 22 + releases/v1.6.11.toml | 22 + releases/v1.6.12.toml | 19 + releases/v1.6.13.toml | 26 + releases/{v1.5.7.toml => v1.6.14.toml} | 7 +- releases/v1.6.15.toml | 20 + releases/v1.6.16.toml | 23 + releases/v1.6.17.toml | 23 + releases/v1.6.18.toml | 24 + releases/v1.6.19.toml | 20 + releases/v1.6.2.toml | 21 + releases/v1.6.20.toml | 27 + releases/v1.6.21.toml | 36 + releases/v1.6.22.toml | 47 + releases/v1.6.23.toml | 35 + releases/v1.6.24.toml | 35 + releases/v1.6.3.toml | 27 + releases/v1.6.4.toml | 21 + releases/v1.6.5.toml | 22 + releases/v1.6.6.toml | 21 + releases/v1.6.7.toml | 28 + releases/{v1.5.2.toml => v1.6.8.toml} | 8 +- releases/v1.6.9.toml | 33 + remotes/docker/auth/fetch.go | 35 +- remotes/docker/auth/fetch_test.go | 114 + remotes/docker/auth/parse.go | 3 - remotes/docker/auth/parse_test.go | 87 + remotes/docker/authorizer.go | 88 +- remotes/docker/config/config_unix.go | 15 +- remotes/docker/config/config_windows.go | 16 +- remotes/docker/config/hosts.go | 111 +- remotes/docker/config/hosts_test.go | 31 +- remotes/docker/converter.go | 9 +- 
remotes/docker/fetcher.go | 25 +- remotes/docker/fetcher_test.go | 8 +- remotes/docker/httpreadseeker.go | 23 +- remotes/docker/pusher.go | 193 +- remotes/docker/pusher_test.go | 329 +- remotes/docker/registry.go | 3 +- remotes/docker/resolver.go | 68 +- remotes/docker/resolver_test.go | 190 +- remotes/docker/schema1/converter.go | 32 +- remotes/docker/scope.go | 6 +- remotes/docker/scope_test.go | 5 + remotes/docker/status.go | 7 +- remotes/errors/errors.go | 3 +- remotes/handlers.go | 47 +- remotes/handlers_test.go | 144 + reports/2017-01-13.md | 2 +- rootfs/apply.go | 15 +- rootfs/diff.go | 4 +- rootfs/init.go | 7 +- rootfs/init_other.go | 1 + runtime/monitor.go | 13 +- runtime/opts/opts_linux.go | 3 - runtime/restart/monitor/change.go | 4 +- runtime/restart/monitor/monitor.go | 28 +- runtime/runtime.go | 7 +- runtime/task.go | 14 +- runtime/task_list.go | 18 +- runtime/v1/linux/bundle.go | 9 +- runtime/v1/linux/bundle_test.go | 5 +- runtime/v1/linux/process.go | 3 +- runtime/v1/linux/runtime.go | 43 +- runtime/v1/linux/task.go | 18 +- runtime/v1/shim.go | 1 + runtime/v1/shim/client/client.go | 52 +- runtime/v1/shim/client/client_linux.go | 10 +- runtime/v1/shim/client/client_unix.go | 4 +- runtime/v1/shim/local.go | 1 + runtime/v1/shim/service.go | 15 +- runtime/v1/shim/service_linux.go | 19 +- runtime/v1/shim/service_unix.go | 15 +- runtime/v2/binary.go | 83 +- runtime/v2/bundle.go | 12 +- runtime/v2/bundle_linux_test.go | 5 +- runtime/v2/example/cmd/main.go | 1 + runtime/v2/example/example.go | 1 + runtime/v2/logging/logging_unix.go | 1 + runtime/v2/logging/logging_windows.go | 10 +- runtime/v2/manager.go | 450 +- runtime/v2/manager_unix.go | 1 + runtime/v2/manager_windows.go | 2 - runtime/v2/process.go | 4 +- runtime/v2/runc/container.go | 24 +- runtime/v2/runc/manager/manager_linux.go | 285 + runtime/v2/runc/platform.go | 20 +- runtime/v2/runc/task/plugin/plugin_linux.go | 47 + runtime/v2/runc/task/service.go | 721 + runtime/v2/runc/util.go | 5 +- 
runtime/v2/runc/v1/service.go | 45 +- runtime/v2/runc/v2/service.go | 825 +- runtime/v2/shim.go | 199 +- runtime/v2/shim/shim.go | 372 +- runtime/v2/shim/shim_darwin.go | 6 +- runtime/v2/shim/shim_freebsd.go | 6 +- runtime/v2/shim/shim_linux.go | 5 +- runtime/v2/shim/shim_test.go | 5 +- runtime/v2/shim/shim_unix.go | 25 +- runtime/v2/shim/shim_windows.go | 11 +- runtime/v2/shim/util.go | 130 +- runtime/v2/shim/util_test.go | 118 + runtime/v2/shim/util_unix.go | 30 +- runtime/v2/shim/util_windows.go | 10 +- runtime/v2/shim_load.go | 168 + runtime/v2/shim_unix.go | 3 +- runtime/v2/shim_unix_test.go | 1 + runtime/v2/shim_windows.go | 4 +- runtime/v2/shim_windows_test.go | 3 +- script/setup/config-selinux | 2 +- script/setup/critools-version | 2 +- script/setup/enable_docker_tls_on_windows.ps1 | 22 + script/setup/enable_ssh_windows.ps1 | 37 + script/setup/install-cni | 19 +- script/setup/install-cni-windows | 12 +- script/setup/install-critools | 19 +- script/setup/install-dev-tools | 15 +- .../setup/install-failpoint-binaries | 33 +- script/setup/install-gotestsum | 2 +- script/setup/install-protobuf | 22 +- script/setup/install-runc | 20 +- script/setup/install-runhcs-shim | 43 + script/setup/install-seccomp | 7 +- script/setup/prepare_env_windows.ps1 | 51 + .../setup/prepare_windows_docker_helper.ps1 | 21 + script/setup/runc-version | 2 +- script/test/cri-integration.sh | 40 +- script/test/utils.sh | 235 +- script/verify-go-modules.sh | 112 + services.go | 59 +- services/containers/helpers.go | 1 + services/containers/local.go | 7 +- services/containers/service.go | 2 +- .../content/contentserver/contentserver.go | 8 +- services/content/service.go | 3 +- services/content/store.go | 7 +- services/diff/local.go | 8 +- services/diff/service.go | 2 +- services/diff/service_unix.go | 1 + services/diff/service_windows.go | 2 - services/events/service.go | 15 +- services/images/helpers.go | 1 + services/images/service.go | 2 +- services/introspection/local.go | 28 +- 
services/introspection/service.go | 13 +- services/leases/service.go | 5 +- services/namespaces/local.go | 7 +- services/namespaces/service.go | 2 +- services/opt/path_unix.go | 1 + services/opt/service.go | 5 +- services/server/config/config.go | 26 +- services/server/config/config_test.go | 43 +- services/server/namespace.go | 52 + services/server/server.go | 162 +- services/server/server_linux.go | 7 +- services/server/server_unsupported.go | 1 + services/server/server_windows.go | 2 - services/snapshots/service.go | 2 +- services/snapshots/snapshotters.go | 7 +- services/tasks/local.go | 74 +- .../rdma.go => services/tasks/local_darwin.go | 36 +- services/tasks/local_freebsd.go | 3 +- services/tasks/local_unix.go | 7 +- services/tasks/local_windows.go | 3 +- .../tasks/rdt_default.go | 15 +- services/tasks/rdt_linux.go | 59 + services/tasks/service.go | 2 +- signals.go | 14 +- signals_unix.go | 43 - signals_windows.go | 63 - snapshots/benchsuite/benchmark.go | 1 + snapshots/benchsuite/benchmark_test.go | 6 +- snapshots/btrfs/btrfs.go | 14 +- snapshots/btrfs/btrfs_test.go | 48 +- snapshots/btrfs/plugin/plugin.go | 1 + snapshots/devmapper/README.md | 10 +- .../devmapper/blkdiscard/blkdiscard.go | 38 +- snapshots/devmapper/config.go | 31 +- snapshots/devmapper/config_test.go | 14 +- snapshots/devmapper/device_info.go | 1 + snapshots/devmapper/dmsetup/dmsetup.go | 61 +- snapshots/devmapper/dmsetup/dmsetup_test.go | 22 +- snapshots/devmapper/metadata.go | 25 +- snapshots/devmapper/metadata_test.go | 55 +- snapshots/devmapper/plugin/plugin.go | 4 +- snapshots/devmapper/pool_device.go | 101 +- snapshots/devmapper/pool_device_test.go | 29 +- snapshots/devmapper/snapshotter.go | 134 +- snapshots/devmapper/snapshotter_test.go | 137 +- snapshots/lcow/lcow.go | 36 +- snapshots/native/native.go | 42 +- snapshots/native/native_default.go | 1 + snapshots/native/native_freebsd.go | 2 - snapshots/overlay/overlay.go | 160 +- snapshots/overlay/overlay_test.go | 61 +- 
snapshots/overlay/overlayutils/check.go | 47 +- snapshots/overlay/overlayutils/check_test.go | 11 +- snapshots/overlay/plugin/plugin.go | 22 +- snapshots/snapshotter.go | 26 +- snapshots/storage/bolt.go | 70 +- snapshots/storage/metastore.go | 6 +- snapshots/storage/metastore_bench_test.go | 27 +- snapshots/storage/metastore_test.go | 41 +- snapshots/testsuite/helpers.go | 68 +- snapshots/testsuite/helpers_other.go | 1 + snapshots/testsuite/testsuite.go | 34 +- snapshots/testsuite/testsuite_unix.go | 1 + snapshots/windows/windows.go | 58 +- snapshotter_default_unix.go | 1 + snapshotter_opts_unix.go | 1 + sys/epoll.go | 1 + sys/fds.go | 5 +- sys/filesys_unix.go | 1 + sys/filesys_windows.go | 41 +- sys/oom_linux.go | 3 +- sys/oom_linux_test.go | 2 +- sys/oom_unsupported.go | 1 + sys/reaper/reaper_unix.go | 39 +- sys/socket_unix.go | 5 +- sys/socket_windows.go | 2 - sys/stat_bsd.go | 44 - sys/stat_openbsd.go | 45 - sys/stat_unix.go | 44 - task.go | 29 +- task_opts.go | 23 +- task_opts_unix.go | 3 +- test/build-test-images.sh | 15 +- test/build-utils.sh | 23 +- test/build.sh | 15 +- test/e2e_node/gci-init.sh | 31 + test/init-buildx.sh | 2 +- test/push.sh | 12 +- test/utils.sh | 6 +- tracing/log.go | 130 + tracing/plugin/otlp.go | 179 + .../tracing.go | 27 +- unpacker.go | 126 +- .../github.com/Microsoft/go-winio/.gitignore | 1 - .../github.com/Microsoft/go-winio/CODEOWNERS | 1 - vendor/github.com/Microsoft/go-winio/LICENSE | 22 - .../github.com/Microsoft/go-winio/README.md | 22 - .../github.com/Microsoft/go-winio/backup.go | 280 - .../Microsoft/go-winio/backuptar/noop.go | 4 - .../Microsoft/go-winio/backuptar/strconv.go | 68 - .../Microsoft/go-winio/backuptar/tar.go | 452 - vendor/github.com/Microsoft/go-winio/ea.go | 137 - vendor/github.com/Microsoft/go-winio/file.go | 323 - .../github.com/Microsoft/go-winio/fileinfo.go | 73 - .../github.com/Microsoft/go-winio/hvsock.go | 307 - vendor/github.com/Microsoft/go-winio/pipe.go | 517 - .../Microsoft/go-winio/pkg/etw/etw.go 
| 20 - .../Microsoft/go-winio/pkg/etw/eventdata.go | 73 - .../go-winio/pkg/etw/eventdatadescriptor.go | 29 - .../go-winio/pkg/etw/eventdescriptor.go | 84 - .../go-winio/pkg/etw/eventmetadata.go | 177 - .../Microsoft/go-winio/pkg/etw/eventopt.go | 75 - .../Microsoft/go-winio/pkg/etw/fieldopt.go | 516 - .../Microsoft/go-winio/pkg/etw/newprovider.go | 73 - .../pkg/etw/newprovider_unsupported.go | 9 - .../Microsoft/go-winio/pkg/etw/provider.go | 282 - .../go-winio/pkg/etw/providerglobal.go | 54 - .../Microsoft/go-winio/pkg/etw/ptr64_32.go | 16 - .../Microsoft/go-winio/pkg/etw/ptr64_64.go | 15 - .../Microsoft/go-winio/pkg/etw/wrapper_32.go | 52 - .../Microsoft/go-winio/pkg/etw/wrapper_64.go | 42 - .../go-winio/pkg/etw/zsyscall_windows.go | 103 - .../go-winio/pkg/etwlogrus/HookTest.wprp | 18 - .../Microsoft/go-winio/pkg/etwlogrus/hook.go | 107 - .../Microsoft/go-winio/pkg/fs/fs_windows.go | 31 - .../Microsoft/go-winio/pkg/guid/guid.go | 237 - .../pkg/security/grantvmgroupaccess.go | 161 - .../go-winio/pkg/security/syscall_windows.go | 7 - .../go-winio/pkg/security/zsyscall_windows.go | 70 - .../Microsoft/go-winio/privilege.go | 202 - .../github.com/Microsoft/go-winio/reparse.go | 128 - vendor/github.com/Microsoft/go-winio/sd.go | 98 - .../github.com/Microsoft/go-winio/syscall.go | 3 - .../github.com/Microsoft/go-winio/vhd/vhd.go | 323 - .../Microsoft/go-winio/vhd/zvhd_windows.go | 106 - .../Microsoft/go-winio/zsyscall_windows.go | 427 - .../Microsoft/hcsshim/.gitattributes | 1 - .../github.com/Microsoft/hcsshim/.gitignore | 3 - .../github.com/Microsoft/hcsshim/CODEOWNERS | 1 - vendor/github.com/Microsoft/hcsshim/LICENSE | 21 - vendor/github.com/Microsoft/hcsshim/README.md | 46 - .../containerd-shim-runhcs-v1/options/doc.go | 1 - .../options/next.pb.txt | 0 .../options/runhcs.pb.go | 1542 - .../options/runhcs.proto | 115 - .../containerd-shim-runhcs-v1/stats/doc.go | 6 - .../stats/next.pb.txt | 0 .../stats/stats.pb.go | 2819 -- .../stats/stats.proto | 70 - 
.../hcsshim/computestorage/attach.go | 38 - .../hcsshim/computestorage/destroy.go | 26 - .../hcsshim/computestorage/detach.go | 26 - .../hcsshim/computestorage/export.go | 46 - .../hcsshim/computestorage/format.go | 26 - .../hcsshim/computestorage/helpers.go | 193 - .../hcsshim/computestorage/import.go | 41 - .../hcsshim/computestorage/initialize.go | 38 - .../Microsoft/hcsshim/computestorage/mount.go | 27 - .../Microsoft/hcsshim/computestorage/setup.go | 74 - .../hcsshim/computestorage/storage.go | 50 - .../computestorage/zsyscall_windows.go | 319 - .../github.com/Microsoft/hcsshim/container.go | 223 - vendor/github.com/Microsoft/hcsshim/errors.go | 245 - .../ext4/internal/compactext4/compact.go | 1328 - .../hcsshim/ext4/internal/format/format.go | 411 - .../hcsshim/ext4/tar2ext4/tar2ext4.go | 209 - .../hcsshim/ext4/tar2ext4/vhdfooter.go | 76 - .../Microsoft/hcsshim/functional_tests.ps1 | 12 - .../github.com/Microsoft/hcsshim/hcn/hcn.go | 304 - .../Microsoft/hcsshim/hcn/hcnendpoint.go | 388 - .../Microsoft/hcsshim/hcn/hcnerrors.go | 164 - .../Microsoft/hcsshim/hcn/hcnglobals.go | 132 - .../Microsoft/hcsshim/hcn/hcnloadbalancer.go | 311 - .../Microsoft/hcsshim/hcn/hcnnamespace.go | 446 - .../Microsoft/hcsshim/hcn/hcnnetwork.go | 462 - .../Microsoft/hcsshim/hcn/hcnpolicy.go | 329 - .../Microsoft/hcsshim/hcn/hcnroute.go | 266 - .../Microsoft/hcsshim/hcn/hcnsupport.go | 143 - .../Microsoft/hcsshim/hcn/zsyscall_windows.go | 795 - .../github.com/Microsoft/hcsshim/hcsshim.go | 28 - .../Microsoft/hcsshim/hnsendpoint.go | 118 - .../Microsoft/hcsshim/hnsglobals.go | 16 - .../Microsoft/hcsshim/hnsnetwork.go | 36 - .../github.com/Microsoft/hcsshim/hnspolicy.go | 60 - .../Microsoft/hcsshim/hnspolicylist.go | 47 - .../Microsoft/hcsshim/hnssupport.go | 13 - .../github.com/Microsoft/hcsshim/interface.go | 114 - .../hcsshim/internal/cni/registry.go | 110 - .../Microsoft/hcsshim/internal/cow/cow.go | 91 - .../hcsshim/internal/hcs/callback.go | 161 - 
.../Microsoft/hcsshim/internal/hcs/errors.go | 327 - .../Microsoft/hcsshim/internal/hcs/process.go | 521 - .../hcsshim/internal/hcs/schema1/schema1.go | 250 - .../internal/hcs/schema2/attachment.go | 30 - .../hcsshim/internal/hcs/schema2/battery.go | 13 - .../hcs/schema2/cache_query_stats_response.go | 18 - .../hcsshim/internal/hcs/schema2/chipset.go | 27 - .../internal/hcs/schema2/close_handle.go | 14 - .../hcsshim/internal/hcs/schema2/com_port.go | 17 - .../internal/hcs/schema2/compute_system.go | 26 - .../internal/hcs/schema2/configuration.go | 72 - .../internal/hcs/schema2/console_size.go | 16 - .../hcsshim/internal/hcs/schema2/container.go | 34 - ...r_credential_guard_add_instance_request.go | 16 - ...edential_guard_hv_socket_service_config.go | 15 - .../container_credential_guard_instance.go | 16 - ...ainer_credential_guard_modify_operation.go | 17 - ...iner_credential_guard_operation_request.go | 15 - ...redential_guard_remove_instance_request.go | 14 - .../container_credential_guard_state.go | 25 - .../container_credential_guard_system_info.go | 14 - .../schema2/container_memory_information.go | 25 - .../hcsshim/internal/hcs/schema2/cpu_group.go | 15 - .../hcs/schema2/cpu_group_affinity.go | 15 - .../internal/hcs/schema2/cpu_group_config.go | 18 - .../hcs/schema2/cpu_group_configurations.go | 15 - .../hcs/schema2/cpu_group_operations.go | 18 - .../hcs/schema2/cpu_group_property.go | 15 - .../hcs/schema2/create_group_operation.go | 17 - .../hcs/schema2/delete_group_operation.go | 15 - .../hcsshim/internal/hcs/schema2/device.go | 27 - .../hcsshim/internal/hcs/schema2/devices.go | 46 - .../hcs/schema2/enhanced_mode_video.go | 14 - .../hcs/schema2/flexible_io_device.go | 18 - .../internal/hcs/schema2/guest_connection.go | 19 - .../hcs/schema2/guest_connection_info.go | 21 - .../hcs/schema2/guest_crash_reporting.go | 14 - .../hcsshim/internal/hcs/schema2/guest_os.go | 14 - .../internal/hcs/schema2/guest_state.go | 22 - 
.../schema2/host_processor_modify_request.go | 16 - .../internal/hcs/schema2/hosted_system.go | 16 - .../hcsshim/internal/hcs/schema2/hv_socket.go | 16 - .../internal/hcs/schema2/hv_socket_2.go | 15 - .../internal/hcs/schema2/hv_socket_address.go | 17 - .../hcs/schema2/hv_socket_service_config.go | 28 - .../hcs/schema2/hv_socket_system_config.go | 22 - .../hcs/schema2/interrupt_moderation_mode.go | 42 - .../internal/hcs/schema2/iov_settings.go | 22 - .../hcsshim/internal/hcs/schema2/keyboard.go | 13 - .../hcsshim/internal/hcs/schema2/layer.go | 21 - .../hcs/schema2/linux_kernel_direct.go | 18 - .../internal/hcs/schema2/logical_processor.go | 18 - .../internal/hcs/schema2/mapped_directory.go | 20 - .../internal/hcs/schema2/mapped_pipe.go | 18 - .../hcsshim/internal/hcs/schema2/memory.go | 14 - .../hcsshim/internal/hcs/schema2/memory_2.go | 49 - .../hcs/schema2/memory_information_for_vm.go | 18 - .../internal/hcs/schema2/memory_stats.go | 19 - .../hcs/schema2/modification_request.go | 15 - .../hcs/schema2/modify_setting_request.go | 20 - .../hcsshim/internal/hcs/schema2/mouse.go | 13 - .../internal/hcs/schema2/network_adapter.go | 17 - .../internal/hcs/schema2/networking.go | 23 - .../hcs/schema2/pause_notification.go | 15 - .../internal/hcs/schema2/pause_options.go | 17 - .../hcsshim/internal/hcs/schema2/plan9.go | 14 - .../internal/hcs/schema2/plan9_share.go | 34 - .../internal/hcs/schema2/process_details.go | 33 - .../hcs/schema2/process_modify_request.go | 19 - .../hcs/schema2/process_parameters.go | 46 - .../internal/hcs/schema2/process_status.go | 21 - .../hcsshim/internal/hcs/schema2/processor.go | 18 - .../internal/hcs/schema2/processor_2.go | 23 - .../internal/hcs/schema2/processor_stats.go | 19 - .../hcs/schema2/processor_topology.go | 15 - .../internal/hcs/schema2/properties.go | 54 - .../internal/hcs/schema2/property_query.go | 15 - .../internal/hcs/schema2/property_type.go | 26 - .../hcs/schema2/rdp_connection_options.go | 16 - 
.../internal/hcs/schema2/registry_changes.go | 16 - .../internal/hcs/schema2/registry_key.go | 18 - .../internal/hcs/schema2/registry_value.go | 30 - .../internal/hcs/schema2/restore_state.go | 19 - .../internal/hcs/schema2/save_options.go | 19 - .../hcsshim/internal/hcs/schema2/scsi.go | 16 - .../hcs/schema2/service_properties.go | 18 - .../schema2/shared_memory_configuration.go | 14 - .../hcs/schema2/shared_memory_region.go | 22 - .../hcs/schema2/shared_memory_region_info.go | 16 - .../internal/hcs/schema2/silo_properties.go | 17 - .../internal/hcs/schema2/statistics.go | 29 - .../hcsshim/internal/hcs/schema2/storage.go | 21 - .../internal/hcs/schema2/storage_qo_s.go | 16 - .../internal/hcs/schema2/storage_stats.go | 21 - .../hcsshim/internal/hcs/schema2/topology.go | 16 - .../hcsshim/internal/hcs/schema2/uefi.go | 20 - .../internal/hcs/schema2/uefi_boot_entry.go | 22 - .../hcsshim/internal/hcs/schema2/version.go | 16 - .../internal/hcs/schema2/video_monitor.go | 18 - .../internal/hcs/schema2/virtual_machine.go | 32 - .../internal/hcs/schema2/virtual_node_info.go | 20 - .../hcs/schema2/virtual_p_mem_controller.go | 20 - .../hcs/schema2/virtual_p_mem_device.go | 18 - .../hcs/schema2/virtual_pci_device.go | 16 - .../hcs/schema2/virtual_pci_function.go | 18 - .../internal/hcs/schema2/virtual_smb.go | 16 - .../internal/hcs/schema2/virtual_smb_share.go | 20 - .../hcs/schema2/virtual_smb_share_options.go | 62 - .../hcsshim/internal/hcs/schema2/vm_memory.go | 26 - .../hcs/schema2/vm_processor_limits.go | 22 - .../hcs/schema2/windows_crash_reporting.go | 16 - .../Microsoft/hcsshim/internal/hcs/service.go | 49 - .../Microsoft/hcsshim/internal/hcs/system.go | 637 - .../Microsoft/hcsshim/internal/hcs/utils.go | 62 - .../hcsshim/internal/hcs/waithelper.go | 68 - .../hcsshim/internal/hcserror/hcserror.go | 47 - .../Microsoft/hcsshim/internal/hns/hns.go | 23 - .../hcsshim/internal/hns/hnsendpoint.go | 337 - .../hcsshim/internal/hns/hnsfuncs.go | 49 - 
.../hcsshim/internal/hns/hnsglobals.go | 28 - .../hcsshim/internal/hns/hnsnetwork.go | 141 - .../hcsshim/internal/hns/hnspolicy.go | 109 - .../hcsshim/internal/hns/hnspolicylist.go | 201 - .../hcsshim/internal/hns/hnssupport.go | 49 - .../hcsshim/internal/hns/namespace.go | 111 - .../hcsshim/internal/hns/zsyscall_windows.go | 76 - .../hcsshim/internal/interop/interop.go | 23 - .../internal/interop/zsyscall_windows.go | 48 - .../Microsoft/hcsshim/internal/log/g.go | 23 - .../hcsshim/internal/logfields/fields.go | 32 - .../hcsshim/internal/longpath/longpath.go | 24 - .../hcsshim/internal/mergemaps/merge.go | 52 - .../Microsoft/hcsshim/internal/oc/exporter.go | 43 - .../Microsoft/hcsshim/internal/oc/span.go | 17 - .../hcsshim/internal/regstate/regstate.go | 288 - .../internal/regstate/zsyscall_windows.go | 51 - .../hcsshim/internal/runhcs/container.go | 71 - .../Microsoft/hcsshim/internal/runhcs/util.go | 16 - .../Microsoft/hcsshim/internal/runhcs/vm.go | 43 - .../hcsshim/internal/safefile/safeopen.go | 375 - .../hcsshim/internal/timeout/timeout.go | 74 - .../hcsshim/internal/vmcompute/vmcompute.go | 610 - .../internal/vmcompute/zsyscall_windows.go | 581 - .../hcsshim/internal/wclayer/activatelayer.go | 27 - .../hcsshim/internal/wclayer/baselayer.go | 182 - .../hcsshim/internal/wclayer/createlayer.go | 27 - .../internal/wclayer/createscratchlayer.go | 34 - .../internal/wclayer/deactivatelayer.go | 24 - .../hcsshim/internal/wclayer/destroylayer.go | 25 - .../internal/wclayer/expandscratchsize.go | 140 - .../hcsshim/internal/wclayer/exportlayer.go | 94 - .../internal/wclayer/getlayermountpath.go | 50 - .../internal/wclayer/getsharedbaseimages.go | 29 - .../hcsshim/internal/wclayer/grantvmaccess.go | 26 - .../hcsshim/internal/wclayer/importlayer.go | 166 - .../hcsshim/internal/wclayer/layerexists.go | 28 - .../hcsshim/internal/wclayer/layerid.go | 22 - .../hcsshim/internal/wclayer/layerutils.go | 97 - .../hcsshim/internal/wclayer/legacy.go | 811 - 
.../hcsshim/internal/wclayer/nametoguid.go | 29 - .../hcsshim/internal/wclayer/preparelayer.go | 44 - .../hcsshim/internal/wclayer/processimage.go | 41 - .../internal/wclayer/unpreparelayer.go | 25 - .../hcsshim/internal/wclayer/wclayer.go | 35 - .../internal/wclayer/zsyscall_windows.go | 569 - .../hcsshim/internal/winapi/devices.go | 13 - .../hcsshim/internal/winapi/errors.go | 15 - .../hcsshim/internal/winapi/filesystem.go | 110 - .../Microsoft/hcsshim/internal/winapi/iocp.go | 3 - .../hcsshim/internal/winapi/jobobject.go | 215 - .../hcsshim/internal/winapi/logon.go | 30 - .../hcsshim/internal/winapi/memory.go | 27 - .../Microsoft/hcsshim/internal/winapi/net.go | 3 - .../Microsoft/hcsshim/internal/winapi/path.go | 11 - .../hcsshim/internal/winapi/process.go | 10 - .../hcsshim/internal/winapi/processor.go | 7 - .../hcsshim/internal/winapi/system.go | 52 - .../hcsshim/internal/winapi/thread.go | 12 - .../hcsshim/internal/winapi/utils.go | 75 - .../hcsshim/internal/winapi/winapi.go | 5 - .../internal/winapi/zsyscall_windows.go | 371 - vendor/github.com/Microsoft/hcsshim/layer.go | 107 - .../hcsshim/osversion/osversion_windows.go | 50 - .../hcsshim/osversion/windowsbuilds.go | 38 - .../Microsoft/hcsshim/pkg/go-runhcs/LICENSE | 201 - .../Microsoft/hcsshim/pkg/go-runhcs/NOTICE | 22 - .../Microsoft/hcsshim/pkg/go-runhcs/runhcs.go | 173 - .../pkg/go-runhcs/runhcs_create-scratch.go | 54 - .../hcsshim/pkg/go-runhcs/runhcs_create.go | 101 - .../hcsshim/pkg/go-runhcs/runhcs_delete.go | 33 - .../hcsshim/pkg/go-runhcs/runhcs_exec.go | 88 - .../hcsshim/pkg/go-runhcs/runhcs_kill.go | 11 - .../hcsshim/pkg/go-runhcs/runhcs_list.go | 28 - .../hcsshim/pkg/go-runhcs/runhcs_pause.go | 10 - .../hcsshim/pkg/go-runhcs/runhcs_ps.go | 20 - .../pkg/go-runhcs/runhcs_resize-tty.go | 33 - .../hcsshim/pkg/go-runhcs/runhcs_resume.go | 10 - .../hcsshim/pkg/go-runhcs/runhcs_start.go | 10 - .../hcsshim/pkg/go-runhcs/runhcs_state.go | 20 - .../hcsshim/pkg/ociwclayer/export.go | 88 - 
.../hcsshim/pkg/ociwclayer/import.go | 148 - .../github.com/Microsoft/hcsshim/process.go | 98 - .../Microsoft/hcsshim/zsyscall_windows.go | 54 - vendor/github.com/beorn7/perks/LICENSE | 20 - .../beorn7/perks/quantile/exampledata.txt | 2388 - .../beorn7/perks/quantile/stream.go | 316 - .../bits-and-blooms/bitset/.gitignore | 26 - .../bits-and-blooms/bitset/.travis.yml | 37 - .../github.com/bits-and-blooms/bitset/LICENSE | 27 - .../bits-and-blooms/bitset/README.md | 93 - .../bitset/azure-pipelines.yml | 39 - .../bits-and-blooms/bitset/bitset.go | 952 - .../bits-and-blooms/bitset/popcnt.go | 53 - .../bits-and-blooms/bitset/popcnt_19.go | 45 - .../bits-and-blooms/bitset/popcnt_amd64.go | 68 - .../bits-and-blooms/bitset/popcnt_amd64.s | 104 - .../bits-and-blooms/bitset/popcnt_generic.go | 24 - .../bitset/trailing_zeros_18.go | 14 - .../bitset/trailing_zeros_19.go | 9 - .../github.com/cespare/xxhash/v2/.travis.yml | 8 - .../github.com/cespare/xxhash/v2/LICENSE.txt | 22 - vendor/github.com/cespare/xxhash/v2/README.md | 67 - vendor/github.com/cespare/xxhash/v2/xxhash.go | 236 - .../cespare/xxhash/v2/xxhash_amd64.go | 13 - .../cespare/xxhash/v2/xxhash_amd64.s | 215 - .../cespare/xxhash/v2/xxhash_other.go | 76 - .../cespare/xxhash/v2/xxhash_safe.go | 15 - .../cespare/xxhash/v2/xxhash_unsafe.go | 46 - vendor/github.com/cilium/ebpf/.clang-format | 17 - vendor/github.com/cilium/ebpf/.gitignore | 13 - vendor/github.com/cilium/ebpf/.golangci.yaml | 29 - vendor/github.com/cilium/ebpf/ARCHITECTURE.md | 80 - .../github.com/cilium/ebpf/CODE_OF_CONDUCT.md | 46 - vendor/github.com/cilium/ebpf/CONTRIBUTING.md | 40 - vendor/github.com/cilium/ebpf/LICENSE | 23 - vendor/github.com/cilium/ebpf/Makefile | 70 - vendor/github.com/cilium/ebpf/README.md | 62 - vendor/github.com/cilium/ebpf/asm/alu.go | 149 - .../github.com/cilium/ebpf/asm/alu_string.go | 107 - vendor/github.com/cilium/ebpf/asm/doc.go | 2 - vendor/github.com/cilium/ebpf/asm/func.go | 195 - 
.../github.com/cilium/ebpf/asm/func_string.go | 185 - .../github.com/cilium/ebpf/asm/instruction.go | 506 - vendor/github.com/cilium/ebpf/asm/jump.go | 109 - .../github.com/cilium/ebpf/asm/jump_string.go | 53 - .../github.com/cilium/ebpf/asm/load_store.go | 204 - .../cilium/ebpf/asm/load_store_string.go | 80 - vendor/github.com/cilium/ebpf/asm/opcode.go | 237 - .../cilium/ebpf/asm/opcode_string.go | 38 - vendor/github.com/cilium/ebpf/asm/register.go | 49 - vendor/github.com/cilium/ebpf/collection.go | 616 - vendor/github.com/cilium/ebpf/doc.go | 16 - vendor/github.com/cilium/ebpf/elf_reader.go | 953 - .../github.com/cilium/ebpf/elf_reader_fuzz.go | 21 - vendor/github.com/cilium/ebpf/info.go | 239 - .../cilium/ebpf/internal/btf/btf.go | 799 - .../cilium/ebpf/internal/btf/btf_types.go | 282 - .../ebpf/internal/btf/btf_types_string.go | 44 - .../cilium/ebpf/internal/btf/core.go | 887 - .../cilium/ebpf/internal/btf/doc.go | 8 - .../cilium/ebpf/internal/btf/ext_info.go | 303 - .../cilium/ebpf/internal/btf/fuzz.go | 49 - .../cilium/ebpf/internal/btf/strings.go | 60 - .../cilium/ebpf/internal/btf/types.go | 893 - vendor/github.com/cilium/ebpf/internal/cpu.go | 62 - vendor/github.com/cilium/ebpf/internal/elf.go | 68 - .../github.com/cilium/ebpf/internal/endian.go | 29 - .../github.com/cilium/ebpf/internal/errors.go | 51 - vendor/github.com/cilium/ebpf/internal/fd.go | 69 - .../cilium/ebpf/internal/feature.go | 100 - vendor/github.com/cilium/ebpf/internal/io.go | 16 - .../cilium/ebpf/internal/pinning.go | 44 - vendor/github.com/cilium/ebpf/internal/ptr.go | 31 - .../cilium/ebpf/internal/ptr_32_be.go | 14 - .../cilium/ebpf/internal/ptr_32_le.go | 14 - .../github.com/cilium/ebpf/internal/ptr_64.go | 14 - .../cilium/ebpf/internal/syscall.go | 245 - .../cilium/ebpf/internal/syscall_string.go | 56 - .../cilium/ebpf/internal/unix/types_linux.go | 204 - .../cilium/ebpf/internal/unix/types_other.go | 263 - .../cilium/ebpf/internal/version.go | 163 - 
vendor/github.com/cilium/ebpf/link/cgroup.go | 171 - vendor/github.com/cilium/ebpf/link/doc.go | 2 - vendor/github.com/cilium/ebpf/link/iter.go | 100 - vendor/github.com/cilium/ebpf/link/kprobe.go | 438 - vendor/github.com/cilium/ebpf/link/link.go | 229 - vendor/github.com/cilium/ebpf/link/netns.go | 60 - .../github.com/cilium/ebpf/link/perf_event.go | 273 - .../github.com/cilium/ebpf/link/platform.go | 25 - vendor/github.com/cilium/ebpf/link/program.go | 76 - .../cilium/ebpf/link/raw_tracepoint.go | 61 - .../github.com/cilium/ebpf/link/syscalls.go | 190 - .../github.com/cilium/ebpf/link/tracepoint.go | 56 - vendor/github.com/cilium/ebpf/link/uprobe.go | 237 - vendor/github.com/cilium/ebpf/linker.go | 140 - vendor/github.com/cilium/ebpf/map.go | 1232 - vendor/github.com/cilium/ebpf/marshalers.go | 218 - vendor/github.com/cilium/ebpf/prog.go | 728 - vendor/github.com/cilium/ebpf/run-tests.sh | 123 - vendor/github.com/cilium/ebpf/syscalls.go | 480 - vendor/github.com/cilium/ebpf/types.go | 248 - vendor/github.com/cilium/ebpf/types_string.go | 172 - vendor/github.com/containerd/btrfs/.gitignore | 28 - vendor/github.com/containerd/btrfs/LICENSE | 201 - vendor/github.com/containerd/btrfs/README.md | 46 - vendor/github.com/containerd/btrfs/btrfs.c | 33 - vendor/github.com/containerd/btrfs/btrfs.go | 412 - vendor/github.com/containerd/btrfs/btrfs.h | 37 - vendor/github.com/containerd/btrfs/helpers.go | 102 - vendor/github.com/containerd/btrfs/info.go | 45 - .../github.com/containerd/cgroups/.gitignore | 2 - vendor/github.com/containerd/cgroups/LICENSE | 201 - vendor/github.com/containerd/cgroups/Makefile | 24 - .../containerd/cgroups/Protobuild.toml | 46 - .../github.com/containerd/cgroups/README.md | 149 - .../github.com/containerd/cgroups/Vagrantfile | 46 - vendor/github.com/containerd/cgroups/blkio.go | 358 - .../github.com/containerd/cgroups/cgroup.go | 552 - .../github.com/containerd/cgroups/control.go | 92 - vendor/github.com/containerd/cgroups/cpu.go | 125 - 
.../github.com/containerd/cgroups/cpuacct.go | 123 - .../github.com/containerd/cgroups/cpuset.go | 159 - .../github.com/containerd/cgroups/devices.go | 92 - .../github.com/containerd/cgroups/errors.go | 47 - .../github.com/containerd/cgroups/freezer.go | 82 - .../containerd/cgroups/hierarchy.go | 20 - .../github.com/containerd/cgroups/hugetlb.go | 109 - .../github.com/containerd/cgroups/memory.go | 480 - vendor/github.com/containerd/cgroups/named.go | 39 - .../github.com/containerd/cgroups/net_cls.go | 61 - .../github.com/containerd/cgroups/net_prio.go | 65 - vendor/github.com/containerd/cgroups/opts.go | 61 - vendor/github.com/containerd/cgroups/paths.go | 107 - .../containerd/cgroups/perf_event.go | 37 - vendor/github.com/containerd/cgroups/pids.go | 86 - vendor/github.com/containerd/cgroups/rdma.go | 154 - .../containerd/cgroups/stats/v1/metrics.pb.go | 6125 --- .../cgroups/stats/v1/metrics.pb.txt | 790 - .../containerd/cgroups/stats/v1/metrics.proto | 158 - .../containerd/cgroups/subsystem.go | 116 - .../github.com/containerd/cgroups/systemd.go | 155 - vendor/github.com/containerd/cgroups/ticks.go | 26 - vendor/github.com/containerd/cgroups/utils.go | 404 - vendor/github.com/containerd/cgroups/v1.go | 73 - .../github.com/containerd/cgroups/v2/cpu.go | 83 - .../containerd/cgroups/v2/devicefilter.go | 199 - .../github.com/containerd/cgroups/v2/ebpf.go | 95 - .../containerd/cgroups/v2/errors.go | 46 - vendor/github.com/containerd/cgroups/v2/io.go | 64 - .../containerd/cgroups/v2/manager.go | 782 - .../containerd/cgroups/v2/memory.go | 52 - .../github.com/containerd/cgroups/v2/paths.go | 60 - .../github.com/containerd/cgroups/v2/pids.go | 37 - .../github.com/containerd/cgroups/v2/state.go | 65 - .../containerd/cgroups/v2/stats/metrics.pb.go | 3992 -- .../cgroups/v2/stats/metrics.pb.txt | 539 - .../containerd/cgroups/v2/stats/metrics.proto | 105 - .../github.com/containerd/cgroups/v2/utils.go | 436 - .../containerd/console/.golangci.yml | 20 - 
vendor/github.com/containerd/console/LICENSE | 191 - .../github.com/containerd/console/README.md | 29 - .../github.com/containerd/console/console.go | 87 - .../containerd/console/console_linux.go | 280 - .../containerd/console/console_unix.go | 156 - .../containerd/console/console_windows.go | 216 - .../containerd/console/pty_freebsd_cgo.go | 45 - .../containerd/console/pty_freebsd_nocgo.go | 36 - .../containerd/console/tc_freebsd_cgo.go | 57 - .../containerd/console/tc_freebsd_nocgo.go | 55 - .../github.com/containerd/console/tc_linux.go | 51 - .../containerd/console/tc_netbsd.go | 45 - .../containerd/console/tc_openbsd_cgo.go | 51 - .../containerd/console/tc_openbsd_nocgo.go | 47 - .../containerd/console/tc_solaris_cgo.go | 51 - .../containerd/console/tc_solaris_nocgo.go | 47 - .../github.com/containerd/console/tc_unix.go | 91 - .../containerd/continuity/.gitignore | 25 - .../containerd/continuity/.golangci.yml | 18 - .../github.com/containerd/continuity/.mailmap | 1 - .../github.com/containerd/continuity/AUTHORS | 40 - .../github.com/containerd/continuity/LICENSE | 191 - .../github.com/containerd/continuity/Makefile | 82 - .../containerd/continuity/README.md | 88 - .../containerd/continuity/context.go | 667 - .../containerd/continuity/devices/devices.go | 21 - .../continuity/devices/devices_unix.go | 75 - .../continuity/devices/mknod_freebsd.go | 25 - .../containerd/continuity/digests.go | 100 - .../containerd/continuity/driver/driver.go | 178 - .../continuity/driver/driver_unix.go | 133 - .../continuity/driver/driver_windows.go | 41 - .../continuity/driver/lchmod_linux.go | 39 - .../containerd/continuity/driver/utils.go | 90 - .../containerd/continuity/fs/copy.go | 191 - .../containerd/continuity/fs/copy_freebsd.go | 42 - .../containerd/continuity/fs/copy_linux.go | 150 - .../containerd/continuity/fs/copy_unix.go | 105 - .../containerd/continuity/fs/copy_windows.go | 49 - .../containerd/continuity/fs/diff.go | 326 - .../containerd/continuity/fs/diff_unix.go | 
74 - .../containerd/continuity/fs/dtype_linux.go | 103 - .../github.com/containerd/continuity/fs/du.go | 38 - .../containerd/continuity/fs/du_unix.go | 120 - .../containerd/continuity/fs/du_windows.go | 82 - .../continuity/fs/fstest/compare.go | 69 - .../continuity/fs/fstest/compare_unix.go | 21 - .../continuity/fs/fstest/compare_windows.go | 24 - .../continuity/fs/fstest/continuity_util.go | 215 - .../containerd/continuity/fs/fstest/file.go | 184 - .../continuity/fs/fstest/file_unix.go | 53 - .../continuity/fs/fstest/file_windows.go | 45 - .../continuity/fs/fstest/testsuite.go | 237 - .../containerd/continuity/fs/hardlink.go | 43 - .../continuity/fs/hardlink_windows.go | 23 - .../containerd/continuity/fs/path.go | 311 - .../continuity/fs/stat_darwinfreebsd.go | 44 - .../continuity/fs/stat_linuxopenbsd.go | 45 - .../containerd/continuity/fs/time.go | 29 - .../containerd/continuity/groups_unix.go | 130 - .../containerd/continuity/hardlinks.go | 73 - .../containerd/continuity/hardlinks_unix.go | 53 - .../continuity/hardlinks_windows.go | 28 - .../containerd/continuity/ioutils.go | 63 - .../containerd/continuity/manifest.go | 162 - .../continuity/pathdriver/path_driver.go | 101 - .../continuity/proto/manifest.pb.go | 292 - .../continuity/proto/manifest.proto | 97 - .../containerd/continuity/resource.go | 590 - .../containerd/continuity/resource_unix.go | 53 - .../containerd/continuity/resource_windows.go | 28 - .../containerd/continuity/sysx/README.md | 3 - .../containerd/continuity/sysx/generate.sh | 52 - .../continuity/sysx/nodata_solaris.go | 24 - .../containerd/continuity/sysx/xattr.go | 117 - .../continuity/sysx/xattr_unsupported.go | 67 - .../continuity/testutil/helpers_unix.go | 57 - .../continuity/testutil/helpers_windows.go | 32 - .../testutil/loopback/loopback_linux.go | 115 - .../continuity/testutil/mount_linux.go | 21 - .../continuity/testutil/mount_other.go | 21 - .../github.com/containerd/fifo/.gitattributes | 1 - 
vendor/github.com/containerd/fifo/.gitignore | 2 - .../github.com/containerd/fifo/.golangci.yml | 20 - vendor/github.com/containerd/fifo/LICENSE | 201 - vendor/github.com/containerd/fifo/Makefile | 24 - vendor/github.com/containerd/fifo/errors.go | 28 - vendor/github.com/containerd/fifo/fifo.go | 258 - .../containerd/fifo/handle_linux.go | 100 - .../containerd/fifo/handle_nolinux.go | 65 - vendor/github.com/containerd/fifo/raw.go | 114 - vendor/github.com/containerd/fifo/readme.md | 46 - .../containerd/go-cni/.golangci.yml | 23 - vendor/github.com/containerd/go-cni/LICENSE | 201 - vendor/github.com/containerd/go-cni/README.md | 95 - vendor/github.com/containerd/go-cni/cni.go | 228 - .../containerd/go-cni/deprecated.go | 34 - vendor/github.com/containerd/go-cni/errors.go | 55 - vendor/github.com/containerd/go-cni/helper.go | 41 - .../github.com/containerd/go-cni/namespace.go | 77 - .../containerd/go-cni/namespace_opts.go | 75 - vendor/github.com/containerd/go-cni/opts.go | 264 - vendor/github.com/containerd/go-cni/result.go | 107 - .../github.com/containerd/go-cni/testutils.go | 78 - vendor/github.com/containerd/go-cni/types.go | 65 - .../github.com/containerd/go-runc/.travis.yml | 21 - vendor/github.com/containerd/go-runc/LICENSE | 201 - .../github.com/containerd/go-runc/README.md | 25 - .../containerd/go-runc/command_linux.go | 56 - .../github.com/containerd/go-runc/console.go | 165 - .../containerd/go-runc/container.go | 30 - .../github.com/containerd/go-runc/events.go | 100 - vendor/github.com/containerd/go-runc/io.go | 218 - .../github.com/containerd/go-runc/io_unix.go | 96 - .../containerd/go-runc/io_windows.go | 62 - .../github.com/containerd/go-runc/monitor.go | 76 - vendor/github.com/containerd/go-runc/runc.go | 741 - .../containerd/go-runc/runc_unix.go | 38 - .../containerd/go-runc/runc_windows.go | 31 - vendor/github.com/containerd/go-runc/utils.go | 111 - .../github.com/containerd/imgcrypt/.gitignore | 3 +- .../containerd/imgcrypt/.golangci.yml | 2 +- 
vendor/github.com/containerd/imgcrypt/CHANGES | 23 + .../containerd/imgcrypt/MAINTAINERS | 4 +- .../github.com/containerd/imgcrypt/Makefile | 12 +- .../github.com/containerd/imgcrypt/README.md | 6 +- .../images/encryption/any.go} | 31 +- .../imgcrypt/images/encryption/client.go | 12 +- .../imgcrypt/images/encryption/encryption.go | 72 +- .../github.com/containerd/imgcrypt/payload.go | 1 + vendor/github.com/containerd/ttrpc/.gitignore | 11 - vendor/github.com/containerd/ttrpc/LICENSE | 201 - vendor/github.com/containerd/ttrpc/README.md | 58 - vendor/github.com/containerd/ttrpc/channel.go | 153 - vendor/github.com/containerd/ttrpc/client.go | 409 - vendor/github.com/containerd/ttrpc/codec.go | 43 - vendor/github.com/containerd/ttrpc/config.go | 52 - .../github.com/containerd/ttrpc/handshake.go | 50 - .../containerd/ttrpc/interceptor.go | 50 - .../github.com/containerd/ttrpc/metadata.go | 107 - .../containerd/ttrpc/plugin/generator.go | 147 - vendor/github.com/containerd/ttrpc/server.go | 500 - .../github.com/containerd/ttrpc/services.go | 166 - vendor/github.com/containerd/ttrpc/types.go | 63 - .../containerd/ttrpc/unixcreds_linux.go | 109 - .../github.com/containerd/typeurl/.gitignore | 2 - vendor/github.com/containerd/typeurl/LICENSE | 191 - .../github.com/containerd/typeurl/README.md | 20 - vendor/github.com/containerd/typeurl/doc.go | 83 - vendor/github.com/containerd/typeurl/types.go | 214 - vendor/github.com/containerd/zfs/README.md | 12 +- .../containerd/zfs/plugin/plugin.go | 8 +- vendor/github.com/containerd/zfs/zfs.go | 27 +- .../containernetworking/cni/LICENSE | 202 - .../containernetworking/cni/libcni/api.go | 673 - .../containernetworking/cni/libcni/conf.go | 268 - .../cni/pkg/invoke/args.go | 128 - .../cni/pkg/invoke/delegate.go | 80 - .../cni/pkg/invoke/exec.go | 144 - .../cni/pkg/invoke/find.go | 48 - .../cni/pkg/invoke/os_unix.go | 20 - .../cni/pkg/invoke/os_windows.go | 18 - .../cni/pkg/invoke/raw_exec.go | 88 - .../cni/pkg/types/020/types.go | 126 
- .../containernetworking/cni/pkg/types/args.go | 112 - .../cni/pkg/types/current/types.go | 276 - .../cni/pkg/types/types.go | 207 - .../cni/pkg/utils/utils.go | 84 - .../cni/pkg/version/conf.go | 37 - .../cni/pkg/version/plugin.go | 144 - .../cni/pkg/version/reconcile.go | 49 - .../cni/pkg/version/version.go | 83 - .../containernetworking/plugins/LICENSE | 201 - .../plugins/pkg/ns/README.md | 41 - .../plugins/pkg/ns/ns_linux.go | 234 - .../containers/ocicrypt/.travis.yml | 29 - .../containers/ocicrypt/CODE-OF-CONDUCT.md | 3 - vendor/github.com/containers/ocicrypt/LICENSE | 189 - .../containers/ocicrypt/MAINTAINERS | 5 - .../github.com/containers/ocicrypt/Makefile | 34 - .../github.com/containers/ocicrypt/README.md | 44 - .../containers/ocicrypt/SECURITY.md | 3 - .../ocicrypt/blockcipher/blockcipher.go | 160 - .../blockcipher/blockcipher_aes_ctr.go | 193 - .../containers/ocicrypt/config/config.go | 114 - .../ocicrypt/config/constructors.go | 245 - .../config/keyprovider-config/config.go | 81 - .../ocicrypt/crypto/pkcs11/common.go | 134 - .../ocicrypt/crypto/pkcs11/pkcs11helpers.go | 487 - .../crypto/pkcs11/pkcs11helpers_nocgo.go | 31 - .../ocicrypt/crypto/pkcs11/utils.go | 114 - .../containers/ocicrypt/encryption.go | 350 - vendor/github.com/containers/ocicrypt/gpg.go | 425 - .../containers/ocicrypt/gpgvault.go | 100 - .../ocicrypt/keywrap/jwe/keywrapper_jwe.go | 136 - .../keywrap/keyprovider/keyprovider.go | 242 - .../containers/ocicrypt/keywrap/keywrap.go | 48 - .../ocicrypt/keywrap/pgp/keywrapper_gpg.go | 273 - .../keywrap/pkcs11/keywrapper_pkcs11.go | 147 - .../keywrap/pkcs7/keywrapper_pkcs7.go | 136 - .../github.com/containers/ocicrypt/reader.go | 40 - .../containers/ocicrypt/spec/spec.go | 12 - .../ocicrypt/utils/delayedreader.go | 109 - .../containers/ocicrypt/utils/ioutils.go | 56 - .../utils/keyprovider/keyprovider.pb.go | 243 - .../utils/keyprovider/keyprovider.proto | 17 - .../containers/ocicrypt/utils/testing.go | 166 - 
.../containers/ocicrypt/utils/utils.go | 250 - .../github.com/coreos/go-systemd/v22/LICENSE | 191 - .../github.com/coreos/go-systemd/v22/NOTICE | 5 - .../coreos/go-systemd/v22/daemon/sdnotify.go | 84 - .../coreos/go-systemd/v22/daemon/watchdog.go | 73 - .../coreos/go-systemd/v22/dbus/dbus.go | 261 - .../coreos/go-systemd/v22/dbus/methods.go | 830 - .../coreos/go-systemd/v22/dbus/properties.go | 237 - .../coreos/go-systemd/v22/dbus/set.go | 47 - .../go-systemd/v22/dbus/subscription.go | 333 - .../go-systemd/v22/dbus/subscription_set.go | 57 - .../cpuguy83/go-md2man/v2/LICENSE.md | 21 - .../cpuguy83/go-md2man/v2/md2man/md2man.go | 14 - .../cpuguy83/go-md2man/v2/md2man/roff.go | 345 - vendor/github.com/davecgh/go-spew/LICENSE | 15 - .../github.com/davecgh/go-spew/spew/bypass.go | 145 - .../davecgh/go-spew/spew/bypasssafe.go | 38 - .../github.com/davecgh/go-spew/spew/common.go | 341 - .../github.com/davecgh/go-spew/spew/config.go | 306 - vendor/github.com/davecgh/go-spew/spew/doc.go | 211 - .../github.com/davecgh/go-spew/spew/dump.go | 509 - .../github.com/davecgh/go-spew/spew/format.go | 419 - .../github.com/davecgh/go-spew/spew/spew.go | 148 - vendor/github.com/docker/go-events/.gitignore | 24 - .../docker/go-events/CONTRIBUTING.md | 70 - vendor/github.com/docker/go-events/LICENSE | 201 - .../github.com/docker/go-events/MAINTAINERS | 46 - vendor/github.com/docker/go-events/README.md | 117 - .../github.com/docker/go-events/broadcast.go | 178 - vendor/github.com/docker/go-events/channel.go | 61 - vendor/github.com/docker/go-events/errors.go | 10 - vendor/github.com/docker/go-events/event.go | 15 - vendor/github.com/docker/go-events/filter.go | 52 - vendor/github.com/docker/go-events/queue.go | 111 - vendor/github.com/docker/go-events/retry.go | 260 - .../docker/go-metrics/CONTRIBUTING.md | 55 - vendor/github.com/docker/go-metrics/LICENSE | 191 - .../github.com/docker/go-metrics/LICENSE.docs | 425 - vendor/github.com/docker/go-metrics/NOTICE | 16 - 
vendor/github.com/docker/go-metrics/README.md | 91 - .../github.com/docker/go-metrics/counter.go | 52 - vendor/github.com/docker/go-metrics/docs.go | 3 - vendor/github.com/docker/go-metrics/gauge.go | 72 - .../github.com/docker/go-metrics/handler.go | 74 - .../github.com/docker/go-metrics/helpers.go | 10 - .../github.com/docker/go-metrics/namespace.go | 315 - .../github.com/docker/go-metrics/register.go | 15 - vendor/github.com/docker/go-metrics/timer.go | 85 - vendor/github.com/docker/go-metrics/unit.go | 12 - .../docker/go-units/CONTRIBUTING.md | 67 - vendor/github.com/docker/go-units/LICENSE | 191 - vendor/github.com/docker/go-units/MAINTAINERS | 46 - vendor/github.com/docker/go-units/README.md | 16 - vendor/github.com/docker/go-units/circle.yml | 11 - vendor/github.com/docker/go-units/duration.go | 35 - vendor/github.com/docker/go-units/size.go | 108 - vendor/github.com/docker/go-units/ulimit.go | 123 - .../docker/spdystream/CONTRIBUTING.md | 13 - vendor/github.com/docker/spdystream/LICENSE | 191 - .../github.com/docker/spdystream/LICENSE.docs | 425 - .../github.com/docker/spdystream/MAINTAINERS | 28 - vendor/github.com/docker/spdystream/README.md | 77 - .../docker/spdystream/connection.go | 958 - .../github.com/docker/spdystream/handlers.go | 38 - .../github.com/docker/spdystream/priority.go | 98 - .../docker/spdystream/spdy/dictionary.go | 187 - .../github.com/docker/spdystream/spdy/read.go | 348 - .../docker/spdystream/spdy/types.go | 275 - .../docker/spdystream/spdy/write.go | 318 - vendor/github.com/docker/spdystream/stream.go | 327 - vendor/github.com/docker/spdystream/utils.go | 16 - .../github.com/emicklei/go-restful/.gitignore | 70 - .../emicklei/go-restful/.travis.yml | 6 - .../github.com/emicklei/go-restful/CHANGES.md | 273 - vendor/github.com/emicklei/go-restful/LICENSE | 22 - .../github.com/emicklei/go-restful/Makefile | 7 - .../github.com/emicklei/go-restful/README.md | 88 - vendor/github.com/emicklei/go-restful/Srcfile | 1 - 
.../emicklei/go-restful/bench_test.sh | 10 - .../emicklei/go-restful/compress.go | 123 - .../emicklei/go-restful/compressor_cache.go | 103 - .../emicklei/go-restful/compressor_pools.go | 91 - .../emicklei/go-restful/compressors.go | 54 - .../emicklei/go-restful/constants.go | 30 - .../emicklei/go-restful/container.go | 377 - .../emicklei/go-restful/cors_filter.go | 202 - .../emicklei/go-restful/coverage.sh | 2 - .../github.com/emicklei/go-restful/curly.go | 164 - .../emicklei/go-restful/curly_route.go | 54 - vendor/github.com/emicklei/go-restful/doc.go | 185 - .../emicklei/go-restful/entity_accessors.go | 162 - .../github.com/emicklei/go-restful/filter.go | 35 - vendor/github.com/emicklei/go-restful/json.go | 11 - .../emicklei/go-restful/jsoniter.go | 12 - .../github.com/emicklei/go-restful/jsr311.go | 297 - .../github.com/emicklei/go-restful/log/log.go | 34 - .../github.com/emicklei/go-restful/logger.go | 32 - vendor/github.com/emicklei/go-restful/mime.go | 50 - .../emicklei/go-restful/options_filter.go | 34 - .../emicklei/go-restful/parameter.go | 143 - .../emicklei/go-restful/path_expression.go | 74 - .../emicklei/go-restful/path_processor.go | 63 - .../github.com/emicklei/go-restful/request.go | 118 - .../emicklei/go-restful/response.go | 255 - .../github.com/emicklei/go-restful/route.go | 170 - .../emicklei/go-restful/route_builder.go | 326 - .../github.com/emicklei/go-restful/router.go | 20 - .../emicklei/go-restful/service_error.go | 23 - .../emicklei/go-restful/web_service.go | 290 - .../go-restful/web_service_container.go | 39 - .../fsnotify/fsnotify/.editorconfig | 12 - .../fsnotify/fsnotify/.gitattributes | 1 - .../github.com/fsnotify/fsnotify/.gitignore | 6 - .../github.com/fsnotify/fsnotify/.travis.yml | 36 - vendor/github.com/fsnotify/fsnotify/AUTHORS | 52 - .../github.com/fsnotify/fsnotify/CHANGELOG.md | 317 - .../fsnotify/fsnotify/CONTRIBUTING.md | 77 - vendor/github.com/fsnotify/fsnotify/LICENSE | 28 - vendor/github.com/fsnotify/fsnotify/README.md 
| 130 - vendor/github.com/fsnotify/fsnotify/fen.go | 37 - .../github.com/fsnotify/fsnotify/fsnotify.go | 68 - .../github.com/fsnotify/fsnotify/inotify.go | 337 - .../fsnotify/fsnotify/inotify_poller.go | 187 - vendor/github.com/fsnotify/fsnotify/kqueue.go | 521 - .../fsnotify/fsnotify/open_mode_bsd.go | 11 - .../fsnotify/fsnotify/open_mode_darwin.go | 12 - .../github.com/fsnotify/fsnotify/windows.go | 561 - vendor/github.com/go-logr/logr/LICENSE | 201 - vendor/github.com/go-logr/logr/README.md | 181 - vendor/github.com/go-logr/logr/logr.go | 178 - .../github.com/godbus/dbus/v5/CONTRIBUTING.md | 50 - vendor/github.com/godbus/dbus/v5/LICENSE | 25 - vendor/github.com/godbus/dbus/v5/MAINTAINERS | 3 - .../github.com/godbus/dbus/v5/README.markdown | 46 - vendor/github.com/godbus/dbus/v5/auth.go | 252 - .../godbus/dbus/v5/auth_anonymous.go | 16 - .../godbus/dbus/v5/auth_external.go | 26 - vendor/github.com/godbus/dbus/v5/auth_sha1.go | 102 - vendor/github.com/godbus/dbus/v5/call.go | 69 - vendor/github.com/godbus/dbus/v5/conn.go | 959 - .../github.com/godbus/dbus/v5/conn_darwin.go | 37 - .../github.com/godbus/dbus/v5/conn_other.go | 93 - vendor/github.com/godbus/dbus/v5/conn_unix.go | 17 - .../github.com/godbus/dbus/v5/conn_windows.go | 15 - vendor/github.com/godbus/dbus/v5/dbus.go | 432 - vendor/github.com/godbus/dbus/v5/decoder.go | 286 - .../godbus/dbus/v5/default_handler.go | 342 - vendor/github.com/godbus/dbus/v5/doc.go | 69 - vendor/github.com/godbus/dbus/v5/encoder.go | 210 - vendor/github.com/godbus/dbus/v5/export.go | 441 - vendor/github.com/godbus/dbus/v5/homedir.go | 28 - .../godbus/dbus/v5/homedir_dynamic.go | 15 - .../godbus/dbus/v5/homedir_static.go | 45 - vendor/github.com/godbus/dbus/v5/match.go | 89 - vendor/github.com/godbus/dbus/v5/message.go | 353 - vendor/github.com/godbus/dbus/v5/object.go | 174 - vendor/github.com/godbus/dbus/v5/sequence.go | 24 - .../godbus/dbus/v5/sequential_handler.go | 125 - .../godbus/dbus/v5/server_interfaces.go | 107 - 
vendor/github.com/godbus/dbus/v5/sig.go | 259 - .../godbus/dbus/v5/transport_darwin.go | 6 - .../godbus/dbus/v5/transport_generic.go | 50 - .../godbus/dbus/v5/transport_nonce_tcp.go | 39 - .../godbus/dbus/v5/transport_tcp.go | 41 - .../godbus/dbus/v5/transport_unix.go | 214 - .../dbus/v5/transport_unixcred_dragonfly.go | 95 - .../dbus/v5/transport_unixcred_freebsd.go | 92 - .../dbus/v5/transport_unixcred_linux.go | 25 - .../dbus/v5/transport_unixcred_openbsd.go | 14 - vendor/github.com/godbus/dbus/v5/variant.go | 150 - .../godbus/dbus/v5/variant_lexer.go | 284 - .../godbus/dbus/v5/variant_parser.go | 817 - vendor/github.com/gogo/googleapis/LICENSE | 203 - .../gogo/googleapis/google/rpc/code.pb.go | 257 - .../gogo/googleapis/google/rpc/code.proto | 185 - .../googleapis/google/rpc/error_details.pb.go | 4904 -- .../googleapis/google/rpc/error_details.proto | 200 - .../gogo/googleapis/google/rpc/status.pb.go | 731 - .../gogo/googleapis/google/rpc/status.proto | 94 - vendor/github.com/gogo/protobuf/AUTHORS | 15 - vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 - vendor/github.com/gogo/protobuf/LICENSE | 35 - .../gogo/protobuf/gogoproto/Makefile | 37 - .../github.com/gogo/protobuf/gogoproto/doc.go | 169 - .../gogo/protobuf/gogoproto/gogo.pb.go | 874 - .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 - .../gogo/protobuf/gogoproto/gogo.proto | 144 - .../gogo/protobuf/gogoproto/helper.go | 415 - .../gogo/protobuf/plugin/compare/compare.go | 580 - .../protobuf/plugin/compare/comparetest.go | 118 - .../plugin/defaultcheck/defaultcheck.go | 133 - .../plugin/description/description.go | 201 - .../plugin/description/descriptiontest.go | 73 - .../protobuf/plugin/embedcheck/embedcheck.go | 200 - .../plugin/enumstringer/enumstringer.go | 104 - .../gogo/protobuf/plugin/equal/equal.go | 694 - .../gogo/protobuf/plugin/equal/equaltest.go | 109 - .../gogo/protobuf/plugin/face/face.go | 233 - .../gogo/protobuf/plugin/face/facetest.go | 82 - .../gogo/protobuf/plugin/gostring/gostring.go 
| 386 - .../protobuf/plugin/gostring/gostringtest.go | 90 - .../protobuf/plugin/marshalto/marshalto.go | 1140 - .../protobuf/plugin/oneofcheck/oneofcheck.go | 93 - .../gogo/protobuf/plugin/populate/populate.go | 815 - .../gogo/protobuf/plugin/size/size.go | 696 - .../gogo/protobuf/plugin/size/sizetest.go | 134 - .../gogo/protobuf/plugin/stringer/stringer.go | 347 - .../protobuf/plugin/stringer/stringertest.go | 83 - .../gogo/protobuf/plugin/testgen/testgen.go | 608 - .../gogo/protobuf/plugin/union/union.go | 209 - .../gogo/protobuf/plugin/union/uniontest.go | 86 - .../protobuf/plugin/unmarshal/unmarshal.go | 1657 - .../github.com/gogo/protobuf/proto/Makefile | 43 - .../github.com/gogo/protobuf/proto/clone.go | 258 - .../gogo/protobuf/proto/custom_gogo.go | 39 - .../github.com/gogo/protobuf/proto/decode.go | 427 - .../gogo/protobuf/proto/deprecated.go | 63 - .../github.com/gogo/protobuf/proto/discard.go | 350 - .../gogo/protobuf/proto/duration.go | 100 - .../gogo/protobuf/proto/duration_gogo.go | 49 - .../github.com/gogo/protobuf/proto/encode.go | 205 - .../gogo/protobuf/proto/encode_gogo.go | 33 - .../github.com/gogo/protobuf/proto/equal.go | 300 - .../gogo/protobuf/proto/extensions.go | 605 - .../gogo/protobuf/proto/extensions_gogo.go | 389 - vendor/github.com/gogo/protobuf/proto/lib.go | 973 - .../gogo/protobuf/proto/lib_gogo.go | 50 - .../gogo/protobuf/proto/message_set.go | 181 - .../gogo/protobuf/proto/pointer_reflect.go | 357 - .../protobuf/proto/pointer_reflect_gogo.go | 59 - .../gogo/protobuf/proto/pointer_unsafe.go | 308 - .../protobuf/proto/pointer_unsafe_gogo.go | 56 - .../gogo/protobuf/proto/properties.go | 610 - .../gogo/protobuf/proto/properties_gogo.go | 36 - .../gogo/protobuf/proto/skip_gogo.go | 119 - .../gogo/protobuf/proto/table_marshal.go | 3009 -- .../gogo/protobuf/proto/table_marshal_gogo.go | 388 - .../gogo/protobuf/proto/table_merge.go | 676 - .../gogo/protobuf/proto/table_unmarshal.go | 2249 - .../protobuf/proto/table_unmarshal_gogo.go | 
385 - vendor/github.com/gogo/protobuf/proto/text.go | 930 - .../gogo/protobuf/proto/text_gogo.go | 57 - .../gogo/protobuf/proto/text_parser.go | 1018 - .../gogo/protobuf/proto/timestamp.go | 113 - .../gogo/protobuf/proto/timestamp_gogo.go | 49 - .../gogo/protobuf/proto/wrappers.go | 1888 - .../gogo/protobuf/proto/wrappers_gogo.go | 113 - .../protoc-gen-gogo/descriptor/Makefile | 36 - .../protoc-gen-gogo/descriptor/descriptor.go | 118 - .../descriptor/descriptor.pb.go | 2865 -- .../descriptor/descriptor_gostring.gen.go | 752 - .../protoc-gen-gogo/descriptor/helper.go | 390 - .../protoc-gen-gogo/generator/generator.go | 3444 -- .../protoc-gen-gogo/generator/helper.go | 461 - .../generator/internal/remap/remap.go | 117 - .../protobuf/protoc-gen-gogo/grpc/grpc.go | 536 - .../protobuf/protoc-gen-gogo/plugin/Makefile | 37 - .../protoc-gen-gogo/plugin/plugin.pb.go | 365 - .../gogo/protobuf/sortkeys/sortkeys.go | 101 - vendor/github.com/gogo/protobuf/types/any.go | 140 - .../github.com/gogo/protobuf/types/any.pb.go | 694 - .../github.com/gogo/protobuf/types/api.pb.go | 2134 - vendor/github.com/gogo/protobuf/types/doc.go | 35 - .../gogo/protobuf/types/duration.go | 100 - .../gogo/protobuf/types/duration.pb.go | 517 - .../gogo/protobuf/types/duration_gogo.go | 100 - .../gogo/protobuf/types/empty.pb.go | 462 - .../gogo/protobuf/types/field_mask.pb.go | 738 - .../gogo/protobuf/types/protosize.go | 34 - .../gogo/protobuf/types/source_context.pb.go | 524 - .../gogo/protobuf/types/struct.pb.go | 2271 - .../gogo/protobuf/types/timestamp.go | 130 - .../gogo/protobuf/types/timestamp.pb.go | 539 - .../gogo/protobuf/types/timestamp_gogo.go | 94 - .../github.com/gogo/protobuf/types/type.pb.go | 3355 -- .../gogo/protobuf/types/wrappers.pb.go | 2703 - .../gogo/protobuf/types/wrappers_gogo.go | 300 - .../gogo/protobuf/vanity/command/command.go | 161 - .../github.com/gogo/protobuf/vanity/enum.go | 78 - .../github.com/gogo/protobuf/vanity/field.go | 90 - 
.../github.com/gogo/protobuf/vanity/file.go | 197 - .../gogo/protobuf/vanity/foreach.go | 125 - vendor/github.com/gogo/protobuf/vanity/msg.go | 154 - vendor/github.com/golang/groupcache/LICENSE | 191 - .../github.com/golang/groupcache/lru/lru.go | 133 - vendor/github.com/golang/protobuf/AUTHORS | 3 - .../github.com/golang/protobuf/CONTRIBUTORS | 3 - vendor/github.com/golang/protobuf/LICENSE | 28 - .../github.com/golang/protobuf/proto/clone.go | 253 - .../golang/protobuf/proto/decode.go | 427 - .../golang/protobuf/proto/deprecated.go | 63 - .../golang/protobuf/proto/discard.go | 350 - .../golang/protobuf/proto/encode.go | 203 - .../github.com/golang/protobuf/proto/equal.go | 301 - .../golang/protobuf/proto/extensions.go | 607 - .../github.com/golang/protobuf/proto/lib.go | 965 - .../golang/protobuf/proto/message_set.go | 181 - .../golang/protobuf/proto/pointer_reflect.go | 360 - .../golang/protobuf/proto/pointer_unsafe.go | 313 - .../golang/protobuf/proto/properties.go | 544 - .../golang/protobuf/proto/table_marshal.go | 2776 -- .../golang/protobuf/proto/table_merge.go | 654 - .../golang/protobuf/proto/table_unmarshal.go | 2053 - .../github.com/golang/protobuf/proto/text.go | 845 - .../golang/protobuf/proto/text_parser.go | 880 - .../github.com/golang/protobuf/ptypes/any.go | 141 - .../golang/protobuf/ptypes/any/any.pb.go | 203 - .../golang/protobuf/ptypes/any/any.proto | 155 - .../github.com/golang/protobuf/ptypes/doc.go | 35 - .../golang/protobuf/ptypes/duration.go | 102 - .../protobuf/ptypes/duration/duration.pb.go | 163 - .../protobuf/ptypes/duration/duration.proto | 116 - .../golang/protobuf/ptypes/timestamp.go | 132 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 185 - .../protobuf/ptypes/timestamp/timestamp.proto | 138 - vendor/github.com/google/go-cmp/LICENSE | 27 - .../google/go-cmp/cmp/cmpopts/equate.go | 148 - .../google/go-cmp/cmp/cmpopts/errors_go113.go | 15 - .../go-cmp/cmp/cmpopts/errors_xerrors.go | 18 - .../google/go-cmp/cmp/cmpopts/ignore.go | 
206 - .../google/go-cmp/cmp/cmpopts/sort.go | 147 - .../go-cmp/cmp/cmpopts/struct_filter.go | 187 - .../google/go-cmp/cmp/cmpopts/xform.go | 35 - .../github.com/google/go-cmp/cmp/compare.go | 682 - .../google/go-cmp/cmp/export_panic.go | 15 - .../google/go-cmp/cmp/export_unsafe.go | 35 - .../go-cmp/cmp/internal/diff/debug_disable.go | 17 - .../go-cmp/cmp/internal/diff/debug_enable.go | 122 - .../google/go-cmp/cmp/internal/diff/diff.go | 398 - .../google/go-cmp/cmp/internal/flags/flags.go | 9 - .../cmp/internal/flags/toolchain_legacy.go | 10 - .../cmp/internal/flags/toolchain_recent.go | 10 - .../go-cmp/cmp/internal/function/func.go | 99 - .../google/go-cmp/cmp/internal/value/name.go | 157 - .../cmp/internal/value/pointer_purego.go | 33 - .../cmp/internal/value/pointer_unsafe.go | 36 - .../google/go-cmp/cmp/internal/value/sort.go | 106 - .../google/go-cmp/cmp/internal/value/zero.go | 48 - .../github.com/google/go-cmp/cmp/options.go | 552 - vendor/github.com/google/go-cmp/cmp/path.go | 378 - vendor/github.com/google/go-cmp/cmp/report.go | 54 - .../google/go-cmp/cmp/report_compare.go | 432 - .../google/go-cmp/cmp/report_references.go | 264 - .../google/go-cmp/cmp/report_reflect.go | 402 - .../google/go-cmp/cmp/report_slices.go | 465 - .../google/go-cmp/cmp/report_text.go | 431 - .../google/go-cmp/cmp/report_value.go | 121 - vendor/github.com/google/gofuzz/.travis.yml | 13 - .../github.com/google/gofuzz/CONTRIBUTING.md | 67 - vendor/github.com/google/gofuzz/LICENSE | 202 - vendor/github.com/google/gofuzz/README.md | 71 - vendor/github.com/google/gofuzz/fuzz.go | 506 - vendor/github.com/google/uuid/.travis.yml | 9 - vendor/github.com/google/uuid/CONTRIBUTING.md | 10 - vendor/github.com/google/uuid/CONTRIBUTORS | 9 - vendor/github.com/google/uuid/LICENSE | 27 - vendor/github.com/google/uuid/README.md | 19 - vendor/github.com/google/uuid/dce.go | 80 - vendor/github.com/google/uuid/doc.go | 12 - vendor/github.com/google/uuid/hash.go | 53 - 
vendor/github.com/google/uuid/marshal.go | 38 - vendor/github.com/google/uuid/node.go | 90 - vendor/github.com/google/uuid/node_js.go | 12 - vendor/github.com/google/uuid/node_net.go | 33 - vendor/github.com/google/uuid/sql.go | 59 - vendor/github.com/google/uuid/time.go | 123 - vendor/github.com/google/uuid/util.go | 43 - vendor/github.com/google/uuid/uuid.go | 251 - vendor/github.com/google/uuid/version1.go | 44 - vendor/github.com/google/uuid/version4.go | 51 - .../go-grpc-prometheus/.gitignore | 201 - .../go-grpc-prometheus/.travis.yml | 25 - .../go-grpc-prometheus/CHANGELOG.md | 24 - .../grpc-ecosystem/go-grpc-prometheus/LICENSE | 201 - .../go-grpc-prometheus/README.md | 247 - .../go-grpc-prometheus/client.go | 39 - .../go-grpc-prometheus/client_metrics.go | 170 - .../go-grpc-prometheus/client_reporter.go | 46 - .../go-grpc-prometheus/makefile | 16 - .../go-grpc-prometheus/metric_options.go | 41 - .../go-grpc-prometheus/server.go | 48 - .../go-grpc-prometheus/server_metrics.go | 185 - .../go-grpc-prometheus/server_reporter.go | 46 - .../grpc-ecosystem/go-grpc-prometheus/util.go | 50 - vendor/github.com/hashicorp/errwrap/LICENSE | 354 - vendor/github.com/hashicorp/errwrap/README.md | 89 - .../github.com/hashicorp/errwrap/errwrap.go | 169 - .../hashicorp/go-multierror/.travis.yml | 12 - .../hashicorp/go-multierror/LICENSE | 353 - .../hashicorp/go-multierror/Makefile | 31 - .../hashicorp/go-multierror/README.md | 97 - .../hashicorp/go-multierror/append.go | 41 - .../hashicorp/go-multierror/flatten.go | 26 - .../hashicorp/go-multierror/format.go | 27 - .../hashicorp/go-multierror/multierror.go | 51 - .../hashicorp/go-multierror/prefix.go | 37 - .../hashicorp/go-multierror/sort.go | 16 - .../github.com/imdario/mergo/.deepsource.toml | 12 - vendor/github.com/imdario/mergo/.gitignore | 33 - vendor/github.com/imdario/mergo/.travis.yml | 12 - .../imdario/mergo/CODE_OF_CONDUCT.md | 46 - vendor/github.com/imdario/mergo/LICENSE | 28 - 
vendor/github.com/imdario/mergo/README.md | 247 - vendor/github.com/imdario/mergo/doc.go | 143 - vendor/github.com/imdario/mergo/map.go | 178 - vendor/github.com/imdario/mergo/merge.go | 380 - vendor/github.com/imdario/mergo/mergo.go | 78 - .../github.com/json-iterator/go/.codecov.yml | 3 - vendor/github.com/json-iterator/go/.gitignore | 4 - .../github.com/json-iterator/go/.travis.yml | 14 - vendor/github.com/json-iterator/go/Gopkg.lock | 21 - vendor/github.com/json-iterator/go/Gopkg.toml | 26 - vendor/github.com/json-iterator/go/LICENSE | 21 - vendor/github.com/json-iterator/go/README.md | 87 - vendor/github.com/json-iterator/go/adapter.go | 150 - vendor/github.com/json-iterator/go/any.go | 325 - .../github.com/json-iterator/go/any_array.go | 278 - .../github.com/json-iterator/go/any_bool.go | 137 - .../github.com/json-iterator/go/any_float.go | 83 - .../github.com/json-iterator/go/any_int32.go | 74 - .../github.com/json-iterator/go/any_int64.go | 74 - .../json-iterator/go/any_invalid.go | 82 - vendor/github.com/json-iterator/go/any_nil.go | 69 - .../github.com/json-iterator/go/any_number.go | 123 - .../github.com/json-iterator/go/any_object.go | 374 - vendor/github.com/json-iterator/go/any_str.go | 166 - .../github.com/json-iterator/go/any_uint32.go | 74 - .../github.com/json-iterator/go/any_uint64.go | 74 - vendor/github.com/json-iterator/go/build.sh | 12 - vendor/github.com/json-iterator/go/config.go | 375 - .../go/fuzzy_mode_convert_table.md | 7 - vendor/github.com/json-iterator/go/iter.go | 349 - .../github.com/json-iterator/go/iter_array.go | 64 - .../github.com/json-iterator/go/iter_float.go | 339 - .../github.com/json-iterator/go/iter_int.go | 345 - .../json-iterator/go/iter_object.go | 267 - .../github.com/json-iterator/go/iter_skip.go | 130 - .../json-iterator/go/iter_skip_sloppy.go | 163 - .../json-iterator/go/iter_skip_strict.go | 99 - .../github.com/json-iterator/go/iter_str.go | 215 - .../github.com/json-iterator/go/jsoniter.go | 18 - 
vendor/github.com/json-iterator/go/pool.go | 42 - vendor/github.com/json-iterator/go/reflect.go | 337 - .../json-iterator/go/reflect_array.go | 104 - .../json-iterator/go/reflect_dynamic.go | 70 - .../json-iterator/go/reflect_extension.go | 483 - .../json-iterator/go/reflect_json_number.go | 112 - .../go/reflect_json_raw_message.go | 60 - .../json-iterator/go/reflect_map.go | 346 - .../json-iterator/go/reflect_marshaler.go | 225 - .../json-iterator/go/reflect_native.go | 453 - .../json-iterator/go/reflect_optional.go | 129 - .../json-iterator/go/reflect_slice.go | 99 - .../go/reflect_struct_decoder.go | 1092 - .../go/reflect_struct_encoder.go | 211 - vendor/github.com/json-iterator/go/stream.go | 210 - .../json-iterator/go/stream_float.go | 111 - .../github.com/json-iterator/go/stream_int.go | 190 - .../github.com/json-iterator/go/stream_str.go | 372 - vendor/github.com/json-iterator/go/test.sh | 12 - vendor/github.com/klauspost/compress/LICENSE | 28 - .../klauspost/compress/fse/README.md | 79 - .../klauspost/compress/fse/bitreader.go | 122 - .../klauspost/compress/fse/bitwriter.go | 168 - .../klauspost/compress/fse/bytereader.go | 47 - .../klauspost/compress/fse/compress.go | 684 - .../klauspost/compress/fse/decompress.go | 374 - .../github.com/klauspost/compress/fse/fse.go | 144 - .../klauspost/compress/huff0/.gitignore | 1 - .../klauspost/compress/huff0/README.md | 89 - .../klauspost/compress/huff0/bitreader.go | 329 - .../klauspost/compress/huff0/bitwriter.go | 210 - .../klauspost/compress/huff0/bytereader.go | 54 - .../klauspost/compress/huff0/compress.go | 657 - .../klauspost/compress/huff0/decompress.go | 1164 - .../klauspost/compress/huff0/huff0.go | 273 - .../klauspost/compress/snappy/.gitignore | 16 - .../klauspost/compress/snappy/AUTHORS | 15 - .../klauspost/compress/snappy/CONTRIBUTORS | 37 - .../klauspost/compress/snappy/LICENSE | 27 - .../klauspost/compress/snappy/README | 107 - .../klauspost/compress/snappy/decode.go | 237 - 
.../klauspost/compress/snappy/decode_amd64.go | 14 - .../klauspost/compress/snappy/decode_amd64.s | 482 - .../klauspost/compress/snappy/decode_other.go | 115 - .../klauspost/compress/snappy/encode.go | 285 - .../klauspost/compress/snappy/encode_amd64.go | 29 - .../klauspost/compress/snappy/encode_amd64.s | 730 - .../klauspost/compress/snappy/encode_other.go | 238 - .../klauspost/compress/snappy/runbench.cmd | 2 - .../klauspost/compress/snappy/snappy.go | 98 - .../klauspost/compress/zstd/README.md | 417 - .../klauspost/compress/zstd/bitreader.go | 136 - .../klauspost/compress/zstd/bitwriter.go | 169 - .../klauspost/compress/zstd/blockdec.go | 739 - .../klauspost/compress/zstd/blockenc.go | 871 - .../compress/zstd/blocktype_string.go | 85 - .../klauspost/compress/zstd/bytebuf.go | 127 - .../klauspost/compress/zstd/bytereader.go | 88 - .../klauspost/compress/zstd/decodeheader.go | 202 - .../klauspost/compress/zstd/decoder.go | 561 - .../compress/zstd/decoder_options.go | 84 - .../klauspost/compress/zstd/dict.go | 122 - .../klauspost/compress/zstd/enc_base.go | 177 - .../klauspost/compress/zstd/enc_best.go | 487 - .../klauspost/compress/zstd/enc_better.go | 1170 - .../klauspost/compress/zstd/enc_dfast.go | 1121 - .../klauspost/compress/zstd/enc_fast.go | 1018 - .../klauspost/compress/zstd/encoder.go | 576 - .../compress/zstd/encoder_options.go | 312 - .../klauspost/compress/zstd/framedec.go | 494 - .../klauspost/compress/zstd/frameenc.go | 137 - .../klauspost/compress/zstd/fse_decoder.go | 385 - .../klauspost/compress/zstd/fse_encoder.go | 726 - .../klauspost/compress/zstd/fse_predefined.go | 158 - .../klauspost/compress/zstd/hash.go | 77 - .../klauspost/compress/zstd/history.go | 89 - .../compress/zstd/internal/xxhash/LICENSE.txt | 22 - .../compress/zstd/internal/xxhash/README.md | 58 - .../compress/zstd/internal/xxhash/xxhash.go | 238 - .../zstd/internal/xxhash/xxhash_amd64.go | 13 - .../zstd/internal/xxhash/xxhash_amd64.s | 215 - 
.../zstd/internal/xxhash/xxhash_other.go | 76 - .../zstd/internal/xxhash/xxhash_safe.go | 11 - .../klauspost/compress/zstd/seqdec.go | 492 - .../klauspost/compress/zstd/seqenc.go | 115 - .../klauspost/compress/zstd/snappy.go | 436 - .../klauspost/compress/zstd/zstd.go | 156 - .../golang_protobuf_extensions/LICENSE | 201 - .../golang_protobuf_extensions/NOTICE | 1 - .../pbutil/.gitignore | 1 - .../pbutil/Makefile | 7 - .../pbutil/decode.go | 75 - .../golang_protobuf_extensions/pbutil/doc.go | 16 - .../pbutil/encode.go | 46 - vendor/github.com/miekg/pkcs11/.gitignore | 3 - vendor/github.com/miekg/pkcs11/.travis.yml | 14 - vendor/github.com/miekg/pkcs11/LICENSE | 27 - .../github.com/miekg/pkcs11/Makefile.release | 57 - vendor/github.com/miekg/pkcs11/README.md | 68 - vendor/github.com/miekg/pkcs11/const.go | 736 - vendor/github.com/miekg/pkcs11/error.go | 98 - vendor/github.com/miekg/pkcs11/hsm.db | Bin 10240 -> 0 bytes vendor/github.com/miekg/pkcs11/params.go | 190 - vendor/github.com/miekg/pkcs11/pkcs11.go | 1606 - vendor/github.com/miekg/pkcs11/pkcs11.h | 265 - vendor/github.com/miekg/pkcs11/pkcs11f.h | 939 - vendor/github.com/miekg/pkcs11/pkcs11go.h | 33 - vendor/github.com/miekg/pkcs11/pkcs11t.h | 2047 - vendor/github.com/miekg/pkcs11/release.go | 17 - vendor/github.com/miekg/pkcs11/softhsm.conf | 1 - vendor/github.com/miekg/pkcs11/softhsm2.conf | 4 - vendor/github.com/miekg/pkcs11/types.go | 303 - vendor/github.com/miekg/pkcs11/vendor.go | 127 - vendor/github.com/mistifyio/go-zfs/.gitignore | 1 - .../github.com/mistifyio/go-zfs/.travis.yml | 43 - .../mistifyio/go-zfs/CONTRIBUTING.md | 60 - vendor/github.com/mistifyio/go-zfs/LICENSE | 201 - vendor/github.com/mistifyio/go-zfs/README.md | 54 - .../github.com/mistifyio/go-zfs/Vagrantfile | 34 - vendor/github.com/mistifyio/go-zfs/error.go | 18 - vendor/github.com/mistifyio/go-zfs/utils.go | 360 - .../mistifyio/go-zfs/utils_notsolaris.go | 17 - .../mistifyio/go-zfs/utils_solaris.go | 17 - 
vendor/github.com/mistifyio/go-zfs/zfs.go | 452 - vendor/github.com/mistifyio/go-zfs/zpool.go | 112 - vendor/github.com/moby/locker/LICENSE | 190 - vendor/github.com/moby/locker/README.md | 65 - vendor/github.com/moby/locker/locker.go | 112 - vendor/github.com/moby/sys/mountinfo/LICENSE | 202 - vendor/github.com/moby/sys/mountinfo/doc.go | 44 - .../moby/sys/mountinfo/mounted_linux.go | 58 - .../moby/sys/mountinfo/mounted_unix.go | 66 - .../moby/sys/mountinfo/mountinfo.go | 66 - .../moby/sys/mountinfo/mountinfo_bsd.go | 67 - .../moby/sys/mountinfo/mountinfo_filters.go | 63 - .../moby/sys/mountinfo/mountinfo_linux.go | 221 - .../sys/mountinfo/mountinfo_unsupported.go | 18 - .../moby/sys/mountinfo/mountinfo_windows.go | 10 - vendor/github.com/moby/sys/symlink/LICENSE | 202 - .../moby/sys/symlink/LICENSE.APACHE | 191 - .../github.com/moby/sys/symlink/LICENSE.BSD | 27 - vendor/github.com/moby/sys/symlink/README.md | 6 - vendor/github.com/moby/sys/symlink/doc.go | 4 - vendor/github.com/moby/sys/symlink/fs.go | 142 - vendor/github.com/moby/sys/symlink/fs_unix.go | 17 - .../github.com/moby/sys/symlink/fs_windows.go | 185 - .../modern-go/concurrent/.gitignore | 1 - .../modern-go/concurrent/.travis.yml | 14 - .../github.com/modern-go/concurrent/LICENSE | 201 - .../github.com/modern-go/concurrent/README.md | 49 - .../modern-go/concurrent/executor.go | 14 - .../modern-go/concurrent/go_above_19.go | 15 - .../modern-go/concurrent/go_below_19.go | 33 - vendor/github.com/modern-go/concurrent/log.go | 13 - .../github.com/modern-go/concurrent/test.sh | 12 - .../concurrent/unbounded_executor.go | 119 - .../github.com/modern-go/reflect2/.gitignore | 2 - .../github.com/modern-go/reflect2/.travis.yml | 15 - .../github.com/modern-go/reflect2/Gopkg.lock | 15 - .../github.com/modern-go/reflect2/Gopkg.toml | 35 - vendor/github.com/modern-go/reflect2/LICENSE | 201 - .../github.com/modern-go/reflect2/README.md | 71 - .../modern-go/reflect2/go_above_17.go | 8 - 
.../modern-go/reflect2/go_above_19.go | 14 - .../modern-go/reflect2/go_below_17.go | 9 - .../modern-go/reflect2/go_below_19.go | 14 - .../github.com/modern-go/reflect2/reflect2.go | 298 - .../modern-go/reflect2/reflect2_amd64.s | 0 .../modern-go/reflect2/reflect2_kind.go | 30 - .../modern-go/reflect2/relfect2_386.s | 0 .../modern-go/reflect2/relfect2_amd64p32.s | 0 .../modern-go/reflect2/relfect2_arm.s | 0 .../modern-go/reflect2/relfect2_arm64.s | 0 .../modern-go/reflect2/relfect2_mips64x.s | 0 .../modern-go/reflect2/relfect2_mipsx.s | 0 .../modern-go/reflect2/relfect2_ppc64x.s | 0 .../modern-go/reflect2/relfect2_s390x.s | 0 .../modern-go/reflect2/safe_field.go | 58 - .../github.com/modern-go/reflect2/safe_map.go | 101 - .../modern-go/reflect2/safe_slice.go | 92 - .../modern-go/reflect2/safe_struct.go | 29 - .../modern-go/reflect2/safe_type.go | 78 - vendor/github.com/modern-go/reflect2/test.sh | 12 - .../github.com/modern-go/reflect2/type_map.go | 113 - .../modern-go/reflect2/unsafe_array.go | 65 - .../modern-go/reflect2/unsafe_eface.go | 59 - .../modern-go/reflect2/unsafe_field.go | 74 - .../modern-go/reflect2/unsafe_iface.go | 64 - .../modern-go/reflect2/unsafe_link.go | 70 - .../modern-go/reflect2/unsafe_map.go | 138 - .../modern-go/reflect2/unsafe_ptr.go | 46 - .../modern-go/reflect2/unsafe_slice.go | 177 - .../modern-go/reflect2/unsafe_struct.go | 59 - .../modern-go/reflect2/unsafe_type.go | 85 - .../opencontainers/go-digest/.mailmap | 4 - .../opencontainers/go-digest/.pullapprove.yml | 28 - .../opencontainers/go-digest/.travis.yml | 5 - .../opencontainers/go-digest/CONTRIBUTING.md | 72 - .../opencontainers/go-digest/LICENSE | 192 - .../opencontainers/go-digest/LICENSE.docs | 425 - .../opencontainers/go-digest/MAINTAINERS | 5 - .../opencontainers/go-digest/README.md | 96 - .../opencontainers/go-digest/algorithm.go | 193 - .../opencontainers/go-digest/digest.go | 157 - .../opencontainers/go-digest/digester.go | 40 - 
.../opencontainers/go-digest/digestset/set.go | 262 - .../opencontainers/go-digest/doc.go | 62 - .../opencontainers/go-digest/verifiers.go | 46 - .../opencontainers/image-spec/LICENSE | 191 - .../image-spec/identity/chainid.go | 67 - .../image-spec/identity/helpers.go | 40 - .../image-spec/specs-go/v1/annotations.go | 56 - .../image-spec/specs-go/v1/config.go | 103 - .../image-spec/specs-go/v1/descriptor.go | 64 - .../image-spec/specs-go/v1/index.go | 32 - .../image-spec/specs-go/v1/layout.go | 28 - .../image-spec/specs-go/v1/manifest.go | 35 - .../image-spec/specs-go/v1/mediatype.go | 48 - .../image-spec/specs-go/version.go | 32 - .../image-spec/specs-go/versioned.go | 23 - vendor/github.com/opencontainers/runc/LICENSE | 191 - vendor/github.com/opencontainers/runc/NOTICE | 17 - .../runc/libcontainer/user/lookup_unix.go | 156 - .../runc/libcontainer/user/user.go | 604 - .../runc/libcontainer/user/user_fuzzer.go | 42 - .../opencontainers/runtime-spec/LICENSE | 191 - .../runtime-spec/specs-go/config.go | 700 - .../runtime-spec/specs-go/state.go | 56 - .../runtime-spec/specs-go/version.go | 18 - .../github.com/opencontainers/selinux/LICENSE | 201 - .../opencontainers/selinux/go-selinux/doc.go | 14 - .../selinux/go-selinux/label/label.go | 97 - .../selinux/go-selinux/label/label_linux.go | 193 - .../selinux/go-selinux/label/label_stub.go | 49 - .../selinux/go-selinux/selinux.go | 284 - .../selinux/go-selinux/selinux_linux.go | 1212 - .../selinux/go-selinux/selinux_stub.go | 154 - .../selinux/go-selinux/xattrs_linux.go | 38 - .../selinux/pkg/pwalk/README.md | 42 - .../opencontainers/selinux/pkg/pwalk/pwalk.go | 104 - .../pelletier/go-toml/.dockerignore | 2 - .../github.com/pelletier/go-toml/.gitignore | 5 - .../pelletier/go-toml/CONTRIBUTING.md | 132 - .../github.com/pelletier/go-toml/Dockerfile | 11 - vendor/github.com/pelletier/go-toml/LICENSE | 21 - vendor/github.com/pelletier/go-toml/Makefile | 29 - .../go-toml/PULL_REQUEST_TEMPLATE.md | 5 - 
vendor/github.com/pelletier/go-toml/README.md | 151 - .../pelletier/go-toml/azure-pipelines.yml | 230 - .../github.com/pelletier/go-toml/benchmark.sh | 35 - vendor/github.com/pelletier/go-toml/doc.go | 23 - .../pelletier/go-toml/example-crlf.toml | 30 - .../github.com/pelletier/go-toml/example.toml | 30 - vendor/github.com/pelletier/go-toml/fuzz.go | 31 - vendor/github.com/pelletier/go-toml/fuzz.sh | 15 - vendor/github.com/pelletier/go-toml/fuzzit.sh | 26 - .../pelletier/go-toml/keysparsing.go | 112 - vendor/github.com/pelletier/go-toml/lexer.go | 807 - .../github.com/pelletier/go-toml/localtime.go | 281 - .../github.com/pelletier/go-toml/marshal.go | 1269 - .../go-toml/marshal_OrderPreserve_test.toml | 39 - .../pelletier/go-toml/marshal_test.toml | 39 - vendor/github.com/pelletier/go-toml/parser.go | 493 - .../github.com/pelletier/go-toml/position.go | 29 - vendor/github.com/pelletier/go-toml/token.go | 134 - vendor/github.com/pelletier/go-toml/toml.go | 529 - .../pelletier/go-toml/tomltree_create.go | 155 - .../pelletier/go-toml/tomltree_write.go | 517 - vendor/github.com/pkg/errors/.gitignore | 24 - vendor/github.com/pkg/errors/.travis.yml | 10 - vendor/github.com/pkg/errors/LICENSE | 23 - vendor/github.com/pkg/errors/Makefile | 44 - vendor/github.com/pkg/errors/README.md | 59 - vendor/github.com/pkg/errors/appveyor.yml | 32 - vendor/github.com/pkg/errors/errors.go | 288 - vendor/github.com/pkg/errors/go113.go | 38 - vendor/github.com/pkg/errors/stack.go | 177 - vendor/github.com/pmezard/go-difflib/LICENSE | 27 - .../pmezard/go-difflib/difflib/difflib.go | 772 - .../prometheus/client_golang/LICENSE | 201 - .../prometheus/client_golang/NOTICE | 23 - .../client_golang/prometheus/.gitignore | 1 - .../client_golang/prometheus/README.md | 1 - .../client_golang/prometheus/build_info.go | 29 - .../prometheus/build_info_pre_1.12.go | 22 - .../client_golang/prometheus/collector.go | 120 - .../client_golang/prometheus/counter.go | 321 - 
.../client_golang/prometheus/desc.go | 186 - .../client_golang/prometheus/doc.go | 199 - .../prometheus/expvar_collector.go | 119 - .../client_golang/prometheus/fnv.go | 42 - .../client_golang/prometheus/gauge.go | 289 - .../client_golang/prometheus/go_collector.go | 396 - .../client_golang/prometheus/histogram.go | 637 - .../prometheus/internal/metric.go | 85 - .../client_golang/prometheus/labels.go | 87 - .../client_golang/prometheus/metric.go | 176 - .../client_golang/prometheus/observer.go | 64 - .../prometheus/process_collector.go | 151 - .../prometheus/process_collector_other.go | 65 - .../prometheus/process_collector_windows.go | 116 - .../prometheus/promhttp/delegator.go | 370 - .../client_golang/prometheus/promhttp/http.go | 379 - .../prometheus/promhttp/instrument_client.go | 219 - .../prometheus/promhttp/instrument_server.go | 447 - .../client_golang/prometheus/registry.go | 948 - .../client_golang/prometheus/summary.go | 737 - .../client_golang/prometheus/timer.go | 54 - .../client_golang/prometheus/untyped.go | 42 - .../client_golang/prometheus/value.go | 205 - .../client_golang/prometheus/vec.go | 484 - .../client_golang/prometheus/wrap.go | 212 - .../prometheus/client_model/LICENSE | 201 - .../github.com/prometheus/client_model/NOTICE | 5 - .../prometheus/client_model/go/metrics.pb.go | 723 - vendor/github.com/prometheus/common/LICENSE | 201 - vendor/github.com/prometheus/common/NOTICE | 5 - .../prometheus/common/expfmt/decode.go | 429 - .../prometheus/common/expfmt/encode.go | 162 - .../prometheus/common/expfmt/expfmt.go | 41 - .../prometheus/common/expfmt/fuzz.go | 36 - .../common/expfmt/openmetrics_create.go | 527 - .../prometheus/common/expfmt/text_create.go | 465 - .../prometheus/common/expfmt/text_parse.go | 764 - .../bitbucket.org/ww/goautoneg/README.txt | 67 - .../bitbucket.org/ww/goautoneg/autoneg.go | 162 - .../prometheus/common/model/alert.go | 136 - .../prometheus/common/model/fingerprinting.go | 105 - 
.../github.com/prometheus/common/model/fnv.go | 42 - .../prometheus/common/model/labels.go | 210 - .../prometheus/common/model/labelset.go | 169 - .../prometheus/common/model/metric.go | 102 - .../prometheus/common/model/model.go | 16 - .../prometheus/common/model/signature.go | 144 - .../prometheus/common/model/silence.go | 106 - .../prometheus/common/model/time.go | 274 - .../prometheus/common/model/value.go | 416 - .../github.com/prometheus/procfs/.gitignore | 1 - .../prometheus/procfs/.golangci.yml | 4 - .../prometheus/procfs/CODE_OF_CONDUCT.md | 3 - .../prometheus/procfs/CONTRIBUTING.md | 121 - vendor/github.com/prometheus/procfs/LICENSE | 201 - .../prometheus/procfs/MAINTAINERS.md | 2 - vendor/github.com/prometheus/procfs/Makefile | 29 - .../prometheus/procfs/Makefile.common | 302 - vendor/github.com/prometheus/procfs/NOTICE | 7 - vendor/github.com/prometheus/procfs/README.md | 61 - .../github.com/prometheus/procfs/SECURITY.md | 6 - vendor/github.com/prometheus/procfs/arp.go | 85 - .../github.com/prometheus/procfs/buddyinfo.go | 85 - .../github.com/prometheus/procfs/cpuinfo.go | 481 - .../prometheus/procfs/cpuinfo_armx.go | 19 - .../prometheus/procfs/cpuinfo_mipsx.go | 19 - .../prometheus/procfs/cpuinfo_others.go | 19 - .../prometheus/procfs/cpuinfo_ppcx.go | 19 - .../prometheus/procfs/cpuinfo_riscvx.go | 19 - .../prometheus/procfs/cpuinfo_s390x.go | 18 - .../prometheus/procfs/cpuinfo_x86.go | 19 - vendor/github.com/prometheus/procfs/crypto.go | 153 - vendor/github.com/prometheus/procfs/doc.go | 45 - .../prometheus/procfs/fixtures.ttar | 6553 --- vendor/github.com/prometheus/procfs/fs.go | 43 - .../github.com/prometheus/procfs/fscache.go | 422 - .../prometheus/procfs/internal/fs/fs.go | 55 - .../prometheus/procfs/internal/util/parse.go | 97 - .../procfs/internal/util/readfile.go | 38 - .../procfs/internal/util/sysreadfile.go | 48 - .../internal/util/sysreadfile_compat.go | 26 - .../procfs/internal/util/valueparser.go | 91 - 
vendor/github.com/prometheus/procfs/ipvs.go | 241 - .../prometheus/procfs/kernel_random.go | 62 - .../github.com/prometheus/procfs/loadavg.go | 62 - vendor/github.com/prometheus/procfs/mdstat.go | 213 - .../github.com/prometheus/procfs/meminfo.go | 277 - .../github.com/prometheus/procfs/mountinfo.go | 180 - .../prometheus/procfs/mountstats.go | 638 - .../prometheus/procfs/net_conntrackstat.go | 153 - .../github.com/prometheus/procfs/net_dev.go | 205 - .../prometheus/procfs/net_ip_socket.go | 220 - .../prometheus/procfs/net_protocols.go | 180 - .../prometheus/procfs/net_sockstat.go | 163 - .../prometheus/procfs/net_softnet.go | 102 - .../github.com/prometheus/procfs/net_tcp.go | 64 - .../github.com/prometheus/procfs/net_udp.go | 64 - .../github.com/prometheus/procfs/net_unix.go | 257 - vendor/github.com/prometheus/procfs/proc.go | 319 - .../prometheus/procfs/proc_cgroup.go | 98 - .../prometheus/procfs/proc_environ.go | 37 - .../prometheus/procfs/proc_fdinfo.go | 133 - .../github.com/prometheus/procfs/proc_io.go | 59 - .../prometheus/procfs/proc_limits.go | 160 - .../github.com/prometheus/procfs/proc_maps.go | 209 - .../github.com/prometheus/procfs/proc_ns.go | 68 - .../github.com/prometheus/procfs/proc_psi.go | 100 - .../prometheus/procfs/proc_smaps.go | 165 - .../github.com/prometheus/procfs/proc_stat.go | 189 - .../prometheus/procfs/proc_status.go | 170 - .../github.com/prometheus/procfs/schedstat.go | 121 - vendor/github.com/prometheus/procfs/slab.go | 151 - vendor/github.com/prometheus/procfs/stat.go | 244 - vendor/github.com/prometheus/procfs/swaps.go | 89 - vendor/github.com/prometheus/procfs/ttar | 413 - vendor/github.com/prometheus/procfs/vm.go | 210 - vendor/github.com/prometheus/procfs/xfrm.go | 186 - .../github.com/prometheus/procfs/zoneinfo.go | 196 - .../russross/blackfriday/v2/.gitignore | 8 - .../russross/blackfriday/v2/.travis.yml | 17 - .../russross/blackfriday/v2/LICENSE.txt | 29 - .../russross/blackfriday/v2/README.md | 291 - 
.../russross/blackfriday/v2/block.go | 1590 - .../github.com/russross/blackfriday/v2/doc.go | 18 - .../github.com/russross/blackfriday/v2/esc.go | 34 - .../russross/blackfriday/v2/html.go | 949 - .../russross/blackfriday/v2/inline.go | 1228 - .../russross/blackfriday/v2/markdown.go | 950 - .../russross/blackfriday/v2/node.go | 354 - .../russross/blackfriday/v2/smartypants.go | 457 - .../sanitized_anchor_name/.travis.yml | 16 - .../shurcooL/sanitized_anchor_name/LICENSE | 21 - .../shurcooL/sanitized_anchor_name/README.md | 36 - .../shurcooL/sanitized_anchor_name/main.go | 29 - vendor/github.com/sirupsen/logrus/.gitignore | 4 - .../github.com/sirupsen/logrus/.golangci.yml | 40 - vendor/github.com/sirupsen/logrus/.travis.yml | 15 - .../github.com/sirupsen/logrus/CHANGELOG.md | 259 - vendor/github.com/sirupsen/logrus/LICENSE | 21 - vendor/github.com/sirupsen/logrus/README.md | 513 - vendor/github.com/sirupsen/logrus/alt_exit.go | 76 - .../github.com/sirupsen/logrus/appveyor.yml | 14 - .../github.com/sirupsen/logrus/buffer_pool.go | 52 - vendor/github.com/sirupsen/logrus/doc.go | 26 - vendor/github.com/sirupsen/logrus/entry.go | 431 - vendor/github.com/sirupsen/logrus/exported.go | 270 - .../github.com/sirupsen/logrus/formatter.go | 78 - vendor/github.com/sirupsen/logrus/hooks.go | 34 - .../sirupsen/logrus/json_formatter.go | 128 - vendor/github.com/sirupsen/logrus/logger.go | 404 - vendor/github.com/sirupsen/logrus/logrus.go | 186 - .../logrus/terminal_check_appengine.go | 11 - .../sirupsen/logrus/terminal_check_bsd.go | 13 - .../sirupsen/logrus/terminal_check_js.go | 7 - .../logrus/terminal_check_no_terminal.go | 11 - .../logrus/terminal_check_notappengine.go | 17 - .../sirupsen/logrus/terminal_check_solaris.go | 11 - .../sirupsen/logrus/terminal_check_unix.go | 13 - .../sirupsen/logrus/terminal_check_windows.go | 27 - .../sirupsen/logrus/text_formatter.go | 339 - vendor/github.com/sirupsen/logrus/writer.go | 70 - .../stefanberger/go-pkcs11uri/.gitignore | 2 - 
.../stefanberger/go-pkcs11uri/.travis.yml | 25 - .../stefanberger/go-pkcs11uri/LICENSE | 177 - .../stefanberger/go-pkcs11uri/Makefile | 28 - .../stefanberger/go-pkcs11uri/README.md | 102 - .../stefanberger/go-pkcs11uri/pkcs11uri.go | 453 - vendor/github.com/stretchr/testify/LICENSE | 21 - .../testify/assert/assertion_compare.go | 274 - .../testify/assert/assertion_format.go | 644 - .../testify/assert/assertion_format.go.tmpl | 5 - .../testify/assert/assertion_forward.go | 1276 - .../testify/assert/assertion_forward.go.tmpl | 5 - .../stretchr/testify/assert/assertions.go | 1695 - .../github.com/stretchr/testify/assert/doc.go | 45 - .../stretchr/testify/assert/errors.go | 10 - .../testify/assert/forward_assertions.go | 16 - .../testify/assert/http_assertions.go | 162 - .../stretchr/testify/require/doc.go | 28 - .../testify/require/forward_requirements.go | 16 - .../stretchr/testify/require/require.go | 1631 - .../stretchr/testify/require/require.go.tmpl | 6 - .../testify/require/require_forward.go | 1277 - .../testify/require/require_forward.go.tmpl | 5 - .../stretchr/testify/require/requirements.go | 29 - vendor/github.com/tchap/go-patricia/AUTHORS | 3 - vendor/github.com/tchap/go-patricia/LICENSE | 20 - .../tchap/go-patricia/patricia/children.go | 325 - .../tchap/go-patricia/patricia/patricia.go | 594 - vendor/github.com/urfave/cli/.flake8 | 2 - vendor/github.com/urfave/cli/.gitignore | 3 - vendor/github.com/urfave/cli/.travis.yml | 35 - vendor/github.com/urfave/cli/CHANGELOG.md | 504 - .../github.com/urfave/cli/CODE_OF_CONDUCT.md | 74 - vendor/github.com/urfave/cli/CONTRIBUTING.md | 18 - vendor/github.com/urfave/cli/LICENSE | 21 - vendor/github.com/urfave/cli/README.md | 1571 - vendor/github.com/urfave/cli/app.go | 530 - vendor/github.com/urfave/cli/appveyor.yml | 23 - vendor/github.com/urfave/cli/category.go | 44 - vendor/github.com/urfave/cli/cli.go | 22 - vendor/github.com/urfave/cli/command.go | 335 - vendor/github.com/urfave/cli/context.go | 339 - 
vendor/github.com/urfave/cli/docs.go | 148 - vendor/github.com/urfave/cli/errors.go | 115 - vendor/github.com/urfave/cli/fish.go | 194 - vendor/github.com/urfave/cli/flag.go | 346 - vendor/github.com/urfave/cli/flag_bool.go | 109 - vendor/github.com/urfave/cli/flag_bool_t.go | 110 - vendor/github.com/urfave/cli/flag_duration.go | 106 - vendor/github.com/urfave/cli/flag_float64.go | 106 - vendor/github.com/urfave/cli/flag_generic.go | 110 - vendor/github.com/urfave/cli/flag_int.go | 105 - vendor/github.com/urfave/cli/flag_int64.go | 106 - .../github.com/urfave/cli/flag_int64_slice.go | 141 - .../github.com/urfave/cli/flag_int_slice.go | 142 - vendor/github.com/urfave/cli/flag_string.go | 98 - .../urfave/cli/flag_string_slice.go | 138 - vendor/github.com/urfave/cli/flag_uint.go | 106 - vendor/github.com/urfave/cli/flag_uint64.go | 106 - vendor/github.com/urfave/cli/funcs.go | 44 - vendor/github.com/urfave/cli/help.go | 348 - vendor/github.com/urfave/cli/parse.go | 80 - vendor/github.com/urfave/cli/sort.go | 29 - vendor/github.com/urfave/cli/template.go | 121 - vendor/go.etcd.io/bbolt/.gitignore | 5 - vendor/go.etcd.io/bbolt/.travis.yml | 17 - vendor/go.etcd.io/bbolt/LICENSE | 20 - vendor/go.etcd.io/bbolt/Makefile | 38 - vendor/go.etcd.io/bbolt/README.md | 957 - vendor/go.etcd.io/bbolt/bolt_386.go | 7 - vendor/go.etcd.io/bbolt/bolt_amd64.go | 7 - vendor/go.etcd.io/bbolt/bolt_arm.go | 7 - vendor/go.etcd.io/bbolt/bolt_arm64.go | 9 - vendor/go.etcd.io/bbolt/bolt_linux.go | 10 - vendor/go.etcd.io/bbolt/bolt_mips64x.go | 9 - vendor/go.etcd.io/bbolt/bolt_mipsx.go | 9 - vendor/go.etcd.io/bbolt/bolt_openbsd.go | 27 - vendor/go.etcd.io/bbolt/bolt_ppc.go | 9 - vendor/go.etcd.io/bbolt/bolt_ppc64.go | 9 - vendor/go.etcd.io/bbolt/bolt_ppc64le.go | 9 - vendor/go.etcd.io/bbolt/bolt_riscv64.go | 9 - vendor/go.etcd.io/bbolt/bolt_s390x.go | 9 - vendor/go.etcd.io/bbolt/bolt_unix.go | 93 - vendor/go.etcd.io/bbolt/bolt_unix_aix.go | 90 - vendor/go.etcd.io/bbolt/bolt_unix_solaris.go | 88 - 
vendor/go.etcd.io/bbolt/bolt_windows.go | 141 - vendor/go.etcd.io/bbolt/boltsync_unix.go | 8 - vendor/go.etcd.io/bbolt/bucket.go | 777 - vendor/go.etcd.io/bbolt/cursor.go | 396 - vendor/go.etcd.io/bbolt/db.go | 1174 - vendor/go.etcd.io/bbolt/doc.go | 44 - vendor/go.etcd.io/bbolt/errors.go | 71 - vendor/go.etcd.io/bbolt/freelist.go | 404 - vendor/go.etcd.io/bbolt/freelist_hmap.go | 178 - vendor/go.etcd.io/bbolt/node.go | 602 - vendor/go.etcd.io/bbolt/page.go | 204 - vendor/go.etcd.io/bbolt/tx.go | 724 - vendor/go.etcd.io/bbolt/unsafe.go | 39 - vendor/go.mozilla.org/pkcs7/.gitignore | 24 - vendor/go.mozilla.org/pkcs7/.travis.yml | 10 - vendor/go.mozilla.org/pkcs7/LICENSE | 22 - vendor/go.mozilla.org/pkcs7/Makefile | 20 - vendor/go.mozilla.org/pkcs7/README.md | 69 - vendor/go.mozilla.org/pkcs7/ber.go | 251 - vendor/go.mozilla.org/pkcs7/decrypt.go | 177 - vendor/go.mozilla.org/pkcs7/encrypt.go | 399 - vendor/go.mozilla.org/pkcs7/pkcs7.go | 291 - vendor/go.mozilla.org/pkcs7/sign.go | 429 - vendor/go.mozilla.org/pkcs7/verify.go | 264 - vendor/go.opencensus.io/.gitignore | 9 - vendor/go.opencensus.io/.travis.yml | 17 - vendor/go.opencensus.io/AUTHORS | 1 - vendor/go.opencensus.io/CONTRIBUTING.md | 63 - vendor/go.opencensus.io/LICENSE | 202 - vendor/go.opencensus.io/Makefile | 96 - vendor/go.opencensus.io/README.md | 267 - vendor/go.opencensus.io/appveyor.yml | 24 - vendor/go.opencensus.io/internal/internal.go | 37 - vendor/go.opencensus.io/internal/sanitize.go | 50 - .../internal/traceinternals.go | 53 - vendor/go.opencensus.io/opencensus.go | 21 - vendor/go.opencensus.io/trace/basetypes.go | 119 - vendor/go.opencensus.io/trace/config.go | 86 - vendor/go.opencensus.io/trace/doc.go | 53 - vendor/go.opencensus.io/trace/evictedqueue.go | 38 - vendor/go.opencensus.io/trace/export.go | 97 - .../trace/internal/internal.go | 22 - vendor/go.opencensus.io/trace/lrumap.go | 61 - vendor/go.opencensus.io/trace/sampling.go | 75 - vendor/go.opencensus.io/trace/spanbucket.go | 130 - 
vendor/go.opencensus.io/trace/spanstore.go | 306 - vendor/go.opencensus.io/trace/status_codes.go | 37 - vendor/go.opencensus.io/trace/trace.go | 598 - vendor/go.opencensus.io/trace/trace_go11.go | 32 - .../go.opencensus.io/trace/trace_nongo11.go | 25 - .../trace/tracestate/tracestate.go | 147 - vendor/golang.org/x/crypto/AUTHORS | 3 - vendor/golang.org/x/crypto/CONTRIBUTORS | 3 - vendor/golang.org/x/crypto/cast5/cast5.go | 533 - vendor/golang.org/x/crypto/ed25519/ed25519.go | 223 - .../x/crypto/ed25519/ed25519_go113.go | 74 - .../ed25519/internal/edwards25519/const.go | 1422 - .../internal/edwards25519/edwards25519.go | 1793 - .../x/crypto/openpgp/armor/armor.go | 224 - .../x/crypto/openpgp/armor/encode.go | 160 - .../x/crypto/openpgp/canonical_text.go | 59 - .../x/crypto/openpgp/elgamal/elgamal.go | 124 - .../x/crypto/openpgp/errors/errors.go | 72 - vendor/golang.org/x/crypto/openpgp/keys.go | 693 - .../x/crypto/openpgp/packet/compressed.go | 123 - .../x/crypto/openpgp/packet/config.go | 91 - .../x/crypto/openpgp/packet/encrypted_key.go | 208 - .../x/crypto/openpgp/packet/literal.go | 89 - .../x/crypto/openpgp/packet/ocfb.go | 143 - .../openpgp/packet/one_pass_signature.go | 73 - .../x/crypto/openpgp/packet/opaque.go | 162 - .../x/crypto/openpgp/packet/packet.go | 584 - .../x/crypto/openpgp/packet/private_key.go | 385 - .../x/crypto/openpgp/packet/public_key.go | 753 - .../x/crypto/openpgp/packet/public_key_v3.go | 279 - .../x/crypto/openpgp/packet/reader.go | 76 - .../x/crypto/openpgp/packet/signature.go | 731 - .../x/crypto/openpgp/packet/signature_v3.go | 146 - .../openpgp/packet/symmetric_key_encrypted.go | 155 - .../openpgp/packet/symmetrically_encrypted.go | 290 - .../x/crypto/openpgp/packet/userattribute.go | 91 - .../x/crypto/openpgp/packet/userid.go | 160 - vendor/golang.org/x/crypto/openpgp/read.go | 442 - vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 273 - vendor/golang.org/x/crypto/openpgp/write.go | 418 - vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go 
| 77 - .../x/crypto/ssh/terminal/terminal.go | 76 - vendor/golang.org/x/net/AUTHORS | 3 - vendor/golang.org/x/net/CONTRIBUTORS | 3 - vendor/golang.org/x/net/LICENSE | 27 - vendor/golang.org/x/net/PATENTS | 22 - vendor/golang.org/x/net/context/context.go | 56 - .../x/net/context/ctxhttp/ctxhttp.go | 71 - vendor/golang.org/x/net/context/go17.go | 73 - vendor/golang.org/x/net/context/go19.go | 21 - vendor/golang.org/x/net/context/pre_go17.go | 301 - vendor/golang.org/x/net/context/pre_go19.go | 110 - vendor/golang.org/x/net/http/httpguts/guts.go | 50 - .../golang.org/x/net/http/httpguts/httplex.go | 346 - vendor/golang.org/x/net/http2/.gitignore | 2 - vendor/golang.org/x/net/http2/Dockerfile | 51 - vendor/golang.org/x/net/http2/Makefile | 3 - vendor/golang.org/x/net/http2/README | 20 - vendor/golang.org/x/net/http2/ciphers.go | 641 - .../x/net/http2/client_conn_pool.go | 278 - vendor/golang.org/x/net/http2/databuffer.go | 146 - vendor/golang.org/x/net/http2/errors.go | 133 - vendor/golang.org/x/net/http2/flow.go | 52 - vendor/golang.org/x/net/http2/frame.go | 1614 - vendor/golang.org/x/net/http2/go111.go | 30 - vendor/golang.org/x/net/http2/gotrack.go | 170 - vendor/golang.org/x/net/http2/headermap.go | 88 - vendor/golang.org/x/net/http2/hpack/encode.go | 240 - vendor/golang.org/x/net/http2/hpack/hpack.go | 504 - .../golang.org/x/net/http2/hpack/huffman.go | 229 - vendor/golang.org/x/net/http2/hpack/tables.go | 479 - vendor/golang.org/x/net/http2/http2.go | 385 - vendor/golang.org/x/net/http2/not_go111.go | 21 - vendor/golang.org/x/net/http2/pipe.go | 168 - vendor/golang.org/x/net/http2/server.go | 2984 -- vendor/golang.org/x/net/http2/transport.go | 2760 -- vendor/golang.org/x/net/http2/write.go | 365 - vendor/golang.org/x/net/http2/writesched.go | 248 - .../x/net/http2/writesched_priority.go | 452 - .../x/net/http2/writesched_random.go | 77 - vendor/golang.org/x/net/idna/idna10.0.0.go | 735 - vendor/golang.org/x/net/idna/idna9.0.0.go | 683 - 
vendor/golang.org/x/net/idna/punycode.go | 203 - vendor/golang.org/x/net/idna/tables10.0.0.go | 4560 -- vendor/golang.org/x/net/idna/tables11.0.0.go | 4654 -- vendor/golang.org/x/net/idna/tables12.0.0.go | 4734 -- vendor/golang.org/x/net/idna/tables13.0.0.go | 4840 -- vendor/golang.org/x/net/idna/tables9.0.0.go | 4487 -- vendor/golang.org/x/net/idna/trie.go | 72 - vendor/golang.org/x/net/idna/trieval.go | 119 - .../x/net/internal/timeseries/timeseries.go | 525 - vendor/golang.org/x/net/trace/events.go | 532 - vendor/golang.org/x/net/trace/histogram.go | 365 - vendor/golang.org/x/net/trace/trace.go | 1130 - vendor/golang.org/x/net/websocket/client.go | 106 - vendor/golang.org/x/net/websocket/dial.go | 24 - vendor/golang.org/x/net/websocket/hybi.go | 583 - vendor/golang.org/x/net/websocket/server.go | 113 - .../golang.org/x/net/websocket/websocket.go | 451 - vendor/golang.org/x/oauth2/.travis.yml | 13 - vendor/golang.org/x/oauth2/AUTHORS | 3 - vendor/golang.org/x/oauth2/CONTRIBUTING.md | 26 - vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 - vendor/golang.org/x/oauth2/LICENSE | 27 - vendor/golang.org/x/oauth2/README.md | 36 - .../x/oauth2/internal/client_appengine.go | 13 - vendor/golang.org/x/oauth2/internal/doc.go | 6 - vendor/golang.org/x/oauth2/internal/oauth2.go | 37 - vendor/golang.org/x/oauth2/internal/token.go | 294 - .../golang.org/x/oauth2/internal/transport.go | 33 - vendor/golang.org/x/oauth2/oauth2.go | 381 - vendor/golang.org/x/oauth2/token.go | 178 - vendor/golang.org/x/oauth2/transport.go | 89 - vendor/golang.org/x/sync/AUTHORS | 3 - vendor/golang.org/x/sync/CONTRIBUTORS | 3 - vendor/golang.org/x/sync/LICENSE | 27 - vendor/golang.org/x/sync/PATENTS | 22 - vendor/golang.org/x/sync/errgroup/errgroup.go | 66 - .../golang.org/x/sync/semaphore/semaphore.go | 136 - vendor/golang.org/x/sys/AUTHORS | 3 - vendor/golang.org/x/sys/CONTRIBUTORS | 3 - vendor/golang.org/x/sys/LICENSE | 27 - vendor/golang.org/x/sys/PATENTS | 22 - 
.../sys/internal/unsafeheader/unsafeheader.go | 30 - vendor/golang.org/x/sys/plan9/asm.s | 8 - vendor/golang.org/x/sys/plan9/asm_plan9_386.s | 30 - .../golang.org/x/sys/plan9/asm_plan9_amd64.s | 30 - vendor/golang.org/x/sys/plan9/asm_plan9_arm.s | 25 - vendor/golang.org/x/sys/plan9/const_plan9.go | 70 - vendor/golang.org/x/sys/plan9/dir_plan9.go | 212 - vendor/golang.org/x/sys/plan9/env_plan9.go | 31 - vendor/golang.org/x/sys/plan9/errors_plan9.go | 50 - vendor/golang.org/x/sys/plan9/mkall.sh | 150 - vendor/golang.org/x/sys/plan9/mkerrors.sh | 246 - .../golang.org/x/sys/plan9/mksysnum_plan9.sh | 23 - .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 21 - vendor/golang.org/x/sys/plan9/pwd_plan9.go | 23 - vendor/golang.org/x/sys/plan9/race.go | 30 - vendor/golang.org/x/sys/plan9/race0.go | 25 - vendor/golang.org/x/sys/plan9/str.go | 22 - vendor/golang.org/x/sys/plan9/syscall.go | 116 - .../golang.org/x/sys/plan9/syscall_plan9.go | 349 - .../x/sys/plan9/zsyscall_plan9_386.go | 284 - .../x/sys/plan9/zsyscall_plan9_amd64.go | 284 - .../x/sys/plan9/zsyscall_plan9_arm.go | 284 - .../golang.org/x/sys/plan9/zsysnum_plan9.go | 49 - vendor/golang.org/x/sys/unix/.gitignore | 2 - vendor/golang.org/x/sys/unix/README.md | 184 - .../golang.org/x/sys/unix/affinity_linux.go | 86 - vendor/golang.org/x/sys/unix/aliases.go | 15 - vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 18 - vendor/golang.org/x/sys/unix/asm_bsd_386.s | 29 - vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 29 - vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 29 - vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 29 - vendor/golang.org/x/sys/unix/asm_linux_386.s | 66 - .../golang.org/x/sys/unix/asm_linux_amd64.s | 58 - vendor/golang.org/x/sys/unix/asm_linux_arm.s | 57 - .../golang.org/x/sys/unix/asm_linux_arm64.s | 53 - .../golang.org/x/sys/unix/asm_linux_mips64x.s | 57 - .../golang.org/x/sys/unix/asm_linux_mipsx.s | 55 - .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 45 - .../golang.org/x/sys/unix/asm_linux_riscv64.s | 49 - 
.../golang.org/x/sys/unix/asm_linux_s390x.s | 57 - .../x/sys/unix/asm_openbsd_mips64.s | 30 - .../golang.org/x/sys/unix/asm_solaris_amd64.s | 18 - vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 426 - .../golang.org/x/sys/unix/bluetooth_linux.go | 36 - vendor/golang.org/x/sys/unix/cap_freebsd.go | 196 - vendor/golang.org/x/sys/unix/constants.go | 14 - vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 27 - vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 29 - vendor/golang.org/x/sys/unix/dev_darwin.go | 24 - vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 - vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 - vendor/golang.org/x/sys/unix/dev_linux.go | 42 - vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 - vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 - vendor/golang.org/x/sys/unix/dev_zos.go | 29 - vendor/golang.org/x/sys/unix/dirent.go | 103 - vendor/golang.org/x/sys/unix/endian_big.go | 10 - vendor/golang.org/x/sys/unix/endian_little.go | 10 - vendor/golang.org/x/sys/unix/env_unix.go | 32 - vendor/golang.org/x/sys/unix/epoll_zos.go | 221 - .../x/sys/unix/errors_freebsd_386.go | 233 - .../x/sys/unix/errors_freebsd_amd64.go | 233 - .../x/sys/unix/errors_freebsd_arm.go | 226 - .../x/sys/unix/errors_freebsd_arm64.go | 17 - vendor/golang.org/x/sys/unix/fcntl.go | 37 - vendor/golang.org/x/sys/unix/fcntl_darwin.go | 24 - .../x/sys/unix/fcntl_linux_32bit.go | 14 - vendor/golang.org/x/sys/unix/fdset.go | 30 - vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 - vendor/golang.org/x/sys/unix/gccgo.go | 60 - vendor/golang.org/x/sys/unix/gccgo_c.c | 45 - .../x/sys/unix/gccgo_linux_amd64.go | 21 - vendor/golang.org/x/sys/unix/ioctl.go | 75 - vendor/golang.org/x/sys/unix/ioctl_linux.go | 196 - vendor/golang.org/x/sys/unix/ioctl_zos.go | 74 - vendor/golang.org/x/sys/unix/mkall.sh | 243 - vendor/golang.org/x/sys/unix/mkerrors.sh | 750 - vendor/golang.org/x/sys/unix/pagesize_unix.go | 16 - .../golang.org/x/sys/unix/pledge_openbsd.go | 163 - 
vendor/golang.org/x/sys/unix/ptrace_darwin.go | 12 - vendor/golang.org/x/sys/unix/ptrace_ios.go | 12 - vendor/golang.org/x/sys/unix/race.go | 31 - vendor/golang.org/x/sys/unix/race0.go | 26 - .../x/sys/unix/readdirent_getdents.go | 13 - .../x/sys/unix/readdirent_getdirentries.go | 20 - .../x/sys/unix/sockcmsg_dragonfly.go | 16 - .../golang.org/x/sys/unix/sockcmsg_linux.go | 36 - vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 93 - .../x/sys/unix/sockcmsg_unix_other.go | 47 - vendor/golang.org/x/sys/unix/str.go | 27 - vendor/golang.org/x/sys/unix/syscall.go | 95 - vendor/golang.org/x/sys/unix/syscall_aix.go | 553 - .../golang.org/x/sys/unix/syscall_aix_ppc.go | 54 - .../x/sys/unix/syscall_aix_ppc64.go | 85 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 664 - .../x/sys/unix/syscall_darwin.1_12.go | 32 - .../x/sys/unix/syscall_darwin.1_13.go | 108 - .../golang.org/x/sys/unix/syscall_darwin.go | 688 - .../x/sys/unix/syscall_darwin_386.go | 51 - .../x/sys/unix/syscall_darwin_amd64.go | 51 - .../x/sys/unix/syscall_darwin_arm.go | 51 - .../x/sys/unix/syscall_darwin_arm64.go | 51 - .../x/sys/unix/syscall_darwin_libSystem.go | 34 - .../x/sys/unix/syscall_dragonfly.go | 541 - .../x/sys/unix/syscall_dragonfly_amd64.go | 57 - .../golang.org/x/sys/unix/syscall_freebsd.go | 872 - .../x/sys/unix/syscall_freebsd_386.go | 67 - .../x/sys/unix/syscall_freebsd_amd64.go | 67 - .../x/sys/unix/syscall_freebsd_arm.go | 63 - .../x/sys/unix/syscall_freebsd_arm64.go | 63 - .../golang.org/x/sys/unix/syscall_illumos.go | 178 - vendor/golang.org/x/sys/unix/syscall_linux.go | 2329 - .../x/sys/unix/syscall_linux_386.go | 388 - .../x/sys/unix/syscall_linux_amd64.go | 195 - .../x/sys/unix/syscall_linux_amd64_gc.go | 13 - .../x/sys/unix/syscall_linux_arm.go | 287 - .../x/sys/unix/syscall_linux_arm64.go | 246 - .../golang.org/x/sys/unix/syscall_linux_gc.go | 15 - .../x/sys/unix/syscall_linux_gc_386.go | 17 - .../x/sys/unix/syscall_linux_gc_arm.go | 14 - .../x/sys/unix/syscall_linux_gccgo_386.go | 31 
- .../x/sys/unix/syscall_linux_gccgo_arm.go | 21 - .../x/sys/unix/syscall_linux_mips64x.go | 231 - .../x/sys/unix/syscall_linux_mipsx.go | 239 - .../x/sys/unix/syscall_linux_ppc.go | 272 - .../x/sys/unix/syscall_linux_ppc64x.go | 157 - .../x/sys/unix/syscall_linux_riscv64.go | 231 - .../x/sys/unix/syscall_linux_s390x.go | 343 - .../x/sys/unix/syscall_linux_sparc64.go | 152 - .../golang.org/x/sys/unix/syscall_netbsd.go | 618 - .../x/sys/unix/syscall_netbsd_386.go | 38 - .../x/sys/unix/syscall_netbsd_amd64.go | 38 - .../x/sys/unix/syscall_netbsd_arm.go | 38 - .../x/sys/unix/syscall_netbsd_arm64.go | 38 - .../golang.org/x/sys/unix/syscall_openbsd.go | 390 - .../x/sys/unix/syscall_openbsd_386.go | 42 - .../x/sys/unix/syscall_openbsd_amd64.go | 42 - .../x/sys/unix/syscall_openbsd_arm.go | 42 - .../x/sys/unix/syscall_openbsd_arm64.go | 42 - .../x/sys/unix/syscall_openbsd_mips64.go | 35 - .../golang.org/x/sys/unix/syscall_solaris.go | 746 - .../x/sys/unix/syscall_solaris_amd64.go | 28 - vendor/golang.org/x/sys/unix/syscall_unix.go | 431 - .../golang.org/x/sys/unix/syscall_unix_gc.go | 18 - .../x/sys/unix/syscall_unix_gc_ppc64x.go | 25 - .../x/sys/unix/syscall_zos_s390x.go | 1781 - vendor/golang.org/x/sys/unix/timestruct.go | 77 - .../golang.org/x/sys/unix/unveil_openbsd.go | 42 - vendor/golang.org/x/sys/unix/xattr_bsd.go | 241 - .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1385 - .../x/sys/unix/zerrors_aix_ppc64.go | 1386 - .../x/sys/unix/zerrors_darwin_386.go | 1789 - .../x/sys/unix/zerrors_darwin_amd64.go | 1860 - .../x/sys/unix/zerrors_darwin_arm.go | 1789 - .../x/sys/unix/zerrors_darwin_arm64.go | 1860 - .../x/sys/unix/zerrors_dragonfly_amd64.go | 1738 - .../x/sys/unix/zerrors_freebsd_386.go | 1943 - .../x/sys/unix/zerrors_freebsd_amd64.go | 1942 - .../x/sys/unix/zerrors_freebsd_arm.go | 1841 - .../x/sys/unix/zerrors_freebsd_arm64.go | 1943 - vendor/golang.org/x/sys/unix/zerrors_linux.go | 2827 -- .../x/sys/unix/zerrors_linux_386.go | 801 - 
.../x/sys/unix/zerrors_linux_amd64.go | 801 - .../x/sys/unix/zerrors_linux_arm.go | 807 - .../x/sys/unix/zerrors_linux_arm64.go | 798 - .../x/sys/unix/zerrors_linux_mips.go | 808 - .../x/sys/unix/zerrors_linux_mips64.go | 808 - .../x/sys/unix/zerrors_linux_mips64le.go | 808 - .../x/sys/unix/zerrors_linux_mipsle.go | 808 - .../x/sys/unix/zerrors_linux_ppc.go | 860 - .../x/sys/unix/zerrors_linux_ppc64.go | 864 - .../x/sys/unix/zerrors_linux_ppc64le.go | 864 - .../x/sys/unix/zerrors_linux_riscv64.go | 788 - .../x/sys/unix/zerrors_linux_s390x.go | 863 - .../x/sys/unix/zerrors_linux_sparc64.go | 858 - .../x/sys/unix/zerrors_netbsd_386.go | 1780 - .../x/sys/unix/zerrors_netbsd_amd64.go | 1770 - .../x/sys/unix/zerrors_netbsd_arm.go | 1759 - .../x/sys/unix/zerrors_netbsd_arm64.go | 1770 - .../x/sys/unix/zerrors_openbsd_386.go | 1665 - .../x/sys/unix/zerrors_openbsd_amd64.go | 1775 - .../x/sys/unix/zerrors_openbsd_arm.go | 1667 - .../x/sys/unix/zerrors_openbsd_arm64.go | 1798 - .../x/sys/unix/zerrors_openbsd_mips64.go | 1863 - .../x/sys/unix/zerrors_solaris_amd64.go | 1557 - .../x/sys/unix/zerrors_zos_s390x.go | 838 - .../x/sys/unix/zptrace_armnn_linux.go | 42 - .../x/sys/unix/zptrace_linux_arm64.go | 17 - .../x/sys/unix/zptrace_mipsnn_linux.go | 51 - .../x/sys/unix/zptrace_mipsnnle_linux.go | 51 - .../x/sys/unix/zptrace_x86_linux.go | 81 - .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1485 - .../x/sys/unix/zsyscall_aix_ppc64.go | 1443 - .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1192 - .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1070 - .../x/sys/unix/zsyscall_darwin_386.1_13.go | 40 - .../x/sys/unix/zsyscall_darwin_386.1_13.s | 13 - .../x/sys/unix/zsyscall_darwin_386.go | 2431 - .../x/sys/unix/zsyscall_darwin_386.s | 291 - .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 40 - .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 13 - .../x/sys/unix/zsyscall_darwin_amd64.go | 2431 - .../x/sys/unix/zsyscall_darwin_amd64.s | 291 - .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 40 - 
.../x/sys/unix/zsyscall_darwin_arm.1_13.s | 13 - .../x/sys/unix/zsyscall_darwin_arm.go | 2417 - .../x/sys/unix/zsyscall_darwin_arm.s | 289 - .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 40 - .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 13 - .../x/sys/unix/zsyscall_darwin_arm64.go | 2431 - .../x/sys/unix/zsyscall_darwin_arm64.s | 291 - .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1679 - .../x/sys/unix/zsyscall_freebsd_386.go | 2016 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 2016 - .../x/sys/unix/zsyscall_freebsd_arm.go | 2016 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 2016 - .../x/sys/unix/zsyscall_illumos_amd64.go | 128 - .../golang.org/x/sys/unix/zsyscall_linux.go | 1944 - .../x/sys/unix/zsyscall_linux_386.go | 579 - .../x/sys/unix/zsyscall_linux_amd64.go | 746 - .../x/sys/unix/zsyscall_linux_arm.go | 716 - .../x/sys/unix/zsyscall_linux_arm64.go | 603 - .../x/sys/unix/zsyscall_linux_mips.go | 759 - .../x/sys/unix/zsyscall_linux_mips64.go | 730 - .../x/sys/unix/zsyscall_linux_mips64le.go | 730 - .../x/sys/unix/zsyscall_linux_mipsle.go | 759 - .../x/sys/unix/zsyscall_linux_ppc.go | 762 - .../x/sys/unix/zsyscall_linux_ppc64.go | 808 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 808 - .../x/sys/unix/zsyscall_linux_riscv64.go | 583 - .../x/sys/unix/zsyscall_linux_s390x.go | 578 - .../x/sys/unix/zsyscall_linux_sparc64.go | 741 - .../x/sys/unix/zsyscall_netbsd_386.go | 1862 - .../x/sys/unix/zsyscall_netbsd_amd64.go | 1862 - .../x/sys/unix/zsyscall_netbsd_arm.go | 1862 - .../x/sys/unix/zsyscall_netbsd_arm64.go | 1862 - .../x/sys/unix/zsyscall_openbsd_386.go | 1693 - .../x/sys/unix/zsyscall_openbsd_amd64.go | 1693 - .../x/sys/unix/zsyscall_openbsd_arm.go | 1693 - .../x/sys/unix/zsyscall_openbsd_arm64.go | 1693 - .../x/sys/unix/zsyscall_openbsd_mips64.go | 1693 - .../x/sys/unix/zsyscall_solaris_amd64.go | 1983 - .../x/sys/unix/zsyscall_zos_s390x.go | 1217 - .../x/sys/unix/zsysctl_openbsd_386.go | 274 - .../x/sys/unix/zsysctl_openbsd_amd64.go | 272 - 
.../x/sys/unix/zsysctl_openbsd_arm.go | 274 - .../x/sys/unix/zsysctl_openbsd_arm64.go | 276 - .../x/sys/unix/zsysctl_openbsd_mips64.go | 280 - .../x/sys/unix/zsysnum_darwin_386.go | 438 - .../x/sys/unix/zsysnum_darwin_amd64.go | 440 - .../x/sys/unix/zsysnum_darwin_arm.go | 438 - .../x/sys/unix/zsysnum_darwin_arm64.go | 438 - .../x/sys/unix/zsysnum_dragonfly_amd64.go | 317 - .../x/sys/unix/zsysnum_freebsd_386.go | 397 - .../x/sys/unix/zsysnum_freebsd_amd64.go | 397 - .../x/sys/unix/zsysnum_freebsd_arm.go | 397 - .../x/sys/unix/zsysnum_freebsd_arm64.go | 397 - .../x/sys/unix/zsysnum_linux_386.go | 442 - .../x/sys/unix/zsysnum_linux_amd64.go | 364 - .../x/sys/unix/zsysnum_linux_arm.go | 406 - .../x/sys/unix/zsysnum_linux_arm64.go | 309 - .../x/sys/unix/zsysnum_linux_mips.go | 427 - .../x/sys/unix/zsysnum_linux_mips64.go | 357 - .../x/sys/unix/zsysnum_linux_mips64le.go | 357 - .../x/sys/unix/zsysnum_linux_mipsle.go | 427 - .../x/sys/unix/zsysnum_linux_ppc.go | 434 - .../x/sys/unix/zsysnum_linux_ppc64.go | 406 - .../x/sys/unix/zsysnum_linux_ppc64le.go | 406 - .../x/sys/unix/zsysnum_linux_riscv64.go | 308 - .../x/sys/unix/zsysnum_linux_s390x.go | 371 - .../x/sys/unix/zsysnum_linux_sparc64.go | 385 - .../x/sys/unix/zsysnum_netbsd_386.go | 275 - .../x/sys/unix/zsysnum_netbsd_amd64.go | 275 - .../x/sys/unix/zsysnum_netbsd_arm.go | 275 - .../x/sys/unix/zsysnum_netbsd_arm64.go | 275 - .../x/sys/unix/zsysnum_openbsd_386.go | 219 - .../x/sys/unix/zsysnum_openbsd_amd64.go | 219 - .../x/sys/unix/zsysnum_openbsd_arm.go | 219 - .../x/sys/unix/zsysnum_openbsd_arm64.go | 218 - .../x/sys/unix/zsysnum_openbsd_mips64.go | 221 - .../x/sys/unix/zsysnum_zos_s390x.go | 2670 - .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 354 - .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 358 - .../x/sys/unix/ztypes_darwin_386.go | 524 - .../x/sys/unix/ztypes_darwin_amd64.go | 537 - .../x/sys/unix/ztypes_darwin_arm.go | 524 - .../x/sys/unix/ztypes_darwin_arm64.go | 537 - 
.../x/sys/unix/ztypes_dragonfly_amd64.go | 471 - .../x/sys/unix/ztypes_freebsd_386.go | 720 - .../x/sys/unix/ztypes_freebsd_amd64.go | 723 - .../x/sys/unix/ztypes_freebsd_arm.go | 704 - .../x/sys/unix/ztypes_freebsd_arm64.go | 701 - .../x/sys/unix/ztypes_illumos_amd64.go | 40 - vendor/golang.org/x/sys/unix/ztypes_linux.go | 3744 -- .../golang.org/x/sys/unix/ztypes_linux_386.go | 620 - .../x/sys/unix/ztypes_linux_amd64.go | 638 - .../golang.org/x/sys/unix/ztypes_linux_arm.go | 615 - .../x/sys/unix/ztypes_linux_arm64.go | 617 - .../x/sys/unix/ztypes_linux_mips.go | 621 - .../x/sys/unix/ztypes_linux_mips64.go | 620 - .../x/sys/unix/ztypes_linux_mips64le.go | 620 - .../x/sys/unix/ztypes_linux_mipsle.go | 621 - .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 627 - .../x/sys/unix/ztypes_linux_ppc64.go | 627 - .../x/sys/unix/ztypes_linux_ppc64le.go | 627 - .../x/sys/unix/ztypes_linux_riscv64.go | 645 - .../x/sys/unix/ztypes_linux_s390x.go | 641 - .../x/sys/unix/ztypes_linux_sparc64.go | 622 - .../x/sys/unix/ztypes_netbsd_386.go | 500 - .../x/sys/unix/ztypes_netbsd_amd64.go | 508 - .../x/sys/unix/ztypes_netbsd_arm.go | 505 - .../x/sys/unix/ztypes_netbsd_arm64.go | 508 - .../x/sys/unix/ztypes_openbsd_386.go | 573 - .../x/sys/unix/ztypes_openbsd_amd64.go | 573 - .../x/sys/unix/ztypes_openbsd_arm.go | 574 - .../x/sys/unix/ztypes_openbsd_arm64.go | 567 - .../x/sys/unix/ztypes_openbsd_mips64.go | 567 - .../x/sys/unix/ztypes_solaris_amd64.go | 442 - .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 402 - vendor/golang.org/x/sys/windows/aliases.go | 13 - .../golang.org/x/sys/windows/dll_windows.go | 416 - vendor/golang.org/x/sys/windows/empty.s | 8 - .../golang.org/x/sys/windows/env_windows.go | 54 - vendor/golang.org/x/sys/windows/eventlog.go | 20 - .../golang.org/x/sys/windows/exec_windows.go | 132 - .../x/sys/windows/memory_windows.go | 37 - vendor/golang.org/x/sys/windows/mkerrors.bash | 70 - .../x/sys/windows/mkknownfolderids.bash | 27 - 
vendor/golang.org/x/sys/windows/mksyscall.go | 9 - vendor/golang.org/x/sys/windows/race.go | 30 - vendor/golang.org/x/sys/windows/race0.go | 25 - .../golang.org/x/sys/windows/registry/key.go | 198 - .../x/sys/windows/registry/mksyscall.go | 9 - .../x/sys/windows/registry/syscall.go | 32 - .../x/sys/windows/registry/value.go | 386 - .../sys/windows/registry/zsyscall_windows.go | 117 - .../x/sys/windows/security_windows.go | 1443 - vendor/golang.org/x/sys/windows/service.go | 237 - .../x/sys/windows/setupapierrors_windows.go | 100 - vendor/golang.org/x/sys/windows/str.go | 22 - .../golang.org/x/sys/windows/svc/debug/log.go | 56 - .../x/sys/windows/svc/debug/service.go | 45 - vendor/golang.org/x/sys/windows/svc/event.go | 48 - vendor/golang.org/x/sys/windows/svc/go12.c | 24 - vendor/golang.org/x/sys/windows/svc/go12.go | 11 - vendor/golang.org/x/sys/windows/svc/go13.go | 31 - .../x/sys/windows/svc/mgr/config.go | 180 - .../golang.org/x/sys/windows/svc/mgr/mgr.go | 215 - .../x/sys/windows/svc/mgr/recovery.go | 141 - .../x/sys/windows/svc/mgr/service.go | 77 - .../golang.org/x/sys/windows/svc/security.go | 108 - .../golang.org/x/sys/windows/svc/service.go | 378 - .../x/sys/windows/svc/sys_windows_386.s | 67 - .../x/sys/windows/svc/sys_windows_amd64.s | 46 - .../x/sys/windows/svc/sys_windows_arm.s | 36 - .../x/sys/windows/svc/sys_windows_arm64.s | 31 - vendor/golang.org/x/sys/windows/syscall.go | 112 - .../x/sys/windows/syscall_windows.go | 1671 - .../golang.org/x/sys/windows/types_windows.go | 2774 -- .../x/sys/windows/types_windows_386.go | 35 - .../x/sys/windows/types_windows_amd64.go | 34 - .../x/sys/windows/types_windows_arm.go | 35 - .../x/sys/windows/types_windows_arm64.go | 34 - .../x/sys/windows/zerrors_windows.go | 9468 ---- .../x/sys/windows/zknownfolderids_windows.go | 149 - .../x/sys/windows/zsyscall_windows.go | 3639 -- vendor/golang.org/x/term/AUTHORS | 3 - vendor/golang.org/x/term/CONTRIBUTING.md | 26 - vendor/golang.org/x/term/CONTRIBUTORS | 3 - 
vendor/golang.org/x/term/LICENSE | 27 - vendor/golang.org/x/term/PATENTS | 22 - vendor/golang.org/x/term/README.md | 17 - vendor/golang.org/x/term/term.go | 58 - vendor/golang.org/x/term/term_plan9.go | 42 - vendor/golang.org/x/term/term_solaris.go | 111 - vendor/golang.org/x/term/term_unix.go | 91 - vendor/golang.org/x/term/term_unix_aix.go | 10 - vendor/golang.org/x/term/term_unix_bsd.go | 12 - vendor/golang.org/x/term/term_unix_linux.go | 10 - vendor/golang.org/x/term/term_unix_zos.go | 10 - vendor/golang.org/x/term/term_unsupported.go | 38 - vendor/golang.org/x/term/term_windows.go | 79 - vendor/golang.org/x/term/terminal.go | 987 - vendor/golang.org/x/text/AUTHORS | 3 - vendor/golang.org/x/text/CONTRIBUTORS | 3 - vendor/golang.org/x/text/LICENSE | 27 - vendor/golang.org/x/text/PATENTS | 22 - .../x/text/secure/bidirule/bidirule.go | 336 - .../x/text/secure/bidirule/bidirule10.0.0.go | 11 - .../x/text/secure/bidirule/bidirule9.0.0.go | 14 - .../golang.org/x/text/transform/transform.go | 709 - vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 - .../golang.org/x/text/unicode/bidi/bracket.go | 335 - vendor/golang.org/x/text/unicode/bidi/core.go | 1058 - vendor/golang.org/x/text/unicode/bidi/prop.go | 206 - .../x/text/unicode/bidi/tables10.0.0.go | 1815 - .../x/text/unicode/bidi/tables11.0.0.go | 1887 - .../x/text/unicode/bidi/tables12.0.0.go | 1923 - .../x/text/unicode/bidi/tables13.0.0.go | 1955 - .../x/text/unicode/bidi/tables9.0.0.go | 1781 - .../golang.org/x/text/unicode/bidi/trieval.go | 60 - .../x/text/unicode/norm/composition.go | 512 - .../x/text/unicode/norm/forminfo.go | 278 - .../golang.org/x/text/unicode/norm/input.go | 109 - vendor/golang.org/x/text/unicode/norm/iter.go | 458 - .../x/text/unicode/norm/normalize.go | 609 - .../x/text/unicode/norm/readwriter.go | 125 - .../x/text/unicode/norm/tables10.0.0.go | 7657 --- .../x/text/unicode/norm/tables11.0.0.go | 7693 --- .../x/text/unicode/norm/tables12.0.0.go | 7710 --- 
.../x/text/unicode/norm/tables13.0.0.go | 7760 --- .../x/text/unicode/norm/tables9.0.0.go | 7637 --- .../x/text/unicode/norm/transform.go | 88 - vendor/golang.org/x/text/unicode/norm/trie.go | 54 - vendor/golang.org/x/time/AUTHORS | 3 - vendor/golang.org/x/time/CONTRIBUTORS | 3 - vendor/golang.org/x/time/LICENSE | 27 - vendor/golang.org/x/time/PATENTS | 22 - vendor/golang.org/x/time/rate/rate.go | 402 - vendor/golang.org/x/xerrors/LICENSE | 27 - vendor/golang.org/x/xerrors/PATENTS | 22 - vendor/golang.org/x/xerrors/README | 2 - vendor/golang.org/x/xerrors/adaptor.go | 193 - vendor/golang.org/x/xerrors/codereview.cfg | 1 - vendor/golang.org/x/xerrors/doc.go | 22 - vendor/golang.org/x/xerrors/errors.go | 33 - vendor/golang.org/x/xerrors/fmt.go | 187 - vendor/golang.org/x/xerrors/format.go | 34 - vendor/golang.org/x/xerrors/frame.go | 56 - .../golang.org/x/xerrors/internal/internal.go | 8 - vendor/golang.org/x/xerrors/wrap.go | 106 - vendor/google.golang.org/appengine/LICENSE | 202 - .../appengine/internal/api.go | 675 - .../appengine/internal/api_classic.go | 169 - .../appengine/internal/api_common.go | 123 - .../appengine/internal/app_id.go | 28 - .../appengine/internal/base/api_base.pb.go | 308 - .../appengine/internal/base/api_base.proto | 33 - .../internal/datastore/datastore_v3.pb.go | 4367 -- .../internal/datastore/datastore_v3.proto | 551 - .../appengine/internal/identity.go | 55 - .../appengine/internal/identity_classic.go | 61 - .../appengine/internal/identity_flex.go | 11 - .../appengine/internal/identity_vm.go | 134 - .../appengine/internal/internal.go | 110 - .../appengine/internal/log/log_service.pb.go | 1313 - .../appengine/internal/log/log_service.proto | 150 - .../appengine/internal/main.go | 16 - .../appengine/internal/main_common.go | 7 - .../appengine/internal/main_vm.go | 69 - .../appengine/internal/metadata.go | 60 - .../appengine/internal/net.go | 56 - .../appengine/internal/regen.sh | 40 - .../internal/remote_api/remote_api.pb.go | 361 - 
.../internal/remote_api/remote_api.proto | 44 - .../appengine/internal/transaction.go | 115 - .../internal/urlfetch/urlfetch_service.pb.go | 527 - .../internal/urlfetch/urlfetch_service.proto | 64 - .../appengine/urlfetch/urlfetch.go | 210 - vendor/google.golang.org/genproto/LICENSE | 202 - .../googleapis/rpc/status/status.pb.go | 115 - vendor/google.golang.org/grpc/.travis.yml | 42 - vendor/google.golang.org/grpc/AUTHORS | 1 - .../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 - vendor/google.golang.org/grpc/CONTRIBUTING.md | 62 - vendor/google.golang.org/grpc/GOVERNANCE.md | 1 - vendor/google.golang.org/grpc/LICENSE | 202 - vendor/google.golang.org/grpc/MAINTAINERS.md | 27 - vendor/google.golang.org/grpc/Makefile | 63 - vendor/google.golang.org/grpc/README.md | 121 - .../grpc/attributes/attributes.go | 70 - vendor/google.golang.org/grpc/backoff.go | 58 - .../google.golang.org/grpc/backoff/backoff.go | 52 - vendor/google.golang.org/grpc/balancer.go | 391 - .../grpc/balancer/balancer.go | 454 - .../grpc/balancer/base/balancer.go | 278 - .../grpc/balancer/base/base.go | 93 - .../grpc/balancer/roundrobin/roundrobin.go | 81 - .../grpc/balancer_conn_wrappers.go | 271 - .../grpc/balancer_v1_wrapper.go | 334 - .../grpc_binarylog_v1/binarylog.pb.go | 900 - vendor/google.golang.org/grpc/call.go | 74 - vendor/google.golang.org/grpc/clientconn.go | 1568 - vendor/google.golang.org/grpc/codec.go | 50 - vendor/google.golang.org/grpc/codegen.sh | 17 - .../grpc/codes/code_string.go | 62 - vendor/google.golang.org/grpc/codes/codes.go | 198 - .../grpc/connectivity/connectivity.go | 73 - .../grpc/credentials/credentials.go | 251 - .../grpc/credentials/go12.go | 30 - .../grpc/credentials/internal/syscallconn.go | 61 - .../internal/syscallconn_appengine.go | 30 - .../google.golang.org/grpc/credentials/tls.go | 225 - vendor/google.golang.org/grpc/dialoptions.go | 594 - vendor/google.golang.org/grpc/doc.go | 24 - .../grpc/encoding/encoding.go | 122 - .../grpc/encoding/proto/proto.go | 
110 - .../google.golang.org/grpc/grpclog/grpclog.go | 126 - .../google.golang.org/grpc/grpclog/logger.go | 85 - .../grpc/grpclog/loggerv2.go | 195 - .../google.golang.org/grpc/health/client.go | 117 - .../grpc/health/grpc_health_v1/health.pb.go | 343 - .../grpc/health/regenerate.sh | 33 - .../google.golang.org/grpc/health/server.go | 165 - vendor/google.golang.org/grpc/install_gae.sh | 6 - vendor/google.golang.org/grpc/interceptor.go | 77 - .../grpc/internal/backoff/backoff.go | 73 - .../grpc/internal/balancerload/load.go | 46 - .../grpc/internal/binarylog/binarylog.go | 167 - .../internal/binarylog/binarylog_testutil.go | 42 - .../grpc/internal/binarylog/env_config.go | 210 - .../grpc/internal/binarylog/method_logger.go | 423 - .../grpc/internal/binarylog/regenerate.sh | 33 - .../grpc/internal/binarylog/sink.go | 162 - .../grpc/internal/binarylog/util.go | 41 - .../grpc/internal/buffer/unbounded.go | 85 - .../grpc/internal/channelz/funcs.go | 727 - .../grpc/internal/channelz/types.go | 702 - .../grpc/internal/channelz/types_linux.go | 53 - .../grpc/internal/channelz/types_nonlinux.go | 44 - .../grpc/internal/channelz/util_linux.go | 39 - .../grpc/internal/channelz/util_nonlinux.go | 26 - .../grpc/internal/envconfig/envconfig.go | 38 - .../grpc/internal/grpcrand/grpcrand.go | 56 - .../grpc/internal/grpcsync/event.go | 61 - .../grpc/internal/internal.go | 72 - .../internal/resolver/dns/dns_resolver.go | 441 - .../grpc/internal/resolver/dns/go113.go | 33 - .../resolver/passthrough/passthrough.go | 57 - .../grpc/internal/syscall/syscall_linux.go | 114 - .../grpc/internal/syscall/syscall_nonlinux.go | 73 - .../grpc/internal/transport/bdp_estimator.go | 141 - .../grpc/internal/transport/controlbuf.go | 926 - .../grpc/internal/transport/defaults.go | 49 - .../grpc/internal/transport/flowcontrol.go | 217 - .../grpc/internal/transport/handler_server.go | 435 - .../grpc/internal/transport/http2_client.go | 1454 - .../grpc/internal/transport/http2_server.go | 1253 - 
.../grpc/internal/transport/http_util.go | 677 - .../grpc/internal/transport/log.go | 44 - .../grpc/internal/transport/transport.go | 808 - .../grpc/keepalive/keepalive.go | 85 - .../grpc/metadata/metadata.go | 209 - .../grpc/naming/dns_resolver.go | 293 - .../google.golang.org/grpc/naming/naming.go | 68 - vendor/google.golang.org/grpc/peer/peer.go | 51 - .../google.golang.org/grpc/picker_wrapper.go | 229 - vendor/google.golang.org/grpc/pickfirst.go | 159 - vendor/google.golang.org/grpc/preloader.go | 64 - vendor/google.golang.org/grpc/proxy.go | 152 - .../grpc/resolver/resolver.go | 253 - .../grpc/resolver_conn_wrapper.go | 263 - vendor/google.golang.org/grpc/rpc_util.go | 887 - vendor/google.golang.org/grpc/server.go | 1548 - .../google.golang.org/grpc/service_config.go | 434 - .../grpc/serviceconfig/serviceconfig.go | 41 - .../google.golang.org/grpc/stats/handlers.go | 63 - vendor/google.golang.org/grpc/stats/stats.go | 311 - .../google.golang.org/grpc/status/status.go | 228 - vendor/google.golang.org/grpc/stream.go | 1529 - vendor/google.golang.org/grpc/tap/tap.go | 51 - vendor/google.golang.org/grpc/trace.go | 123 - vendor/google.golang.org/grpc/version.go | 22 - vendor/google.golang.org/grpc/vet.sh | 159 - vendor/gopkg.in/inf.v0/LICENSE | 28 - vendor/gopkg.in/inf.v0/dec.go | 615 - vendor/gopkg.in/inf.v0/rounder.go | 145 - .../square/go-jose.v2/.gitcookies.sh.enc | 1 - vendor/gopkg.in/square/go-jose.v2/.gitignore | 8 - vendor/gopkg.in/square/go-jose.v2/.travis.yml | 45 - .../gopkg.in/square/go-jose.v2/BUG-BOUNTY.md | 10 - .../square/go-jose.v2/CONTRIBUTING.md | 14 - vendor/gopkg.in/square/go-jose.v2/LICENSE | 202 - vendor/gopkg.in/square/go-jose.v2/README.md | 118 - .../gopkg.in/square/go-jose.v2/asymmetric.go | 592 - .../square/go-jose.v2/cipher/cbc_hmac.go | 196 - .../square/go-jose.v2/cipher/concat_kdf.go | 75 - .../square/go-jose.v2/cipher/ecdh_es.go | 86 - .../square/go-jose.v2/cipher/key_wrap.go | 109 - vendor/gopkg.in/square/go-jose.v2/crypter.go | 541 
- vendor/gopkg.in/square/go-jose.v2/doc.go | 27 - vendor/gopkg.in/square/go-jose.v2/encoding.go | 185 - .../gopkg.in/square/go-jose.v2/json/LICENSE | 27 - .../gopkg.in/square/go-jose.v2/json/README.md | 13 - .../gopkg.in/square/go-jose.v2/json/decode.go | 1183 - .../gopkg.in/square/go-jose.v2/json/encode.go | 1197 - .../gopkg.in/square/go-jose.v2/json/indent.go | 141 - .../square/go-jose.v2/json/scanner.go | 623 - .../gopkg.in/square/go-jose.v2/json/stream.go | 480 - .../gopkg.in/square/go-jose.v2/json/tags.go | 44 - vendor/gopkg.in/square/go-jose.v2/jwe.go | 294 - vendor/gopkg.in/square/go-jose.v2/jwk.go | 760 - vendor/gopkg.in/square/go-jose.v2/jws.go | 366 - vendor/gopkg.in/square/go-jose.v2/opaque.go | 144 - vendor/gopkg.in/square/go-jose.v2/shared.go | 520 - vendor/gopkg.in/square/go-jose.v2/signing.go | 441 - .../gopkg.in/square/go-jose.v2/symmetric.go | 482 - vendor/gopkg.in/yaml.v2/.travis.yml | 17 - vendor/gopkg.in/yaml.v2/LICENSE | 201 - vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 - vendor/gopkg.in/yaml.v2/README.md | 133 - vendor/gopkg.in/yaml.v2/apic.go | 744 - vendor/gopkg.in/yaml.v2/decode.go | 815 - vendor/gopkg.in/yaml.v2/emitterc.go | 1685 - vendor/gopkg.in/yaml.v2/encode.go | 390 - vendor/gopkg.in/yaml.v2/parserc.go | 1095 - vendor/gopkg.in/yaml.v2/readerc.go | 412 - vendor/gopkg.in/yaml.v2/resolve.go | 258 - vendor/gopkg.in/yaml.v2/scannerc.go | 2711 - vendor/gopkg.in/yaml.v2/sorter.go | 113 - vendor/gopkg.in/yaml.v2/writerc.go | 26 - vendor/gopkg.in/yaml.v2/yaml.go | 478 - vendor/gopkg.in/yaml.v2/yamlh.go | 739 - vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 - vendor/gopkg.in/yaml.v3/.travis.yml | 16 - vendor/gopkg.in/yaml.v3/LICENSE | 50 - vendor/gopkg.in/yaml.v3/README.md | 150 - vendor/gopkg.in/yaml.v3/apic.go | 746 - vendor/gopkg.in/yaml.v3/decode.go | 931 - vendor/gopkg.in/yaml.v3/emitterc.go | 1992 - vendor/gopkg.in/yaml.v3/encode.go | 561 - vendor/gopkg.in/yaml.v3/parserc.go | 1229 - vendor/gopkg.in/yaml.v3/readerc.go | 434 - 
vendor/gopkg.in/yaml.v3/resolve.go | 326 - vendor/gopkg.in/yaml.v3/scannerc.go | 3025 -- vendor/gopkg.in/yaml.v3/sorter.go | 134 - vendor/gopkg.in/yaml.v3/writerc.go | 48 - vendor/gopkg.in/yaml.v3/yaml.go | 662 - vendor/gopkg.in/yaml.v3/yamlh.go | 805 - vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 - vendor/gotest.tools/v3/assert/assert.go | 219 - vendor/gotest.tools/v3/assert/cmp/compare.go | 365 - vendor/gotest.tools/v3/assert/cmp/result.go | 99 - .../gotest.tools/v3/internal/assert/assert.go | 143 - .../gotest.tools/v3/internal/assert/result.go | 125 - .../gotest.tools/v3/internal/difflib/LICENSE | 27 - .../v3/internal/difflib/difflib.go | 423 - .../gotest.tools/v3/internal/format/diff.go | 161 - .../gotest.tools/v3/internal/format/format.go | 27 - .../gotest.tools/v3/internal/source/defers.go | 53 - .../gotest.tools/v3/internal/source/source.go | 181 - vendor/k8s.io/api/authentication/v1/doc.go | 22 + .../api/authentication/v1/generated.pb.go | 2554 + .../api/authentication/v1/generated.proto | 188 + .../k8s.io/api/authentication/v1/register.go | 52 + vendor/k8s.io/api/authentication/v1/types.go | 199 + .../v1/types_swagger_doc_generated.go | 119 + .../v1/zz_generated.deepcopy.go | 244 + .../api/core/v1/annotation_key_constants.go | 26 +- vendor/k8s.io/api/core/v1/generated.pb.go | 2643 +- vendor/k8s.io/api/core/v1/generated.proto | 245 +- vendor/k8s.io/api/core/v1/register.go | 1 - vendor/k8s.io/api/core/v1/types.go | 329 +- .../core/v1/types_swagger_doc_generated.go | 93 +- .../k8s.io/api/core/v1/well_known_labels.go | 32 +- .../k8s.io/api/core/v1/well_known_taints.go | 4 +- .../api/core/v1/zz_generated.deepcopy.go | 68 +- .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 3 - .../apimachinery/pkg/api/errors/errors.go | 2 +- .../apimachinery/pkg/api/meta}/OWNERS | 21 +- .../apimachinery/pkg/api/meta/conditions.go | 102 + .../apimachinery/pkg/api/meta/doc.go} | 8 +- .../apimachinery/pkg/api/meta/errors.go | 121 + .../pkg/api/meta/firsthit_restmapper.go | 97 + 
.../k8s.io/apimachinery/pkg/api/meta/help.go | 264 + .../apimachinery/pkg/api/meta/interfaces.go | 134 + .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 104 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 644 + .../pkg/api/meta/multirestmapper.go | 210 + .../apimachinery/pkg/api/meta/priority.go | 222 + .../apimachinery/pkg/api/meta/restmapper.go | 521 + .../apimachinery/pkg/api/resource/OWNERS | 1 - .../apimachinery/pkg/api/resource/quantity.go | 9 + .../pkg/api/resource/quantity_proto.go | 2 +- .../pkg/api/validation/path/name.go | 68 + .../pkg/apis/meta/internalversion/doc.go | 20 + .../pkg/apis/meta/internalversion/register.go | 88 + .../apis/meta/internalversion/scheme/doc.go} | 6 +- .../meta/internalversion/scheme/register.go | 39 + .../pkg/apis/meta/internalversion/types.go | 80 + .../zz_generated.conversion.go | 145 + .../internalversion/zz_generated.deepcopy.go | 96 + .../apimachinery/pkg/apis/meta/v1/OWNERS | 7 - .../pkg/apis/meta/v1/generated.pb.go | 822 +- .../pkg/apis/meta/v1/generated.proto | 49 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 10 +- .../apimachinery/pkg/apis/meta/v1/register.go | 1 - .../apimachinery/pkg/apis/meta/v1/types.go | 78 +- .../meta/v1/types_swagger_doc_generated.go | 38 +- .../pkg/apis/meta/v1/validation/validation.go | 271 + .../apis/meta/v1/zz_generated.conversion.go | 30 - .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 47 +- .../pkg/apis/meta/v1beta1/conversion.go | 46 + .../pkg/apis/meta/v1beta1/deepcopy.go} | 6 +- .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 23 + .../pkg/apis/meta/v1beta1/generated.pb.go | 412 + .../pkg/apis/meta/v1beta1/generated.proto | 41 + .../pkg/apis/meta/v1beta1/register.go | 62 + .../pkg/apis/meta/v1beta1/types.go | 84 + .../v1beta1/types_swagger_doc_generated.go | 40 + .../meta/v1beta1/zz_generated.deepcopy.go | 59 + .../meta/v1beta1/zz_generated.defaults.go | 32 + .../k8s.io/apimachinery/pkg/labels/labels.go | 10 +- .../apimachinery/pkg/labels/selector.go | 147 +- 
.../k8s.io/apimachinery/pkg/runtime/scheme.go | 32 +- .../pkg/runtime/serializer/json/json.go | 5 +- .../runtime/serializer/protobuf/protobuf.go | 17 +- .../serializer/recognizer/recognizer.go | 13 +- .../apimachinery/pkg/util/clock/clock.go | 42 +- .../pkg/util/httpstream/httpstream.go | 2 + .../pkg/util/httpstream/spdy/connection.go | 27 +- .../pkg/util/httpstream/spdy/roundtripper.go | 3 + .../k8s.io/apimachinery/pkg/util/net/http.go | 5 +- .../apimachinery/pkg/util/net/interface.go | 46 +- .../pkg/util/validation/field/errors.go | 2 +- .../pkg/util/validation/field/path.go | 23 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 239 +- .../apimachinery/pkg/util/yaml/decoder.go | 60 +- vendor/k8s.io/apimachinery/pkg/watch/mux.go | 28 + .../apimachinery/pkg/watch/streamwatcher.go | 36 +- .../third_party/forked/golang}/LICENSE | 0 .../third_party/forked/golang}/PATENTS | 0 vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS | 9 + .../apiserver/pkg/apis/audit}/doc.go | 8 +- .../apiserver/pkg/apis/audit/helpers.go | 38 + .../apiserver/pkg/apis/audit/register.go | 53 + .../k8s.io/apiserver/pkg/apis/audit/types.go | 286 + .../apiserver/pkg/apis/audit/v1}/doc.go | 14 +- .../pkg/apis/audit/v1/generated.pb.go | 3163 ++ .../pkg/apis/audit/v1/generated.proto | 249 + .../apiserver/pkg/apis/audit/v1/register.go | 58 + .../apiserver/pkg/apis/audit/v1/types.go | 280 + .../apis/audit/v1/zz_generated.conversion.go | 322 + .../apis/audit/v1/zz_generated.deepcopy.go | 291 + .../apis/audit/v1/zz_generated.defaults.go | 32 + .../pkg/apis/audit/v1alpha1/conversion.go | 78 + .../apiserver/pkg/apis/audit/v1alpha1/doc.go | 26 + .../pkg/apis/audit/v1alpha1/generated.pb.go | 3220 ++ .../pkg/apis/audit/v1alpha1/generated.proto | 254 + .../pkg/apis/audit/v1alpha1/register.go | 58 + .../pkg/apis/audit/v1alpha1/types.go | 303 + .../audit/v1alpha1/zz_generated.conversion.go | 339 + .../audit/v1alpha1/zz_generated.deepcopy.go | 293 + .../audit/v1alpha1/zz_generated.defaults.go | 32 + 
.../zz_generated.prerelease-lifecycle.go | 121 + .../pkg/apis/audit/v1beta1/conversion.go | 45 + .../apiserver/pkg/apis/audit/v1beta1/doc.go | 26 + .../pkg/apis/audit/v1beta1/generated.pb.go | 3261 ++ .../pkg/apis/audit/v1beta1/generated.proto | 263 + .../pkg/apis/audit/v1beta1/register.go | 58 + .../apiserver/pkg/apis/audit/v1beta1/types.go | 304 + .../audit/v1beta1/zz_generated.conversion.go | 334 + .../audit/v1beta1/zz_generated.deepcopy.go | 293 + .../audit/v1beta1/zz_generated.defaults.go | 32 + .../zz_generated.prerelease-lifecycle.go | 121 + .../pkg/apis/audit/zz_generated.deepcopy.go | 291 + vendor/k8s.io/apiserver/pkg/audit/OWNERS | 9 + vendor/k8s.io/apiserver/pkg/audit/context.go | 84 + vendor/k8s.io/apiserver/pkg/audit/format.go | 73 + vendor/k8s.io/apiserver/pkg/audit/metrics.go | 111 + vendor/k8s.io/apiserver/pkg/audit/request.go | 244 + vendor/k8s.io/apiserver/pkg/audit/scheme.go | 42 + vendor/k8s.io/apiserver/pkg/audit/types.go | 46 + vendor/k8s.io/apiserver/pkg/audit/union.go | 70 + .../apiserver/pkg/authentication/user/doc.go | 19 + .../apiserver/pkg/authentication/user/user.go | 84 + .../authorization/authorizer/interfaces.go | 159 + .../pkg/authorization/authorizer/rule.go | 73 + .../apiserver/pkg/endpoints/metrics/OWNERS | 8 + .../pkg/endpoints/metrics/metrics.go | 746 + .../apiserver/pkg/endpoints/request/OWNERS | 4 + .../pkg/endpoints/request/auditid.go | 66 + .../pkg/endpoints/request/context.go | 93 + .../apiserver/pkg/endpoints/request/doc.go | 20 + .../pkg/endpoints/request/received_time.go | 45 + .../pkg/endpoints/request/requestinfo.go | 274 + vendor/k8s.io/apiserver/pkg/features/OWNERS | 4 + .../apiserver/pkg/features/kube_features.go | 201 + .../apiserver/pkg/server/httplog/httplog.go | 125 +- .../pkg/util/feature/feature_gate.go | 33 + .../clientauthentication/install/install.go | 36 + .../pkg/apis/clientauthentication/types.go | 2 +- .../clientauthentication/v1/conversion.go | 28 + .../pkg/apis/clientauthentication/v1/doc.go | 24 + 
.../apis/clientauthentication/v1/register.go | 55 + .../pkg/apis/clientauthentication/v1/types.go | 122 + .../v1/zz_generated.conversion.go | 200 + .../v1/zz_generated.deepcopy.go | 119 + .../v1/zz_generated.defaults.go | 32 + .../v1beta1/conversion.go | 2 +- .../clientauthentication/v1beta1/types.go | 3 + .../v1beta1/zz_generated.conversion.go | 3 +- vendor/k8s.io/client-go/pkg/version/base.go | 4 +- vendor/k8s.io/client-go/pkg/version/def.bzl | 38 - .../plugin/pkg/client/auth/exec/exec.go | 130 +- .../plugin/pkg/client/auth/exec/metrics.go | 51 + vendor/k8s.io/client-go/rest/OWNERS | 3 - vendor/k8s.io/client-go/rest/client.go | 2 +- vendor/k8s.io/client-go/rest/plugin.go | 10 + vendor/k8s.io/client-go/rest/request.go | 359 +- vendor/k8s.io/client-go/rest/with_retry.go | 232 + .../client-go/tools/clientcmd/api/types.go | 49 +- .../k8s.io/client-go/tools/metrics/metrics.go | 26 +- .../client-go/tools/remotecommand/v2.go | 5 + .../client-go/transport/round_trippers.go | 61 +- .../k8s.io/client-go/transport/spdy/spdy.go | 9 +- .../client-go/transport/token_source.go | 56 +- .../k8s.io/client-go/transport/transport.go | 40 +- vendor/k8s.io/client-go/util/cert/cert.go | 1 + .../util/connrotation/connrotation.go | 88 +- .../k8s.io/client-go/util/workqueue/queue.go | 8 +- .../featuregate/feature_gate.go | 375 + vendor/k8s.io/component-base/metrics/OWNERS | 11 + .../component-base/metrics/collector.go | 190 + .../k8s.io/component-base/metrics/counter.go | 233 + vendor/k8s.io/component-base/metrics/desc.go | 225 + vendor/k8s.io/component-base/metrics/gauge.go | 236 + .../component-base/metrics/histogram.go | 220 + vendor/k8s.io/component-base/metrics/http.go | 77 + .../k8s.io/component-base/metrics/labels.go | 22 + .../metrics/legacyregistry/registry.go | 88 + .../k8s.io/component-base/metrics/metric.go | 243 + .../k8s.io/component-base/metrics/options.go | 125 + vendor/k8s.io/component-base/metrics/opts.go | 301 + .../metrics/processstarttime.go | 51 + 
.../metrics/processstarttime_others.go | 38 + .../metrics/processstarttime_windows.go | 33 + .../k8s.io/component-base/metrics/registry.go | 337 + .../k8s.io/component-base/metrics/summary.go | 214 + vendor/k8s.io/component-base/metrics/value.go | 60 + .../k8s.io/component-base/metrics/version.go | 37 + .../component-base/metrics/version_parser.go | 50 + .../k8s.io/component-base/metrics/wrappers.go | 95 + .../component-base/version/.gitattributes | 1 + vendor/k8s.io/component-base/version/base.go | 63 + .../k8s.io/component-base/version/version.go | 42 + .../cri-api/pkg/apis/runtime/v1/api.pb.go | 40906 ++++++++++++++++ .../cri-api/pkg/apis/runtime/v1/api.proto | 1606 + .../cri-api/pkg/apis/runtime/v1/constants.go | 55 + .../pkg/apis/runtime/v1alpha2/api.pb.go | 7090 ++- .../pkg/apis/runtime/v1alpha2/api.proto | 244 +- vendor/k8s.io/klog/v2/.gitignore | 17 - vendor/k8s.io/klog/v2/CONTRIBUTING.md | 22 - vendor/k8s.io/klog/v2/LICENSE | 191 - vendor/k8s.io/klog/v2/OWNERS | 19 - vendor/k8s.io/klog/v2/README.md | 103 - vendor/k8s.io/klog/v2/RELEASE.md | 9 - vendor/k8s.io/klog/v2/SECURITY.md | 22 - vendor/k8s.io/klog/v2/SECURITY_CONTACTS | 20 - vendor/k8s.io/klog/v2/code-of-conduct.md | 3 - vendor/k8s.io/klog/v2/klog.go | 1586 - vendor/k8s.io/klog/v2/klog_file.go | 164 - vendor/k8s.io/utils/clock/README.md | 4 + vendor/k8s.io/utils/clock/clock.go | 168 + .../k8s.io/utils/clock/testing/fake_clock.go | 360 + .../clock/testing/simple_interval_clock.go | 44 + vendor/modules.txt | 590 - .../structured-merge-diff/v4/LICENSE | 201 - .../v4/value/allocator.go | 203 - .../structured-merge-diff/v4/value/fields.go | 97 - .../v4/value/jsontagutil.go | 91 - .../structured-merge-diff/v4/value/list.go | 139 - .../v4/value/listreflect.go | 98 - .../v4/value/listunstructured.go | 74 - .../structured-merge-diff/v4/value/map.go | 270 - .../v4/value/mapreflect.go | 209 - .../v4/value/mapunstructured.go | 190 - .../v4/value/reflectcache.go | 467 - 
.../structured-merge-diff/v4/value/scalar.go | 50 - .../v4/value/structreflect.go | 208 - .../structured-merge-diff/v4/value/value.go | 347 - .../v4/value/valuereflect.go | 294 - .../v4/value/valueunstructured.go | 178 - vendor/sigs.k8s.io/yaml/.gitignore | 20 - vendor/sigs.k8s.io/yaml/.travis.yml | 13 - vendor/sigs.k8s.io/yaml/CONTRIBUTING.md | 31 - vendor/sigs.k8s.io/yaml/LICENSE | 50 - vendor/sigs.k8s.io/yaml/README.md | 123 - vendor/sigs.k8s.io/yaml/RELEASE.md | 9 - vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS | 17 - vendor/sigs.k8s.io/yaml/code-of-conduct.md | 3 - vendor/sigs.k8s.io/yaml/fields.go | 502 - vendor/sigs.k8s.io/yaml/yaml.go | 380 - vendor/sigs.k8s.io/yaml/yaml_go110.go | 14 - version/version.go | 2 +- 3593 files changed, 112220 insertions(+), 722606 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yaml delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yaml create mode 100644 .github/workflows/build-test-images.yml create mode 100644 .github/workflows/images.yml create mode 100644 .github/workflows/release/Dockerfile create mode 100644 .github/workflows/windows-periodic-trigger.yml create mode 100644 .github/workflows/windows-periodic.yml delete mode 100644 .zuul.yaml delete mode 100644 .zuul/playbooks/containerd-build/integration-test.yaml delete mode 100644 .zuul/playbooks/containerd-build/run.yaml delete mode 100644 .zuul/playbooks/containerd-build/unit-test.yaml rename {vendor/github.com/Microsoft/hcsshim => api}/Protobuild.toml (65%) rename {vendor/github.com/containerd/cgroups/stats/v1 => api}/doc.go (98%) rename vendor/github.com/containerd/continuity/proto/gen.go => api/services/containers/v1/doc.go (90%) rename vendor/github.com/containerd/continuity/sysx/nodata_linux.go => api/services/content/v1/doc.go (89%) rename {vendor/github.com/containerd/cgroups/v2/stats => api/services/diff/v1}/doc.go (97%) 
create mode 100644 api/services/namespaces/v1/doc.go create mode 100644 api/services/snapshots/v1/doc.go create mode 100644 api/services/tasks/v1/doc.go create mode 100644 api/services/version/v1/doc.go create mode 100644 api/types/task/doc.go create mode 100644 archive/tarheader/tarheader.go create mode 100644 archive/tarheader/tarheader_unix.go create mode 100644 content/adaptor_test.go create mode 100755 contrib/Dockerfile.test.d/cri-in-userns/docker-entrypoint.sh create mode 100644 contrib/Dockerfile.test.d/cri-in-userns/etc_containerd_config.toml create mode 100644 contrib/fuzz/archive_fuzzer.go rename vendor/github.com/containerd/continuity/devices/devices_windows.go => contrib/fuzz/cap_fuzzer.go (74%) create mode 100644 contrib/fuzz/container_fuzzer.go create mode 100644 contrib/fuzz/containerd_import_fuzzer.go create mode 100644 contrib/fuzz/content_fuzzer.go rename vendor/github.com/containerd/fifo/utils.go => contrib/fuzz/cri_fuzzer.go (58%) create mode 100644 contrib/fuzz/docker_fuzzer.go create mode 100644 contrib/fuzz/metadata_fuzzer.go create mode 100755 contrib/fuzz/oss_fuzz_build.sh create mode 100644 contrib/seccomp/kernelversion/kernel_linux.go create mode 100644 contrib/seccomp/kernelversion/kernel_linux_test.go delete mode 100644 debian/README.Debian delete mode 100644 debian/changelog delete mode 100644 debian/clean delete mode 100644 debian/compat delete mode 100644 debian/containerd.docs delete mode 100644 debian/containerd.install delete mode 120000 debian/containerd.service delete mode 100644 debian/control delete mode 100644 debian/copyright delete mode 100644 debian/docs delete mode 100644 debian/golang-github-containerd-containerd-dev.install delete mode 100755 debian/rules delete mode 100644 debian/source/format delete mode 100755 debian/tests/basic-smoke delete mode 100644 debian/tests/control delete mode 100644 debian/watch create mode 100644 defaults/defaults_darwin.go create mode 100644 diff/apply/apply_darwin.go create mode 100644 
docs/tracing.md create mode 100644 events/plugin/plugin.go delete mode 100644 integration/client/restart_monitor_linux_test.go create mode 100644 integration/client/restart_monitor_test.go create mode 100644 integration/container_volume_test.go rename {vendor/k8s.io => integration}/cri-api/pkg/apis/services.go (56%) create mode 100644 integration/failpoint/cmd/cni-bridge-fp/README.md create mode 100644 integration/failpoint/cmd/cni-bridge-fp/main_linux.go rename pkg/cri/platforms/default_unix.go => integration/failpoint/cmd/containerd-shim-runc-fp-v1/main_linux.go (71%) create mode 100644 integration/failpoint/cmd/containerd-shim-runc-fp-v1/plugin_linux.go create mode 100644 integration/images/README.md create mode 100644 integration/images/volume-copy-up/Dockerfile_windows create mode 100644 integration/images/volume-ownership/Dockerfile_windows rename vendor/github.com/containerd/continuity/fs/diff_windows.go => integration/images/volume-ownership/tools/get_owner_windows.go (53%) create mode 100644 integration/issue7496_linux_test.go create mode 100644 integration/sandbox_clean_remove_windows_test.go create mode 100644 integration/sandbox_run_rollback_test.go create mode 100644 integration/shim_dial_unix_test.go rename vendor/github.com/containerd/continuity/fs/hardlink_unix.go => integration/volume_copy_up_unix_test.go (67%) rename vendor/github.com/containerd/continuity/driver/lchmod_unix.go => integration/volume_copy_up_windows_test.go (61%) create mode 100644 integration/windows_hostprocess_test.go create mode 100644 leases/lease_test.go rename pkg/cri/platforms/default_windows.go => metrics/cgroups/common/type.go (71%) create mode 100644 metrics/cgroups/metrics_test.go create mode 100644 metrics/metrics.go rename sys/mount_linux.go => mount/fmountat_linux.go (90%) rename sys/mount_linux_test.go => mount/fmountat_linux_test.go (89%) create mode 100644 mount/mount_test.go rename {sys => mount}/subprocess_unsafe_linux.go (98%) rename {sys => 
mount}/subprocess_unsafe_linux.s (100%) create mode 100644 oci/mounts.go rename vendor/github.com/containerd/go-runc/command_other.go => oci/mounts_freebsd.go (62%) create mode 100644 oci/utils_unix_go116_test.go rename vendor/github.com/containerd/continuity/sysx/nodata_unix.go => oci/utils_unix_go117_test.go (80%) create mode 100644 oci/utils_unix_test.go create mode 100644 pkg/atomicfile/file.go create mode 100644 pkg/atomicfile/file_test.go create mode 100644 pkg/cri/io/metrics.go rename vendor/github.com/containerd/cgroups/state.go => pkg/cri/labels/labels.go (50%) create mode 100644 pkg/cri/opts/spec_windows_test.go create mode 100644 pkg/cri/server/container_checkpoint.go rename vendor/github.com/containerd/console/pty_unix.go => pkg/cri/server/container_events.go (64%) create mode 100644 pkg/cri/server/container_update_resources.go create mode 100644 pkg/cri/server/metrics.go create mode 100644 pkg/cri/server/rdt_linux.go rename vendor/github.com/containerd/continuity/devices/mknod_unix.go => pkg/cri/server/rdt_stub_linux.go (74%) create mode 100644 pkg/cri/server/sandbox_stats.go create mode 100644 pkg/cri/server/sandbox_stats_linux.go create mode 100644 pkg/cri/server/sandbox_stats_list.go create mode 100644 pkg/cri/server/sandbox_stats_other.go create mode 100644 pkg/cri/server/sandbox_stats_windows.go create mode 100644 pkg/cri/store/stats/stats.go create mode 100644 pkg/failpoint/fail.go create mode 100644 pkg/failpoint/fail_test.go create mode 100644 pkg/kmutex/kmutex.go create mode 100644 pkg/kmutex/kmutex_test.go rename vendor/github.com/containerd/btrfs/ioctl.go => pkg/kmutex/noop.go (74%) create mode 100644 pkg/randutil/randutil.go rename {vendor/github.com/containerd/btrfs => pkg/runtimeoptions/v1}/doc.go (84%) create mode 100644 pkg/schedcore/prctl_linux.go create mode 100644 pkg/shutdown/shutdown.go create mode 100644 pkg/snapshotters/annotations.go create mode 100644 pkg/snapshotters/annotations_test.go create mode 100644 
platforms/cpuinfo_linux.go create mode 100644 platforms/cpuinfo_linux_test.go create mode 100644 platforms/cpuinfo_other.go create mode 100644 platforms/defaults_darwin.go rename platforms/{defaults_test.go => defaults_unix_test.go} (96%) rename platforms/{cpuinfo_test.go => platforms_windows_test.go} (58%) create mode 100644 plugin/plugin_test.go rename protobuf/plugin/{ => fieldpath}/helpers.go (66%) create mode 100644 releases/cri-containerd.DEPRECATED.txt delete mode 100644 releases/v1.5.1.toml delete mode 100644 releases/v1.5.3.toml delete mode 100644 releases/v1.5.4.toml delete mode 100644 releases/v1.5.5.toml delete mode 100644 releases/v1.5.6.toml delete mode 100644 releases/v1.5.8.toml delete mode 100644 releases/v1.5.9.toml create mode 100644 releases/v1.6.0.toml create mode 100644 releases/v1.6.1.toml create mode 100644 releases/v1.6.10.toml create mode 100644 releases/v1.6.11.toml create mode 100644 releases/v1.6.12.toml create mode 100644 releases/v1.6.13.toml rename releases/{v1.5.7.toml => v1.6.14.toml} (50%) create mode 100644 releases/v1.6.15.toml create mode 100644 releases/v1.6.16.toml create mode 100644 releases/v1.6.17.toml create mode 100644 releases/v1.6.18.toml create mode 100644 releases/v1.6.19.toml create mode 100644 releases/v1.6.2.toml create mode 100644 releases/v1.6.20.toml create mode 100644 releases/v1.6.21.toml create mode 100644 releases/v1.6.22.toml create mode 100644 releases/v1.6.23.toml create mode 100644 releases/v1.6.24.toml create mode 100644 releases/v1.6.3.toml create mode 100644 releases/v1.6.4.toml create mode 100644 releases/v1.6.5.toml create mode 100644 releases/v1.6.6.toml create mode 100644 releases/v1.6.7.toml rename releases/{v1.5.2.toml => v1.6.8.toml} (51%) create mode 100644 releases/v1.6.9.toml create mode 100644 remotes/docker/auth/fetch_test.go create mode 100644 remotes/docker/auth/parse_test.go create mode 100644 runtime/v2/runc/manager/manager_linux.go create mode 100644 
runtime/v2/runc/task/plugin/plugin_linux.go create mode 100644 runtime/v2/runc/task/service.go create mode 100644 runtime/v2/shim/util_test.go create mode 100644 runtime/v2/shim_load.go create mode 100644 script/setup/enable_docker_tls_on_windows.ps1 create mode 100644 script/setup/enable_ssh_windows.ps1 rename vendor/github.com/containerd/btrfs/Makefile => script/setup/install-failpoint-binaries (54%) mode change 100644 => 100755 create mode 100755 script/setup/install-runhcs-shim create mode 100644 script/setup/prepare_env_windows.ps1 create mode 100644 script/setup/prepare_windows_docker_helper.ps1 create mode 100755 script/verify-go-modules.sh create mode 100644 services/server/namespace.go rename vendor/github.com/containerd/cgroups/v2/rdma.go => services/tasks/local_darwin.go (56%) rename vendor/github.com/containerd/continuity/testutil/helpers.go => services/tasks/rdt_default.go (78%) create mode 100644 services/tasks/rdt_linux.go delete mode 100644 signals_unix.go delete mode 100644 signals_windows.go rename vendor/github.com/containerd/console/tc_darwin.go => snapshots/devmapper/blkdiscard/blkdiscard.go (50%) delete mode 100644 sys/stat_bsd.go delete mode 100644 sys/stat_openbsd.go delete mode 100644 sys/stat_unix.go create mode 100644 tracing/log.go create mode 100644 tracing/plugin/otlp.go rename vendor/github.com/containerd/continuity/fs/copy_darwinopenbsdsolaris.go => tracing/tracing.go (53%) delete mode 100644 vendor/github.com/Microsoft/go-winio/.gitignore delete mode 100644 vendor/github.com/Microsoft/go-winio/CODEOWNERS delete mode 100644 vendor/github.com/Microsoft/go-winio/LICENSE delete mode 100644 vendor/github.com/Microsoft/go-winio/README.md delete mode 100644 vendor/github.com/Microsoft/go-winio/backup.go delete mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/noop.go delete mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/strconv.go delete mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/tar.go delete mode 
100644 vendor/github.com/Microsoft/go-winio/ea.go delete mode 100644 vendor/github.com/Microsoft/go-winio/file.go delete mode 100644 vendor/github.com/Microsoft/go-winio/fileinfo.go delete mode 100644 vendor/github.com/Microsoft/go-winio/hvsock.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pipe.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/eventdatadescriptor.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/eventdescriptor.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/eventmetadata.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/providerglobal.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_32.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_64.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/HookTest.wprp delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/fs/fs_windows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go delete mode 
100644 vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/privilege.go delete mode 100644 vendor/github.com/Microsoft/go-winio/reparse.go delete mode 100644 vendor/github.com/Microsoft/go-winio/sd.go delete mode 100644 vendor/github.com/Microsoft/go-winio/syscall.go delete mode 100644 vendor/github.com/Microsoft/go-winio/vhd/vhd.go delete mode 100644 vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/.gitattributes delete mode 100644 vendor/github.com/Microsoft/hcsshim/.gitignore delete mode 100644 vendor/github.com/Microsoft/hcsshim/CODEOWNERS delete mode 100644 vendor/github.com/Microsoft/hcsshim/LICENSE delete mode 100644 vendor/github.com/Microsoft/hcsshim/README.md delete mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/doc.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/next.pb.txt delete mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto delete mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/doc.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/next.pb.txt delete mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/attach.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go delete mode 100644 
vendor/github.com/Microsoft/hcsshim/computestorage/detach.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/export.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/format.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/import.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/mount.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/setup.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/storage.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/container.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/errors.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcn.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go 
delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcsshim.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnsendpoint.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnsglobals.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnsnetwork.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnspolicy.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnspolicylist.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnssupport.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/interface.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go delete mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go delete mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go delete mode 
100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go delete mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go delete mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go delete mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/log/g.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/span.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go delete mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go delete mode 
100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/layer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go delete mode 100644 
vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/LICENSE delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/process.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go delete mode 100644 vendor/github.com/beorn7/perks/LICENSE delete mode 100644 vendor/github.com/beorn7/perks/quantile/exampledata.txt delete mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go delete mode 100644 vendor/github.com/bits-and-blooms/bitset/.gitignore delete mode 100644 vendor/github.com/bits-and-blooms/bitset/.travis.yml delete mode 100644 vendor/github.com/bits-and-blooms/bitset/LICENSE delete mode 100644 
vendor/github.com/bits-and-blooms/bitset/README.md delete mode 100644 vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml delete mode 100644 vendor/github.com/bits-and-blooms/bitset/bitset.go delete mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt.go delete mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_19.go delete mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go delete mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s delete mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go delete mode 100644 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go delete mode 100644 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go delete mode 100644 vendor/github.com/cespare/xxhash/v2/.travis.yml delete mode 100644 vendor/github.com/cespare/xxhash/v2/LICENSE.txt delete mode 100644 vendor/github.com/cespare/xxhash/v2/README.md delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash.go delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_other.go delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go delete mode 100644 vendor/github.com/cilium/ebpf/.clang-format delete mode 100644 vendor/github.com/cilium/ebpf/.gitignore delete mode 100644 vendor/github.com/cilium/ebpf/.golangci.yaml delete mode 100644 vendor/github.com/cilium/ebpf/ARCHITECTURE.md delete mode 100644 vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/cilium/ebpf/CONTRIBUTING.md delete mode 100644 vendor/github.com/cilium/ebpf/LICENSE delete mode 100644 vendor/github.com/cilium/ebpf/Makefile delete mode 100644 vendor/github.com/cilium/ebpf/README.md delete mode 100644 vendor/github.com/cilium/ebpf/asm/alu.go delete mode 100644 
vendor/github.com/cilium/ebpf/asm/alu_string.go delete mode 100644 vendor/github.com/cilium/ebpf/asm/doc.go delete mode 100644 vendor/github.com/cilium/ebpf/asm/func.go delete mode 100644 vendor/github.com/cilium/ebpf/asm/func_string.go delete mode 100644 vendor/github.com/cilium/ebpf/asm/instruction.go delete mode 100644 vendor/github.com/cilium/ebpf/asm/jump.go delete mode 100644 vendor/github.com/cilium/ebpf/asm/jump_string.go delete mode 100644 vendor/github.com/cilium/ebpf/asm/load_store.go delete mode 100644 vendor/github.com/cilium/ebpf/asm/load_store_string.go delete mode 100644 vendor/github.com/cilium/ebpf/asm/opcode.go delete mode 100644 vendor/github.com/cilium/ebpf/asm/opcode_string.go delete mode 100644 vendor/github.com/cilium/ebpf/asm/register.go delete mode 100644 vendor/github.com/cilium/ebpf/collection.go delete mode 100644 vendor/github.com/cilium/ebpf/doc.go delete mode 100644 vendor/github.com/cilium/ebpf/elf_reader.go delete mode 100644 vendor/github.com/cilium/ebpf/elf_reader_fuzz.go delete mode 100644 vendor/github.com/cilium/ebpf/info.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/btf/btf.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/btf/btf_types.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/btf/core.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/btf/doc.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/btf/ext_info.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/btf/fuzz.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/btf/strings.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/btf/types.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/cpu.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/elf.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/endian.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/errors.go 
delete mode 100644 vendor/github.com/cilium/ebpf/internal/fd.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/feature.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/io.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/pinning.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/ptr.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/ptr_32_be.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/ptr_32_le.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/ptr_64.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/syscall.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/syscall_string.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/unix/types_linux.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/unix/types_other.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/version.go delete mode 100644 vendor/github.com/cilium/ebpf/link/cgroup.go delete mode 100644 vendor/github.com/cilium/ebpf/link/doc.go delete mode 100644 vendor/github.com/cilium/ebpf/link/iter.go delete mode 100644 vendor/github.com/cilium/ebpf/link/kprobe.go delete mode 100644 vendor/github.com/cilium/ebpf/link/link.go delete mode 100644 vendor/github.com/cilium/ebpf/link/netns.go delete mode 100644 vendor/github.com/cilium/ebpf/link/perf_event.go delete mode 100644 vendor/github.com/cilium/ebpf/link/platform.go delete mode 100644 vendor/github.com/cilium/ebpf/link/program.go delete mode 100644 vendor/github.com/cilium/ebpf/link/raw_tracepoint.go delete mode 100644 vendor/github.com/cilium/ebpf/link/syscalls.go delete mode 100644 vendor/github.com/cilium/ebpf/link/tracepoint.go delete mode 100644 vendor/github.com/cilium/ebpf/link/uprobe.go delete mode 100644 vendor/github.com/cilium/ebpf/linker.go delete mode 100644 vendor/github.com/cilium/ebpf/map.go delete mode 100644 vendor/github.com/cilium/ebpf/marshalers.go delete mode 100644 vendor/github.com/cilium/ebpf/prog.go delete 
mode 100644 vendor/github.com/cilium/ebpf/run-tests.sh delete mode 100644 vendor/github.com/cilium/ebpf/syscalls.go delete mode 100644 vendor/github.com/cilium/ebpf/types.go delete mode 100644 vendor/github.com/cilium/ebpf/types_string.go delete mode 100644 vendor/github.com/containerd/btrfs/.gitignore delete mode 100644 vendor/github.com/containerd/btrfs/LICENSE delete mode 100644 vendor/github.com/containerd/btrfs/README.md delete mode 100644 vendor/github.com/containerd/btrfs/btrfs.c delete mode 100644 vendor/github.com/containerd/btrfs/btrfs.go delete mode 100644 vendor/github.com/containerd/btrfs/btrfs.h delete mode 100644 vendor/github.com/containerd/btrfs/helpers.go delete mode 100644 vendor/github.com/containerd/btrfs/info.go delete mode 100644 vendor/github.com/containerd/cgroups/.gitignore delete mode 100644 vendor/github.com/containerd/cgroups/LICENSE delete mode 100644 vendor/github.com/containerd/cgroups/Makefile delete mode 100644 vendor/github.com/containerd/cgroups/Protobuild.toml delete mode 100644 vendor/github.com/containerd/cgroups/README.md delete mode 100644 vendor/github.com/containerd/cgroups/Vagrantfile delete mode 100644 vendor/github.com/containerd/cgroups/blkio.go delete mode 100644 vendor/github.com/containerd/cgroups/cgroup.go delete mode 100644 vendor/github.com/containerd/cgroups/control.go delete mode 100644 vendor/github.com/containerd/cgroups/cpu.go delete mode 100644 vendor/github.com/containerd/cgroups/cpuacct.go delete mode 100644 vendor/github.com/containerd/cgroups/cpuset.go delete mode 100644 vendor/github.com/containerd/cgroups/devices.go delete mode 100644 vendor/github.com/containerd/cgroups/errors.go delete mode 100644 vendor/github.com/containerd/cgroups/freezer.go delete mode 100644 vendor/github.com/containerd/cgroups/hierarchy.go delete mode 100644 vendor/github.com/containerd/cgroups/hugetlb.go delete mode 100644 vendor/github.com/containerd/cgroups/memory.go delete mode 100644 
vendor/github.com/containerd/cgroups/named.go delete mode 100644 vendor/github.com/containerd/cgroups/net_cls.go delete mode 100644 vendor/github.com/containerd/cgroups/net_prio.go delete mode 100644 vendor/github.com/containerd/cgroups/opts.go delete mode 100644 vendor/github.com/containerd/cgroups/paths.go delete mode 100644 vendor/github.com/containerd/cgroups/perf_event.go delete mode 100644 vendor/github.com/containerd/cgroups/pids.go delete mode 100644 vendor/github.com/containerd/cgroups/rdma.go delete mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go delete mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt delete mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.proto delete mode 100644 vendor/github.com/containerd/cgroups/subsystem.go delete mode 100644 vendor/github.com/containerd/cgroups/systemd.go delete mode 100644 vendor/github.com/containerd/cgroups/ticks.go delete mode 100644 vendor/github.com/containerd/cgroups/utils.go delete mode 100644 vendor/github.com/containerd/cgroups/v1.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/cpu.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/devicefilter.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/ebpf.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/errors.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/io.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/manager.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/memory.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/paths.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/pids.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/state.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.go delete mode 100644 vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.txt delete mode 100644 vendor/github.com/containerd/cgroups/v2/stats/metrics.proto 
delete mode 100644 vendor/github.com/containerd/cgroups/v2/utils.go delete mode 100644 vendor/github.com/containerd/console/.golangci.yml delete mode 100644 vendor/github.com/containerd/console/LICENSE delete mode 100644 vendor/github.com/containerd/console/README.md delete mode 100644 vendor/github.com/containerd/console/console.go delete mode 100644 vendor/github.com/containerd/console/console_linux.go delete mode 100644 vendor/github.com/containerd/console/console_unix.go delete mode 100644 vendor/github.com/containerd/console/console_windows.go delete mode 100644 vendor/github.com/containerd/console/pty_freebsd_cgo.go delete mode 100644 vendor/github.com/containerd/console/pty_freebsd_nocgo.go delete mode 100644 vendor/github.com/containerd/console/tc_freebsd_cgo.go delete mode 100644 vendor/github.com/containerd/console/tc_freebsd_nocgo.go delete mode 100644 vendor/github.com/containerd/console/tc_linux.go delete mode 100644 vendor/github.com/containerd/console/tc_netbsd.go delete mode 100644 vendor/github.com/containerd/console/tc_openbsd_cgo.go delete mode 100644 vendor/github.com/containerd/console/tc_openbsd_nocgo.go delete mode 100644 vendor/github.com/containerd/console/tc_solaris_cgo.go delete mode 100644 vendor/github.com/containerd/console/tc_solaris_nocgo.go delete mode 100644 vendor/github.com/containerd/console/tc_unix.go delete mode 100644 vendor/github.com/containerd/continuity/.gitignore delete mode 100644 vendor/github.com/containerd/continuity/.golangci.yml delete mode 100644 vendor/github.com/containerd/continuity/.mailmap delete mode 100644 vendor/github.com/containerd/continuity/AUTHORS delete mode 100644 vendor/github.com/containerd/continuity/LICENSE delete mode 100644 vendor/github.com/containerd/continuity/Makefile delete mode 100644 vendor/github.com/containerd/continuity/README.md delete mode 100644 vendor/github.com/containerd/continuity/context.go delete mode 100644 vendor/github.com/containerd/continuity/devices/devices.go delete 
mode 100644 vendor/github.com/containerd/continuity/devices/devices_unix.go delete mode 100644 vendor/github.com/containerd/continuity/devices/mknod_freebsd.go delete mode 100644 vendor/github.com/containerd/continuity/digests.go delete mode 100644 vendor/github.com/containerd/continuity/driver/driver.go delete mode 100644 vendor/github.com/containerd/continuity/driver/driver_unix.go delete mode 100644 vendor/github.com/containerd/continuity/driver/driver_windows.go delete mode 100644 vendor/github.com/containerd/continuity/driver/lchmod_linux.go delete mode 100644 vendor/github.com/containerd/continuity/driver/utils.go delete mode 100644 vendor/github.com/containerd/continuity/fs/copy.go delete mode 100644 vendor/github.com/containerd/continuity/fs/copy_freebsd.go delete mode 100644 vendor/github.com/containerd/continuity/fs/copy_linux.go delete mode 100644 vendor/github.com/containerd/continuity/fs/copy_unix.go delete mode 100644 vendor/github.com/containerd/continuity/fs/copy_windows.go delete mode 100644 vendor/github.com/containerd/continuity/fs/diff.go delete mode 100644 vendor/github.com/containerd/continuity/fs/diff_unix.go delete mode 100644 vendor/github.com/containerd/continuity/fs/dtype_linux.go delete mode 100644 vendor/github.com/containerd/continuity/fs/du.go delete mode 100644 vendor/github.com/containerd/continuity/fs/du_unix.go delete mode 100644 vendor/github.com/containerd/continuity/fs/du_windows.go delete mode 100644 vendor/github.com/containerd/continuity/fs/fstest/compare.go delete mode 100644 vendor/github.com/containerd/continuity/fs/fstest/compare_unix.go delete mode 100644 vendor/github.com/containerd/continuity/fs/fstest/compare_windows.go delete mode 100644 vendor/github.com/containerd/continuity/fs/fstest/continuity_util.go delete mode 100644 vendor/github.com/containerd/continuity/fs/fstest/file.go delete mode 100644 vendor/github.com/containerd/continuity/fs/fstest/file_unix.go delete mode 100644 
vendor/github.com/containerd/continuity/fs/fstest/file_windows.go delete mode 100644 vendor/github.com/containerd/continuity/fs/fstest/testsuite.go delete mode 100644 vendor/github.com/containerd/continuity/fs/hardlink.go delete mode 100644 vendor/github.com/containerd/continuity/fs/hardlink_windows.go delete mode 100644 vendor/github.com/containerd/continuity/fs/path.go delete mode 100644 vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go delete mode 100644 vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go delete mode 100644 vendor/github.com/containerd/continuity/fs/time.go delete mode 100644 vendor/github.com/containerd/continuity/groups_unix.go delete mode 100644 vendor/github.com/containerd/continuity/hardlinks.go delete mode 100644 vendor/github.com/containerd/continuity/hardlinks_unix.go delete mode 100644 vendor/github.com/containerd/continuity/hardlinks_windows.go delete mode 100644 vendor/github.com/containerd/continuity/ioutils.go delete mode 100644 vendor/github.com/containerd/continuity/manifest.go delete mode 100644 vendor/github.com/containerd/continuity/pathdriver/path_driver.go delete mode 100644 vendor/github.com/containerd/continuity/proto/manifest.pb.go delete mode 100644 vendor/github.com/containerd/continuity/proto/manifest.proto delete mode 100644 vendor/github.com/containerd/continuity/resource.go delete mode 100644 vendor/github.com/containerd/continuity/resource_unix.go delete mode 100644 vendor/github.com/containerd/continuity/resource_windows.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/README.md delete mode 100644 vendor/github.com/containerd/continuity/sysx/generate.sh delete mode 100644 vendor/github.com/containerd/continuity/sysx/nodata_solaris.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/xattr.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go delete mode 100644 vendor/github.com/containerd/continuity/testutil/helpers_unix.go 
delete mode 100644 vendor/github.com/containerd/continuity/testutil/helpers_windows.go delete mode 100644 vendor/github.com/containerd/continuity/testutil/loopback/loopback_linux.go delete mode 100644 vendor/github.com/containerd/continuity/testutil/mount_linux.go delete mode 100644 vendor/github.com/containerd/continuity/testutil/mount_other.go delete mode 100644 vendor/github.com/containerd/fifo/.gitattributes delete mode 100644 vendor/github.com/containerd/fifo/.gitignore delete mode 100644 vendor/github.com/containerd/fifo/.golangci.yml delete mode 100644 vendor/github.com/containerd/fifo/LICENSE delete mode 100644 vendor/github.com/containerd/fifo/Makefile delete mode 100644 vendor/github.com/containerd/fifo/errors.go delete mode 100644 vendor/github.com/containerd/fifo/fifo.go delete mode 100644 vendor/github.com/containerd/fifo/handle_linux.go delete mode 100644 vendor/github.com/containerd/fifo/handle_nolinux.go delete mode 100644 vendor/github.com/containerd/fifo/raw.go delete mode 100644 vendor/github.com/containerd/fifo/readme.md delete mode 100644 vendor/github.com/containerd/go-cni/.golangci.yml delete mode 100644 vendor/github.com/containerd/go-cni/LICENSE delete mode 100644 vendor/github.com/containerd/go-cni/README.md delete mode 100644 vendor/github.com/containerd/go-cni/cni.go delete mode 100644 vendor/github.com/containerd/go-cni/deprecated.go delete mode 100644 vendor/github.com/containerd/go-cni/errors.go delete mode 100644 vendor/github.com/containerd/go-cni/helper.go delete mode 100644 vendor/github.com/containerd/go-cni/namespace.go delete mode 100644 vendor/github.com/containerd/go-cni/namespace_opts.go delete mode 100644 vendor/github.com/containerd/go-cni/opts.go delete mode 100644 vendor/github.com/containerd/go-cni/result.go delete mode 100644 vendor/github.com/containerd/go-cni/testutils.go delete mode 100644 vendor/github.com/containerd/go-cni/types.go delete mode 100644 vendor/github.com/containerd/go-runc/.travis.yml delete mode 
100644 vendor/github.com/containerd/go-runc/LICENSE delete mode 100644 vendor/github.com/containerd/go-runc/README.md delete mode 100644 vendor/github.com/containerd/go-runc/command_linux.go delete mode 100644 vendor/github.com/containerd/go-runc/console.go delete mode 100644 vendor/github.com/containerd/go-runc/container.go delete mode 100644 vendor/github.com/containerd/go-runc/events.go delete mode 100644 vendor/github.com/containerd/go-runc/io.go delete mode 100644 vendor/github.com/containerd/go-runc/io_unix.go delete mode 100644 vendor/github.com/containerd/go-runc/io_windows.go delete mode 100644 vendor/github.com/containerd/go-runc/monitor.go delete mode 100644 vendor/github.com/containerd/go-runc/runc.go delete mode 100644 vendor/github.com/containerd/go-runc/runc_unix.go delete mode 100644 vendor/github.com/containerd/go-runc/runc_windows.go delete mode 100644 vendor/github.com/containerd/go-runc/utils.go rename vendor/github.com/containerd/{cgroups/v2/hugetlb.go => imgcrypt/images/encryption/any.go} (62%) delete mode 100644 vendor/github.com/containerd/ttrpc/.gitignore delete mode 100644 vendor/github.com/containerd/ttrpc/LICENSE delete mode 100644 vendor/github.com/containerd/ttrpc/README.md delete mode 100644 vendor/github.com/containerd/ttrpc/channel.go delete mode 100644 vendor/github.com/containerd/ttrpc/client.go delete mode 100644 vendor/github.com/containerd/ttrpc/codec.go delete mode 100644 vendor/github.com/containerd/ttrpc/config.go delete mode 100644 vendor/github.com/containerd/ttrpc/handshake.go delete mode 100644 vendor/github.com/containerd/ttrpc/interceptor.go delete mode 100644 vendor/github.com/containerd/ttrpc/metadata.go delete mode 100644 vendor/github.com/containerd/ttrpc/plugin/generator.go delete mode 100644 vendor/github.com/containerd/ttrpc/server.go delete mode 100644 vendor/github.com/containerd/ttrpc/services.go delete mode 100644 vendor/github.com/containerd/ttrpc/types.go delete mode 100644 
vendor/github.com/containerd/ttrpc/unixcreds_linux.go delete mode 100644 vendor/github.com/containerd/typeurl/.gitignore delete mode 100644 vendor/github.com/containerd/typeurl/LICENSE delete mode 100644 vendor/github.com/containerd/typeurl/README.md delete mode 100644 vendor/github.com/containerd/typeurl/doc.go delete mode 100644 vendor/github.com/containerd/typeurl/types.go delete mode 100644 vendor/github.com/containernetworking/cni/LICENSE delete mode 100644 vendor/github.com/containernetworking/cni/libcni/api.go delete mode 100644 vendor/github.com/containernetworking/cni/libcni/conf.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/args.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/exec.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/find.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/types/020/types.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/types/args.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/types/current/types.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/types/types.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/utils/utils.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/version/conf.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/version/plugin.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/version/reconcile.go delete mode 100644 vendor/github.com/containernetworking/cni/pkg/version/version.go delete mode 100644 vendor/github.com/containernetworking/plugins/LICENSE 
delete mode 100644 vendor/github.com/containernetworking/plugins/pkg/ns/README.md delete mode 100644 vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go delete mode 100644 vendor/github.com/containers/ocicrypt/.travis.yml delete mode 100644 vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md delete mode 100644 vendor/github.com/containers/ocicrypt/LICENSE delete mode 100644 vendor/github.com/containers/ocicrypt/MAINTAINERS delete mode 100644 vendor/github.com/containers/ocicrypt/Makefile delete mode 100644 vendor/github.com/containers/ocicrypt/README.md delete mode 100644 vendor/github.com/containers/ocicrypt/SECURITY.md delete mode 100644 vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go delete mode 100644 vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go delete mode 100644 vendor/github.com/containers/ocicrypt/config/config.go delete mode 100644 vendor/github.com/containers/ocicrypt/config/constructors.go delete mode 100644 vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go delete mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go delete mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go delete mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go delete mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go delete mode 100644 vendor/github.com/containers/ocicrypt/encryption.go delete mode 100644 vendor/github.com/containers/ocicrypt/gpg.go delete mode 100644 vendor/github.com/containers/ocicrypt/gpgvault.go delete mode 100644 vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go delete mode 100644 vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go delete mode 100644 vendor/github.com/containers/ocicrypt/keywrap/keywrap.go delete mode 100644 vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go delete mode 100644 
vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go delete mode 100644 vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go delete mode 100644 vendor/github.com/containers/ocicrypt/reader.go delete mode 100644 vendor/github.com/containers/ocicrypt/spec/spec.go delete mode 100644 vendor/github.com/containers/ocicrypt/utils/delayedreader.go delete mode 100644 vendor/github.com/containers/ocicrypt/utils/ioutils.go delete mode 100644 vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go delete mode 100644 vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto delete mode 100644 vendor/github.com/containers/ocicrypt/utils/testing.go delete mode 100644 vendor/github.com/containers/ocicrypt/utils/utils.go delete mode 100644 vendor/github.com/coreos/go-systemd/v22/LICENSE delete mode 100644 vendor/github.com/coreos/go-systemd/v22/NOTICE delete mode 100644 vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go delete mode 100644 vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go delete mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go delete mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/methods.go delete mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/properties.go delete mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/set.go delete mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go delete mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go delete mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md delete mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go delete mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go delete mode 100644 vendor/github.com/davecgh/go-spew/LICENSE delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go delete mode 100644 
vendor/github.com/davecgh/go-spew/spew/common.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go delete mode 100644 vendor/github.com/docker/go-events/.gitignore delete mode 100644 vendor/github.com/docker/go-events/CONTRIBUTING.md delete mode 100644 vendor/github.com/docker/go-events/LICENSE delete mode 100644 vendor/github.com/docker/go-events/MAINTAINERS delete mode 100644 vendor/github.com/docker/go-events/README.md delete mode 100644 vendor/github.com/docker/go-events/broadcast.go delete mode 100644 vendor/github.com/docker/go-events/channel.go delete mode 100644 vendor/github.com/docker/go-events/errors.go delete mode 100644 vendor/github.com/docker/go-events/event.go delete mode 100644 vendor/github.com/docker/go-events/filter.go delete mode 100644 vendor/github.com/docker/go-events/queue.go delete mode 100644 vendor/github.com/docker/go-events/retry.go delete mode 100644 vendor/github.com/docker/go-metrics/CONTRIBUTING.md delete mode 100644 vendor/github.com/docker/go-metrics/LICENSE delete mode 100644 vendor/github.com/docker/go-metrics/LICENSE.docs delete mode 100644 vendor/github.com/docker/go-metrics/NOTICE delete mode 100644 vendor/github.com/docker/go-metrics/README.md delete mode 100644 vendor/github.com/docker/go-metrics/counter.go delete mode 100644 vendor/github.com/docker/go-metrics/docs.go delete mode 100644 vendor/github.com/docker/go-metrics/gauge.go delete mode 100644 vendor/github.com/docker/go-metrics/handler.go delete mode 100644 vendor/github.com/docker/go-metrics/helpers.go delete mode 100644 vendor/github.com/docker/go-metrics/namespace.go delete mode 100644 vendor/github.com/docker/go-metrics/register.go delete mode 100644 
vendor/github.com/docker/go-metrics/timer.go delete mode 100644 vendor/github.com/docker/go-metrics/unit.go delete mode 100644 vendor/github.com/docker/go-units/CONTRIBUTING.md delete mode 100644 vendor/github.com/docker/go-units/LICENSE delete mode 100644 vendor/github.com/docker/go-units/MAINTAINERS delete mode 100644 vendor/github.com/docker/go-units/README.md delete mode 100644 vendor/github.com/docker/go-units/circle.yml delete mode 100644 vendor/github.com/docker/go-units/duration.go delete mode 100644 vendor/github.com/docker/go-units/size.go delete mode 100644 vendor/github.com/docker/go-units/ulimit.go delete mode 100644 vendor/github.com/docker/spdystream/CONTRIBUTING.md delete mode 100644 vendor/github.com/docker/spdystream/LICENSE delete mode 100644 vendor/github.com/docker/spdystream/LICENSE.docs delete mode 100644 vendor/github.com/docker/spdystream/MAINTAINERS delete mode 100644 vendor/github.com/docker/spdystream/README.md delete mode 100644 vendor/github.com/docker/spdystream/connection.go delete mode 100644 vendor/github.com/docker/spdystream/handlers.go delete mode 100644 vendor/github.com/docker/spdystream/priority.go delete mode 100644 vendor/github.com/docker/spdystream/spdy/dictionary.go delete mode 100644 vendor/github.com/docker/spdystream/spdy/read.go delete mode 100644 vendor/github.com/docker/spdystream/spdy/types.go delete mode 100644 vendor/github.com/docker/spdystream/spdy/write.go delete mode 100644 vendor/github.com/docker/spdystream/stream.go delete mode 100644 vendor/github.com/docker/spdystream/utils.go delete mode 100644 vendor/github.com/emicklei/go-restful/.gitignore delete mode 100644 vendor/github.com/emicklei/go-restful/.travis.yml delete mode 100644 vendor/github.com/emicklei/go-restful/CHANGES.md delete mode 100644 vendor/github.com/emicklei/go-restful/LICENSE delete mode 100644 vendor/github.com/emicklei/go-restful/Makefile delete mode 100644 vendor/github.com/emicklei/go-restful/README.md delete mode 100644 
vendor/github.com/emicklei/go-restful/Srcfile delete mode 100644 vendor/github.com/emicklei/go-restful/bench_test.sh delete mode 100644 vendor/github.com/emicklei/go-restful/compress.go delete mode 100644 vendor/github.com/emicklei/go-restful/compressor_cache.go delete mode 100644 vendor/github.com/emicklei/go-restful/compressor_pools.go delete mode 100644 vendor/github.com/emicklei/go-restful/compressors.go delete mode 100644 vendor/github.com/emicklei/go-restful/constants.go delete mode 100644 vendor/github.com/emicklei/go-restful/container.go delete mode 100644 vendor/github.com/emicklei/go-restful/cors_filter.go delete mode 100644 vendor/github.com/emicklei/go-restful/coverage.sh delete mode 100644 vendor/github.com/emicklei/go-restful/curly.go delete mode 100644 vendor/github.com/emicklei/go-restful/curly_route.go delete mode 100644 vendor/github.com/emicklei/go-restful/doc.go delete mode 100644 vendor/github.com/emicklei/go-restful/entity_accessors.go delete mode 100644 vendor/github.com/emicklei/go-restful/filter.go delete mode 100644 vendor/github.com/emicklei/go-restful/json.go delete mode 100644 vendor/github.com/emicklei/go-restful/jsoniter.go delete mode 100644 vendor/github.com/emicklei/go-restful/jsr311.go delete mode 100644 vendor/github.com/emicklei/go-restful/log/log.go delete mode 100644 vendor/github.com/emicklei/go-restful/logger.go delete mode 100644 vendor/github.com/emicklei/go-restful/mime.go delete mode 100644 vendor/github.com/emicklei/go-restful/options_filter.go delete mode 100644 vendor/github.com/emicklei/go-restful/parameter.go delete mode 100644 vendor/github.com/emicklei/go-restful/path_expression.go delete mode 100644 vendor/github.com/emicklei/go-restful/path_processor.go delete mode 100644 vendor/github.com/emicklei/go-restful/request.go delete mode 100644 vendor/github.com/emicklei/go-restful/response.go delete mode 100644 vendor/github.com/emicklei/go-restful/route.go delete mode 100644 
vendor/github.com/emicklei/go-restful/route_builder.go delete mode 100644 vendor/github.com/emicklei/go-restful/router.go delete mode 100644 vendor/github.com/emicklei/go-restful/service_error.go delete mode 100644 vendor/github.com/emicklei/go-restful/web_service.go delete mode 100644 vendor/github.com/emicklei/go-restful/web_service_container.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitignore delete mode 100644 vendor/github.com/fsnotify/fsnotify/.travis.yml delete mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS delete mode 100644 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md delete mode 100644 vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md delete mode 100644 vendor/github.com/fsnotify/fsnotify/LICENSE delete mode 100644 vendor/github.com/fsnotify/fsnotify/README.md delete mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go delete mode 100644 vendor/github.com/go-logr/logr/LICENSE delete mode 100644 vendor/github.com/go-logr/logr/README.md delete mode 100644 vendor/github.com/go-logr/logr/logr.go delete mode 100644 vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md delete mode 100644 vendor/github.com/godbus/dbus/v5/LICENSE delete mode 100644 vendor/github.com/godbus/dbus/v5/MAINTAINERS delete mode 100644 vendor/github.com/godbus/dbus/v5/README.markdown delete mode 100644 vendor/github.com/godbus/dbus/v5/auth.go 
delete mode 100644 vendor/github.com/godbus/dbus/v5/auth_anonymous.go delete mode 100644 vendor/github.com/godbus/dbus/v5/auth_external.go delete mode 100644 vendor/github.com/godbus/dbus/v5/auth_sha1.go delete mode 100644 vendor/github.com/godbus/dbus/v5/call.go delete mode 100644 vendor/github.com/godbus/dbus/v5/conn.go delete mode 100644 vendor/github.com/godbus/dbus/v5/conn_darwin.go delete mode 100644 vendor/github.com/godbus/dbus/v5/conn_other.go delete mode 100644 vendor/github.com/godbus/dbus/v5/conn_unix.go delete mode 100644 vendor/github.com/godbus/dbus/v5/conn_windows.go delete mode 100644 vendor/github.com/godbus/dbus/v5/dbus.go delete mode 100644 vendor/github.com/godbus/dbus/v5/decoder.go delete mode 100644 vendor/github.com/godbus/dbus/v5/default_handler.go delete mode 100644 vendor/github.com/godbus/dbus/v5/doc.go delete mode 100644 vendor/github.com/godbus/dbus/v5/encoder.go delete mode 100644 vendor/github.com/godbus/dbus/v5/export.go delete mode 100644 vendor/github.com/godbus/dbus/v5/homedir.go delete mode 100644 vendor/github.com/godbus/dbus/v5/homedir_dynamic.go delete mode 100644 vendor/github.com/godbus/dbus/v5/homedir_static.go delete mode 100644 vendor/github.com/godbus/dbus/v5/match.go delete mode 100644 vendor/github.com/godbus/dbus/v5/message.go delete mode 100644 vendor/github.com/godbus/dbus/v5/object.go delete mode 100644 vendor/github.com/godbus/dbus/v5/sequence.go delete mode 100644 vendor/github.com/godbus/dbus/v5/sequential_handler.go delete mode 100644 vendor/github.com/godbus/dbus/v5/server_interfaces.go delete mode 100644 vendor/github.com/godbus/dbus/v5/sig.go delete mode 100644 vendor/github.com/godbus/dbus/v5/transport_darwin.go delete mode 100644 vendor/github.com/godbus/dbus/v5/transport_generic.go delete mode 100644 vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go delete mode 100644 vendor/github.com/godbus/dbus/v5/transport_tcp.go delete mode 100644 vendor/github.com/godbus/dbus/v5/transport_unix.go delete mode 
100644 vendor/github.com/godbus/dbus/v5/transport_unixcred_dragonfly.go delete mode 100644 vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go delete mode 100644 vendor/github.com/godbus/dbus/v5/transport_unixcred_linux.go delete mode 100644 vendor/github.com/godbus/dbus/v5/transport_unixcred_openbsd.go delete mode 100644 vendor/github.com/godbus/dbus/v5/variant.go delete mode 100644 vendor/github.com/godbus/dbus/v5/variant_lexer.go delete mode 100644 vendor/github.com/godbus/dbus/v5/variant_parser.go delete mode 100644 vendor/github.com/gogo/googleapis/LICENSE delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/code.pb.go delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/code.proto delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/error_details.proto delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/status.pb.go delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/status.proto delete mode 100644 vendor/github.com/gogo/protobuf/AUTHORS delete mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS delete mode 100644 vendor/github.com/gogo/protobuf/LICENSE delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/compare/compare.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/compare/comparetest.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/description/description.go delete 
mode 100644 vendor/github.com/gogo/protobuf/plugin/description/descriptiontest.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/equal/equal.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/equal/equaltest.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/face/face.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/face/facetest.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/gostring/gostring.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/gostring/gostringtest.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/marshalto/marshalto.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/populate/populate.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/size/size.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/size/sizetest.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/stringer/stringer.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/stringer/stringertest.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/testgen/testgen.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/union/union.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/union/uniontest.go delete mode 100644 vendor/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile delete mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/custom_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/deprecated.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go delete mode 100644 
vendor/github.com/gogo/protobuf/proto/duration.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go delete mode 100644 
vendor/github.com/gogo/protobuf/proto/wrappers.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/internal/remap/remap.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc/grpc.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go delete mode 100644 vendor/github.com/gogo/protobuf/types/any.go delete mode 100644 vendor/github.com/gogo/protobuf/types/any.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/types/api.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/types/doc.go delete mode 100644 vendor/github.com/gogo/protobuf/types/duration.go delete mode 100644 vendor/github.com/gogo/protobuf/types/duration.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/types/duration_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/types/empty.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/types/field_mask.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/types/protosize.go delete mode 100644 vendor/github.com/gogo/protobuf/types/source_context.pb.go delete mode 100644 
vendor/github.com/gogo/protobuf/types/struct.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/types/timestamp.go delete mode 100644 vendor/github.com/gogo/protobuf/types/timestamp.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/types/timestamp_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/types/type.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/types/wrappers.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/types/wrappers_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/vanity/command/command.go delete mode 100644 vendor/github.com/gogo/protobuf/vanity/enum.go delete mode 100644 vendor/github.com/gogo/protobuf/vanity/field.go delete mode 100644 vendor/github.com/gogo/protobuf/vanity/file.go delete mode 100644 vendor/github.com/gogo/protobuf/vanity/foreach.go delete mode 100644 vendor/github.com/gogo/protobuf/vanity/msg.go delete mode 100644 vendor/github.com/golang/groupcache/LICENSE delete mode 100644 vendor/github.com/golang/groupcache/lru/lru.go delete mode 100644 vendor/github.com/golang/protobuf/AUTHORS delete mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS delete mode 100644 vendor/github.com/golang/protobuf/LICENSE delete mode 100644 vendor/github.com/golang/protobuf/proto/clone.go delete mode 100644 vendor/github.com/golang/protobuf/proto/decode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go delete mode 100644 vendor/github.com/golang/protobuf/proto/discard.go delete mode 100644 vendor/github.com/golang/protobuf/proto/encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/equal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go delete mode 100644 vendor/github.com/golang/protobuf/proto/lib.go delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go 
delete mode 100644 vendor/github.com/golang/protobuf/proto/properties.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto delete mode 100644 vendor/github.com/google/go-cmp/LICENSE delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go delete mode 100644 
vendor/github.com/google/go-cmp/cmp/export_unsafe.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/options.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/path.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go delete mode 100644 vendor/github.com/google/gofuzz/.travis.yml delete mode 100644 vendor/github.com/google/gofuzz/CONTRIBUTING.md delete mode 100644 vendor/github.com/google/gofuzz/LICENSE delete mode 100644 vendor/github.com/google/gofuzz/README.md delete mode 100644 vendor/github.com/google/gofuzz/fuzz.go delete 
mode 100644 vendor/github.com/google/uuid/.travis.yml delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS delete mode 100644 vendor/github.com/google/uuid/LICENSE delete mode 100644 vendor/github.com/google/uuid/README.md delete mode 100644 vendor/github.com/google/uuid/dce.go delete mode 100644 vendor/github.com/google/uuid/doc.go delete mode 100644 vendor/github.com/google/uuid/hash.go delete mode 100644 vendor/github.com/google/uuid/marshal.go delete mode 100644 vendor/github.com/google/uuid/node.go delete mode 100644 vendor/github.com/google/uuid/node_js.go delete mode 100644 vendor/github.com/google/uuid/node_net.go delete mode 100644 vendor/github.com/google/uuid/sql.go delete mode 100644 vendor/github.com/google/uuid/time.go delete mode 100644 vendor/github.com/google/uuid/util.go delete mode 100644 vendor/github.com/google/uuid/uuid.go delete mode 100644 vendor/github.com/google/uuid/version1.go delete mode 100644 vendor/github.com/google/uuid/version4.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go delete mode 100644 
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go delete mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE delete mode 100644 vendor/github.com/hashicorp/errwrap/README.md delete mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/.travis.yml delete mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile delete mode 100644 vendor/github.com/hashicorp/go-multierror/README.md delete mode 100644 vendor/github.com/hashicorp/go-multierror/append.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/format.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go delete mode 100644 vendor/github.com/imdario/mergo/.deepsource.toml delete mode 100644 vendor/github.com/imdario/mergo/.gitignore delete mode 100644 vendor/github.com/imdario/mergo/.travis.yml delete mode 100644 vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/imdario/mergo/LICENSE delete mode 100644 vendor/github.com/imdario/mergo/README.md delete mode 100644 vendor/github.com/imdario/mergo/doc.go delete mode 100644 vendor/github.com/imdario/mergo/map.go delete mode 100644 vendor/github.com/imdario/mergo/merge.go delete mode 100644 vendor/github.com/imdario/mergo/mergo.go delete mode 100644 vendor/github.com/json-iterator/go/.codecov.yml delete mode 100644 vendor/github.com/json-iterator/go/.gitignore delete mode 100644 vendor/github.com/json-iterator/go/.travis.yml delete mode 100644 
vendor/github.com/json-iterator/go/Gopkg.lock delete mode 100644 vendor/github.com/json-iterator/go/Gopkg.toml delete mode 100644 vendor/github.com/json-iterator/go/LICENSE delete mode 100644 vendor/github.com/json-iterator/go/README.md delete mode 100644 vendor/github.com/json-iterator/go/adapter.go delete mode 100644 vendor/github.com/json-iterator/go/any.go delete mode 100644 vendor/github.com/json-iterator/go/any_array.go delete mode 100644 vendor/github.com/json-iterator/go/any_bool.go delete mode 100644 vendor/github.com/json-iterator/go/any_float.go delete mode 100644 vendor/github.com/json-iterator/go/any_int32.go delete mode 100644 vendor/github.com/json-iterator/go/any_int64.go delete mode 100644 vendor/github.com/json-iterator/go/any_invalid.go delete mode 100644 vendor/github.com/json-iterator/go/any_nil.go delete mode 100644 vendor/github.com/json-iterator/go/any_number.go delete mode 100644 vendor/github.com/json-iterator/go/any_object.go delete mode 100644 vendor/github.com/json-iterator/go/any_str.go delete mode 100644 vendor/github.com/json-iterator/go/any_uint32.go delete mode 100644 vendor/github.com/json-iterator/go/any_uint64.go delete mode 100644 vendor/github.com/json-iterator/go/build.sh delete mode 100644 vendor/github.com/json-iterator/go/config.go delete mode 100644 vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md delete mode 100644 vendor/github.com/json-iterator/go/iter.go delete mode 100644 vendor/github.com/json-iterator/go/iter_array.go delete mode 100644 vendor/github.com/json-iterator/go/iter_float.go delete mode 100644 vendor/github.com/json-iterator/go/iter_int.go delete mode 100644 vendor/github.com/json-iterator/go/iter_object.go delete mode 100644 vendor/github.com/json-iterator/go/iter_skip.go delete mode 100644 vendor/github.com/json-iterator/go/iter_skip_sloppy.go delete mode 100644 vendor/github.com/json-iterator/go/iter_skip_strict.go delete mode 100644 vendor/github.com/json-iterator/go/iter_str.go delete 
mode 100644 vendor/github.com/json-iterator/go/jsoniter.go delete mode 100644 vendor/github.com/json-iterator/go/pool.go delete mode 100644 vendor/github.com/json-iterator/go/reflect.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_array.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_dynamic.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_extension.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_json_number.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_json_raw_message.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_map.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_marshaler.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_native.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_optional.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_slice.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_struct_decoder.go delete mode 100644 vendor/github.com/json-iterator/go/reflect_struct_encoder.go delete mode 100644 vendor/github.com/json-iterator/go/stream.go delete mode 100644 vendor/github.com/json-iterator/go/stream_float.go delete mode 100644 vendor/github.com/json-iterator/go/stream_int.go delete mode 100644 vendor/github.com/json-iterator/go/stream_str.go delete mode 100644 vendor/github.com/json-iterator/go/test.sh delete mode 100644 vendor/github.com/klauspost/compress/LICENSE delete mode 100644 vendor/github.com/klauspost/compress/fse/README.md delete mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go delete mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go delete mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go delete mode 100644 vendor/github.com/klauspost/compress/fse/compress.go delete mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go delete mode 100644 vendor/github.com/klauspost/compress/fse/fse.go delete mode 
100644 vendor/github.com/klauspost/compress/huff0/.gitignore delete mode 100644 vendor/github.com/klauspost/compress/huff0/README.md delete mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go delete mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go delete mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go delete mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go delete mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/.gitignore delete mode 100644 vendor/github.com/klauspost/compress/snappy/AUTHORS delete mode 100644 vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS delete mode 100644 vendor/github.com/klauspost/compress/snappy/LICENSE delete mode 100644 vendor/github.com/klauspost/compress/snappy/README delete mode 100644 vendor/github.com/klauspost/compress/snappy/decode.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/decode_amd64.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/decode_amd64.s delete mode 100644 vendor/github.com/klauspost/compress/snappy/decode_other.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/encode.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/encode_amd64.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/encode_amd64.s delete mode 100644 vendor/github.com/klauspost/compress/snappy/encode_other.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/runbench.cmd delete mode 100644 vendor/github.com/klauspost/compress/snappy/snappy.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/README.md delete mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go delete mode 100644 
vendor/github.com/klauspost/compress/zstd/blockdec.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/history.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go delete mode 100644 
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go delete mode 100644 vendor/github.com/miekg/pkcs11/.gitignore delete mode 100644 vendor/github.com/miekg/pkcs11/.travis.yml delete mode 100644 vendor/github.com/miekg/pkcs11/LICENSE delete mode 100644 vendor/github.com/miekg/pkcs11/Makefile.release delete mode 100644 vendor/github.com/miekg/pkcs11/README.md delete mode 100644 vendor/github.com/miekg/pkcs11/const.go delete mode 100644 vendor/github.com/miekg/pkcs11/error.go delete mode 100644 vendor/github.com/miekg/pkcs11/hsm.db delete mode 100644 vendor/github.com/miekg/pkcs11/params.go delete mode 100644 vendor/github.com/miekg/pkcs11/pkcs11.go delete mode 100644 vendor/github.com/miekg/pkcs11/pkcs11.h delete mode 100644 vendor/github.com/miekg/pkcs11/pkcs11f.h delete mode 100644 
vendor/github.com/miekg/pkcs11/pkcs11go.h delete mode 100644 vendor/github.com/miekg/pkcs11/pkcs11t.h delete mode 100644 vendor/github.com/miekg/pkcs11/release.go delete mode 100644 vendor/github.com/miekg/pkcs11/softhsm.conf delete mode 100644 vendor/github.com/miekg/pkcs11/softhsm2.conf delete mode 100644 vendor/github.com/miekg/pkcs11/types.go delete mode 100644 vendor/github.com/miekg/pkcs11/vendor.go delete mode 100644 vendor/github.com/mistifyio/go-zfs/.gitignore delete mode 100644 vendor/github.com/mistifyio/go-zfs/.travis.yml delete mode 100644 vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md delete mode 100644 vendor/github.com/mistifyio/go-zfs/LICENSE delete mode 100644 vendor/github.com/mistifyio/go-zfs/README.md delete mode 100644 vendor/github.com/mistifyio/go-zfs/Vagrantfile delete mode 100644 vendor/github.com/mistifyio/go-zfs/error.go delete mode 100644 vendor/github.com/mistifyio/go-zfs/utils.go delete mode 100644 vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go delete mode 100644 vendor/github.com/mistifyio/go-zfs/utils_solaris.go delete mode 100644 vendor/github.com/mistifyio/go-zfs/zfs.go delete mode 100644 vendor/github.com/mistifyio/go-zfs/zpool.go delete mode 100644 vendor/github.com/moby/locker/LICENSE delete mode 100644 vendor/github.com/moby/locker/README.md delete mode 100644 vendor/github.com/moby/locker/locker.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/LICENSE delete mode 100644 vendor/github.com/moby/sys/mountinfo/doc.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_linux.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_unix.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go delete mode 100644 
vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go delete mode 100644 vendor/github.com/moby/sys/symlink/LICENSE delete mode 100644 vendor/github.com/moby/sys/symlink/LICENSE.APACHE delete mode 100644 vendor/github.com/moby/sys/symlink/LICENSE.BSD delete mode 100644 vendor/github.com/moby/sys/symlink/README.md delete mode 100644 vendor/github.com/moby/sys/symlink/doc.go delete mode 100644 vendor/github.com/moby/sys/symlink/fs.go delete mode 100644 vendor/github.com/moby/sys/symlink/fs_unix.go delete mode 100644 vendor/github.com/moby/sys/symlink/fs_windows.go delete mode 100644 vendor/github.com/modern-go/concurrent/.gitignore delete mode 100644 vendor/github.com/modern-go/concurrent/.travis.yml delete mode 100644 vendor/github.com/modern-go/concurrent/LICENSE delete mode 100644 vendor/github.com/modern-go/concurrent/README.md delete mode 100644 vendor/github.com/modern-go/concurrent/executor.go delete mode 100644 vendor/github.com/modern-go/concurrent/go_above_19.go delete mode 100644 vendor/github.com/modern-go/concurrent/go_below_19.go delete mode 100644 vendor/github.com/modern-go/concurrent/log.go delete mode 100644 vendor/github.com/modern-go/concurrent/test.sh delete mode 100644 vendor/github.com/modern-go/concurrent/unbounded_executor.go delete mode 100644 vendor/github.com/modern-go/reflect2/.gitignore delete mode 100644 vendor/github.com/modern-go/reflect2/.travis.yml delete mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.lock delete mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.toml delete mode 100644 vendor/github.com/modern-go/reflect2/LICENSE delete mode 100644 vendor/github.com/modern-go/reflect2/README.md delete mode 100644 vendor/github.com/modern-go/reflect2/go_above_17.go delete mode 100644 vendor/github.com/modern-go/reflect2/go_above_19.go delete mode 100644 vendor/github.com/modern-go/reflect2/go_below_17.go delete mode 100644 
vendor/github.com/modern-go/reflect2/go_below_19.go delete mode 100644 vendor/github.com/modern-go/reflect2/reflect2.go delete mode 100644 vendor/github.com/modern-go/reflect2/reflect2_amd64.s delete mode 100644 vendor/github.com/modern-go/reflect2/reflect2_kind.go delete mode 100644 vendor/github.com/modern-go/reflect2/relfect2_386.s delete mode 100644 vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s delete mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm.s delete mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm64.s delete mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mips64x.s delete mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mipsx.s delete mode 100644 vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s delete mode 100644 vendor/github.com/modern-go/reflect2/relfect2_s390x.s delete mode 100644 vendor/github.com/modern-go/reflect2/safe_field.go delete mode 100644 vendor/github.com/modern-go/reflect2/safe_map.go delete mode 100644 vendor/github.com/modern-go/reflect2/safe_slice.go delete mode 100644 vendor/github.com/modern-go/reflect2/safe_struct.go delete mode 100644 vendor/github.com/modern-go/reflect2/safe_type.go delete mode 100644 vendor/github.com/modern-go/reflect2/test.sh delete mode 100644 vendor/github.com/modern-go/reflect2/type_map.go delete mode 100644 vendor/github.com/modern-go/reflect2/unsafe_array.go delete mode 100644 vendor/github.com/modern-go/reflect2/unsafe_eface.go delete mode 100644 vendor/github.com/modern-go/reflect2/unsafe_field.go delete mode 100644 vendor/github.com/modern-go/reflect2/unsafe_iface.go delete mode 100644 vendor/github.com/modern-go/reflect2/unsafe_link.go delete mode 100644 vendor/github.com/modern-go/reflect2/unsafe_map.go delete mode 100644 vendor/github.com/modern-go/reflect2/unsafe_ptr.go delete mode 100644 vendor/github.com/modern-go/reflect2/unsafe_slice.go delete mode 100644 vendor/github.com/modern-go/reflect2/unsafe_struct.go delete mode 100644 
vendor/github.com/modern-go/reflect2/unsafe_type.go delete mode 100644 vendor/github.com/opencontainers/go-digest/.mailmap delete mode 100644 vendor/github.com/opencontainers/go-digest/.pullapprove.yml delete mode 100644 vendor/github.com/opencontainers/go-digest/.travis.yml delete mode 100644 vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md delete mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE delete mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.docs delete mode 100644 vendor/github.com/opencontainers/go-digest/MAINTAINERS delete mode 100644 vendor/github.com/opencontainers/go-digest/README.md delete mode 100644 vendor/github.com/opencontainers/go-digest/algorithm.go delete mode 100644 vendor/github.com/opencontainers/go-digest/digest.go delete mode 100644 vendor/github.com/opencontainers/go-digest/digester.go delete mode 100644 vendor/github.com/opencontainers/go-digest/digestset/set.go delete mode 100644 vendor/github.com/opencontainers/go-digest/doc.go delete mode 100644 vendor/github.com/opencontainers/go-digest/verifiers.go delete mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE delete mode 100644 vendor/github.com/opencontainers/image-spec/identity/chainid.go delete mode 100644 vendor/github.com/opencontainers/image-spec/identity/helpers.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go delete mode 100644 
vendor/github.com/opencontainers/image-spec/specs-go/version.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go delete mode 100644 vendor/github.com/opencontainers/runc/LICENSE delete mode 100644 vendor/github.com/opencontainers/runc/NOTICE delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go delete mode 100644 vendor/github.com/opencontainers/runtime-spec/LICENSE delete mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/config.go delete mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/state.go delete mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/version.go delete mode 100644 vendor/github.com/opencontainers/selinux/LICENSE delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/doc.go delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label.go delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux.go delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go delete mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md delete mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go delete mode 100644 vendor/github.com/pelletier/go-toml/.dockerignore delete mode 100644 vendor/github.com/pelletier/go-toml/.gitignore delete mode 100644 vendor/github.com/pelletier/go-toml/CONTRIBUTING.md delete 
mode 100644 vendor/github.com/pelletier/go-toml/Dockerfile delete mode 100644 vendor/github.com/pelletier/go-toml/LICENSE delete mode 100644 vendor/github.com/pelletier/go-toml/Makefile delete mode 100644 vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md delete mode 100644 vendor/github.com/pelletier/go-toml/README.md delete mode 100644 vendor/github.com/pelletier/go-toml/azure-pipelines.yml delete mode 100644 vendor/github.com/pelletier/go-toml/benchmark.sh delete mode 100644 vendor/github.com/pelletier/go-toml/doc.go delete mode 100644 vendor/github.com/pelletier/go-toml/example-crlf.toml delete mode 100644 vendor/github.com/pelletier/go-toml/example.toml delete mode 100644 vendor/github.com/pelletier/go-toml/fuzz.go delete mode 100644 vendor/github.com/pelletier/go-toml/fuzz.sh delete mode 100644 vendor/github.com/pelletier/go-toml/fuzzit.sh delete mode 100644 vendor/github.com/pelletier/go-toml/keysparsing.go delete mode 100644 vendor/github.com/pelletier/go-toml/lexer.go delete mode 100644 vendor/github.com/pelletier/go-toml/localtime.go delete mode 100644 vendor/github.com/pelletier/go-toml/marshal.go delete mode 100644 vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml delete mode 100644 vendor/github.com/pelletier/go-toml/marshal_test.toml delete mode 100644 vendor/github.com/pelletier/go-toml/parser.go delete mode 100644 vendor/github.com/pelletier/go-toml/position.go delete mode 100644 vendor/github.com/pelletier/go-toml/token.go delete mode 100644 vendor/github.com/pelletier/go-toml/toml.go delete mode 100644 vendor/github.com/pelletier/go-toml/tomltree_create.go delete mode 100644 vendor/github.com/pelletier/go-toml/tomltree_write.go delete mode 100644 vendor/github.com/pkg/errors/.gitignore delete mode 100644 vendor/github.com/pkg/errors/.travis.yml delete mode 100644 vendor/github.com/pkg/errors/LICENSE delete mode 100644 vendor/github.com/pkg/errors/Makefile delete mode 100644 vendor/github.com/pkg/errors/README.md delete 
mode 100644 vendor/github.com/pkg/errors/appveyor.yml delete mode 100644 vendor/github.com/pkg/errors/errors.go delete mode 100644 vendor/github.com/pkg/errors/go113.go delete mode 100644 vendor/github.com/pkg/errors/stack.go delete mode 100644 vendor/github.com/pmezard/go-difflib/LICENSE delete mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go delete mode 100644 vendor/github.com/prometheus/client_golang/LICENSE delete mode 100644 vendor/github.com/prometheus/client_golang/NOTICE delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/.gitignore delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go delete mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/process_collector.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/wrap.go delete mode 100644 vendor/github.com/prometheus/client_model/LICENSE delete mode 100644 vendor/github.com/prometheus/client_model/NOTICE delete mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go delete mode 100644 vendor/github.com/prometheus/common/LICENSE delete mode 100644 vendor/github.com/prometheus/common/NOTICE delete mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/openmetrics_create.go delete mode 100644 
vendor/github.com/prometheus/common/expfmt/text_create.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go delete mode 100644 vendor/github.com/prometheus/common/model/alert.go delete mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go delete mode 100644 vendor/github.com/prometheus/common/model/fnv.go delete mode 100644 vendor/github.com/prometheus/common/model/labels.go delete mode 100644 vendor/github.com/prometheus/common/model/labelset.go delete mode 100644 vendor/github.com/prometheus/common/model/metric.go delete mode 100644 vendor/github.com/prometheus/common/model/model.go delete mode 100644 vendor/github.com/prometheus/common/model/signature.go delete mode 100644 vendor/github.com/prometheus/common/model/silence.go delete mode 100644 vendor/github.com/prometheus/common/model/time.go delete mode 100644 vendor/github.com/prometheus/common/model/value.go delete mode 100644 vendor/github.com/prometheus/procfs/.gitignore delete mode 100644 vendor/github.com/prometheus/procfs/.golangci.yml delete mode 100644 vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/prometheus/procfs/CONTRIBUTING.md delete mode 100644 vendor/github.com/prometheus/procfs/LICENSE delete mode 100644 vendor/github.com/prometheus/procfs/MAINTAINERS.md delete mode 100644 vendor/github.com/prometheus/procfs/Makefile delete mode 100644 vendor/github.com/prometheus/procfs/Makefile.common delete mode 100644 vendor/github.com/prometheus/procfs/NOTICE delete mode 100644 vendor/github.com/prometheus/procfs/README.md delete mode 100644 vendor/github.com/prometheus/procfs/SECURITY.md delete mode 100644 vendor/github.com/prometheus/procfs/arp.go delete mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go 
delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_armx.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_others.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_x86.go delete mode 100644 vendor/github.com/prometheus/procfs/crypto.go delete mode 100644 vendor/github.com/prometheus/procfs/doc.go delete mode 100644 vendor/github.com/prometheus/procfs/fixtures.ttar delete mode 100644 vendor/github.com/prometheus/procfs/fs.go delete mode 100644 vendor/github.com/prometheus/procfs/fscache.go delete mode 100644 vendor/github.com/prometheus/procfs/internal/fs/fs.go delete mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go delete mode 100644 vendor/github.com/prometheus/procfs/internal/util/readfile.go delete mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go delete mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go delete mode 100644 vendor/github.com/prometheus/procfs/internal/util/valueparser.go delete mode 100644 vendor/github.com/prometheus/procfs/ipvs.go delete mode 100644 vendor/github.com/prometheus/procfs/kernel_random.go delete mode 100644 vendor/github.com/prometheus/procfs/loadavg.go delete mode 100644 vendor/github.com/prometheus/procfs/mdstat.go delete mode 100644 vendor/github.com/prometheus/procfs/meminfo.go delete mode 100644 vendor/github.com/prometheus/procfs/mountinfo.go delete mode 100644 vendor/github.com/prometheus/procfs/mountstats.go delete mode 100644 vendor/github.com/prometheus/procfs/net_conntrackstat.go delete mode 100644 vendor/github.com/prometheus/procfs/net_dev.go delete 
mode 100644 vendor/github.com/prometheus/procfs/net_ip_socket.go delete mode 100644 vendor/github.com/prometheus/procfs/net_protocols.go delete mode 100644 vendor/github.com/prometheus/procfs/net_sockstat.go delete mode 100644 vendor/github.com/prometheus/procfs/net_softnet.go delete mode 100644 vendor/github.com/prometheus/procfs/net_tcp.go delete mode 100644 vendor/github.com/prometheus/procfs/net_udp.go delete mode 100644 vendor/github.com/prometheus/procfs/net_unix.go delete mode 100644 vendor/github.com/prometheus/procfs/proc.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_cgroup.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_environ.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_fdinfo.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_io.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_maps.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_psi.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_smaps.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_status.go delete mode 100644 vendor/github.com/prometheus/procfs/schedstat.go delete mode 100644 vendor/github.com/prometheus/procfs/slab.go delete mode 100644 vendor/github.com/prometheus/procfs/stat.go delete mode 100644 vendor/github.com/prometheus/procfs/swaps.go delete mode 100644 vendor/github.com/prometheus/procfs/ttar delete mode 100644 vendor/github.com/prometheus/procfs/vm.go delete mode 100644 vendor/github.com/prometheus/procfs/xfrm.go delete mode 100644 vendor/github.com/prometheus/procfs/zoneinfo.go delete mode 100644 vendor/github.com/russross/blackfriday/v2/.gitignore delete mode 100644 vendor/github.com/russross/blackfriday/v2/.travis.yml delete mode 100644 
vendor/github.com/russross/blackfriday/v2/LICENSE.txt delete mode 100644 vendor/github.com/russross/blackfriday/v2/README.md delete mode 100644 vendor/github.com/russross/blackfriday/v2/block.go delete mode 100644 vendor/github.com/russross/blackfriday/v2/doc.go delete mode 100644 vendor/github.com/russross/blackfriday/v2/esc.go delete mode 100644 vendor/github.com/russross/blackfriday/v2/html.go delete mode 100644 vendor/github.com/russross/blackfriday/v2/inline.go delete mode 100644 vendor/github.com/russross/blackfriday/v2/markdown.go delete mode 100644 vendor/github.com/russross/blackfriday/v2/node.go delete mode 100644 vendor/github.com/russross/blackfriday/v2/smartypants.go delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/README.md delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/main.go delete mode 100644 vendor/github.com/sirupsen/logrus/.gitignore delete mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml delete mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml delete mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md delete mode 100644 vendor/github.com/sirupsen/logrus/LICENSE delete mode 100644 vendor/github.com/sirupsen/logrus/README.md delete mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go delete mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml delete mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go delete mode 100644 vendor/github.com/sirupsen/logrus/doc.go delete mode 100644 vendor/github.com/sirupsen/logrus/entry.go delete mode 100644 vendor/github.com/sirupsen/logrus/exported.go delete mode 100644 vendor/github.com/sirupsen/logrus/formatter.go delete mode 100644 vendor/github.com/sirupsen/logrus/hooks.go delete mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go delete mode 100644 
vendor/github.com/sirupsen/logrus/logger.go delete mode 100644 vendor/github.com/sirupsen/logrus/logrus.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go delete mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go delete mode 100644 vendor/github.com/sirupsen/logrus/writer.go delete mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/.gitignore delete mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml delete mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/LICENSE delete mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/Makefile delete mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/README.md delete mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go delete mode 100644 vendor/github.com/stretchr/testify/LICENSE delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare.go delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl delete mode 100644 vendor/github.com/stretchr/testify/assert/assertions.go delete mode 100644 vendor/github.com/stretchr/testify/assert/doc.go delete mode 100644 
vendor/github.com/stretchr/testify/assert/errors.go delete mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions.go delete mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions.go delete mode 100644 vendor/github.com/stretchr/testify/require/doc.go delete mode 100644 vendor/github.com/stretchr/testify/require/forward_requirements.go delete mode 100644 vendor/github.com/stretchr/testify/require/require.go delete mode 100644 vendor/github.com/stretchr/testify/require/require.go.tmpl delete mode 100644 vendor/github.com/stretchr/testify/require/require_forward.go delete mode 100644 vendor/github.com/stretchr/testify/require/require_forward.go.tmpl delete mode 100644 vendor/github.com/stretchr/testify/require/requirements.go delete mode 100644 vendor/github.com/tchap/go-patricia/AUTHORS delete mode 100644 vendor/github.com/tchap/go-patricia/LICENSE delete mode 100644 vendor/github.com/tchap/go-patricia/patricia/children.go delete mode 100644 vendor/github.com/tchap/go-patricia/patricia/patricia.go delete mode 100644 vendor/github.com/urfave/cli/.flake8 delete mode 100644 vendor/github.com/urfave/cli/.gitignore delete mode 100644 vendor/github.com/urfave/cli/.travis.yml delete mode 100644 vendor/github.com/urfave/cli/CHANGELOG.md delete mode 100644 vendor/github.com/urfave/cli/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/urfave/cli/CONTRIBUTING.md delete mode 100644 vendor/github.com/urfave/cli/LICENSE delete mode 100644 vendor/github.com/urfave/cli/README.md delete mode 100644 vendor/github.com/urfave/cli/app.go delete mode 100644 vendor/github.com/urfave/cli/appveyor.yml delete mode 100644 vendor/github.com/urfave/cli/category.go delete mode 100644 vendor/github.com/urfave/cli/cli.go delete mode 100644 vendor/github.com/urfave/cli/command.go delete mode 100644 vendor/github.com/urfave/cli/context.go delete mode 100644 vendor/github.com/urfave/cli/docs.go delete mode 100644 vendor/github.com/urfave/cli/errors.go delete mode 
100644 vendor/github.com/urfave/cli/fish.go delete mode 100644 vendor/github.com/urfave/cli/flag.go delete mode 100644 vendor/github.com/urfave/cli/flag_bool.go delete mode 100644 vendor/github.com/urfave/cli/flag_bool_t.go delete mode 100644 vendor/github.com/urfave/cli/flag_duration.go delete mode 100644 vendor/github.com/urfave/cli/flag_float64.go delete mode 100644 vendor/github.com/urfave/cli/flag_generic.go delete mode 100644 vendor/github.com/urfave/cli/flag_int.go delete mode 100644 vendor/github.com/urfave/cli/flag_int64.go delete mode 100644 vendor/github.com/urfave/cli/flag_int64_slice.go delete mode 100644 vendor/github.com/urfave/cli/flag_int_slice.go delete mode 100644 vendor/github.com/urfave/cli/flag_string.go delete mode 100644 vendor/github.com/urfave/cli/flag_string_slice.go delete mode 100644 vendor/github.com/urfave/cli/flag_uint.go delete mode 100644 vendor/github.com/urfave/cli/flag_uint64.go delete mode 100644 vendor/github.com/urfave/cli/funcs.go delete mode 100644 vendor/github.com/urfave/cli/help.go delete mode 100644 vendor/github.com/urfave/cli/parse.go delete mode 100644 vendor/github.com/urfave/cli/sort.go delete mode 100644 vendor/github.com/urfave/cli/template.go delete mode 100644 vendor/go.etcd.io/bbolt/.gitignore delete mode 100644 vendor/go.etcd.io/bbolt/.travis.yml delete mode 100644 vendor/go.etcd.io/bbolt/LICENSE delete mode 100644 vendor/go.etcd.io/bbolt/Makefile delete mode 100644 vendor/go.etcd.io/bbolt/README.md delete mode 100644 vendor/go.etcd.io/bbolt/bolt_386.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_amd64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_arm.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_arm64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_linux.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_mips64x.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_mipsx.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_openbsd.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc.go delete 
mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64le.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_riscv64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_s390x.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_aix.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_solaris.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_windows.go delete mode 100644 vendor/go.etcd.io/bbolt/boltsync_unix.go delete mode 100644 vendor/go.etcd.io/bbolt/bucket.go delete mode 100644 vendor/go.etcd.io/bbolt/cursor.go delete mode 100644 vendor/go.etcd.io/bbolt/db.go delete mode 100644 vendor/go.etcd.io/bbolt/doc.go delete mode 100644 vendor/go.etcd.io/bbolt/errors.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist_hmap.go delete mode 100644 vendor/go.etcd.io/bbolt/node.go delete mode 100644 vendor/go.etcd.io/bbolt/page.go delete mode 100644 vendor/go.etcd.io/bbolt/tx.go delete mode 100644 vendor/go.etcd.io/bbolt/unsafe.go delete mode 100644 vendor/go.mozilla.org/pkcs7/.gitignore delete mode 100644 vendor/go.mozilla.org/pkcs7/.travis.yml delete mode 100644 vendor/go.mozilla.org/pkcs7/LICENSE delete mode 100644 vendor/go.mozilla.org/pkcs7/Makefile delete mode 100644 vendor/go.mozilla.org/pkcs7/README.md delete mode 100644 vendor/go.mozilla.org/pkcs7/ber.go delete mode 100644 vendor/go.mozilla.org/pkcs7/decrypt.go delete mode 100644 vendor/go.mozilla.org/pkcs7/encrypt.go delete mode 100644 vendor/go.mozilla.org/pkcs7/pkcs7.go delete mode 100644 vendor/go.mozilla.org/pkcs7/sign.go delete mode 100644 vendor/go.mozilla.org/pkcs7/verify.go delete mode 100644 vendor/go.opencensus.io/.gitignore delete mode 100644 vendor/go.opencensus.io/.travis.yml delete mode 100644 vendor/go.opencensus.io/AUTHORS delete mode 100644 vendor/go.opencensus.io/CONTRIBUTING.md delete mode 100644 vendor/go.opencensus.io/LICENSE 
delete mode 100644 vendor/go.opencensus.io/Makefile delete mode 100644 vendor/go.opencensus.io/README.md delete mode 100644 vendor/go.opencensus.io/appveyor.yml delete mode 100644 vendor/go.opencensus.io/internal/internal.go delete mode 100644 vendor/go.opencensus.io/internal/sanitize.go delete mode 100644 vendor/go.opencensus.io/internal/traceinternals.go delete mode 100644 vendor/go.opencensus.io/opencensus.go delete mode 100644 vendor/go.opencensus.io/trace/basetypes.go delete mode 100644 vendor/go.opencensus.io/trace/config.go delete mode 100644 vendor/go.opencensus.io/trace/doc.go delete mode 100644 vendor/go.opencensus.io/trace/evictedqueue.go delete mode 100644 vendor/go.opencensus.io/trace/export.go delete mode 100644 vendor/go.opencensus.io/trace/internal/internal.go delete mode 100644 vendor/go.opencensus.io/trace/lrumap.go delete mode 100644 vendor/go.opencensus.io/trace/sampling.go delete mode 100644 vendor/go.opencensus.io/trace/spanbucket.go delete mode 100644 vendor/go.opencensus.io/trace/spanstore.go delete mode 100644 vendor/go.opencensus.io/trace/status_codes.go delete mode 100644 vendor/go.opencensus.io/trace/trace.go delete mode 100644 vendor/go.opencensus.io/trace/trace_go11.go delete mode 100644 vendor/go.opencensus.io/trace/trace_nongo11.go delete mode 100644 vendor/go.opencensus.io/trace/tracestate/tracestate.go delete mode 100644 vendor/golang.org/x/crypto/AUTHORS delete mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519_go113.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go delete mode 
100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/read.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/write.go delete mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go delete mode 100644 
vendor/golang.org/x/net/AUTHORS delete mode 100644 vendor/golang.org/x/net/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/net/LICENSE delete mode 100644 vendor/golang.org/x/net/PATENTS delete mode 100644 vendor/golang.org/x/net/context/context.go delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go delete mode 100644 vendor/golang.org/x/net/context/go17.go delete mode 100644 vendor/golang.org/x/net/context/go19.go delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go delete mode 100644 vendor/golang.org/x/net/http/httpguts/guts.go delete mode 100644 vendor/golang.org/x/net/http/httpguts/httplex.go delete mode 100644 vendor/golang.org/x/net/http2/.gitignore delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile delete mode 100644 vendor/golang.org/x/net/http2/Makefile delete mode 100644 vendor/golang.org/x/net/http2/README delete mode 100644 vendor/golang.org/x/net/http2/ciphers.go delete mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go delete mode 100644 vendor/golang.org/x/net/http2/databuffer.go delete mode 100644 vendor/golang.org/x/net/http2/errors.go delete mode 100644 vendor/golang.org/x/net/http2/flow.go delete mode 100644 vendor/golang.org/x/net/http2/frame.go delete mode 100644 vendor/golang.org/x/net/http2/go111.go delete mode 100644 vendor/golang.org/x/net/http2/gotrack.go delete mode 100644 vendor/golang.org/x/net/http2/headermap.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go delete mode 100644 vendor/golang.org/x/net/http2/http2.go delete mode 100644 vendor/golang.org/x/net/http2/not_go111.go delete mode 100644 vendor/golang.org/x/net/http2/pipe.go delete mode 100644 vendor/golang.org/x/net/http2/server.go delete mode 100644 
vendor/golang.org/x/net/http2/transport.go delete mode 100644 vendor/golang.org/x/net/http2/write.go delete mode 100644 vendor/golang.org/x/net/http2/writesched.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_random.go delete mode 100644 vendor/golang.org/x/net/idna/idna10.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/idna9.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/punycode.go delete mode 100644 vendor/golang.org/x/net/idna/tables10.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/tables11.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/tables12.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/tables9.0.0.go delete mode 100644 vendor/golang.org/x/net/idna/trie.go delete mode 100644 vendor/golang.org/x/net/idna/trieval.go delete mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go delete mode 100644 vendor/golang.org/x/net/trace/events.go delete mode 100644 vendor/golang.org/x/net/trace/histogram.go delete mode 100644 vendor/golang.org/x/net/trace/trace.go delete mode 100644 vendor/golang.org/x/net/websocket/client.go delete mode 100644 vendor/golang.org/x/net/websocket/dial.go delete mode 100644 vendor/golang.org/x/net/websocket/hybi.go delete mode 100644 vendor/golang.org/x/net/websocket/server.go delete mode 100644 vendor/golang.org/x/net/websocket/websocket.go delete mode 100644 vendor/golang.org/x/oauth2/.travis.yml delete mode 100644 vendor/golang.org/x/oauth2/AUTHORS delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/oauth2/LICENSE delete mode 100644 vendor/golang.org/x/oauth2/README.md delete mode 100644 vendor/golang.org/x/oauth2/internal/client_appengine.go delete mode 100644 vendor/golang.org/x/oauth2/internal/doc.go delete mode 100644 
vendor/golang.org/x/oauth2/internal/oauth2.go delete mode 100644 vendor/golang.org/x/oauth2/internal/token.go delete mode 100644 vendor/golang.org/x/oauth2/internal/transport.go delete mode 100644 vendor/golang.org/x/oauth2/oauth2.go delete mode 100644 vendor/golang.org/x/oauth2/token.go delete mode 100644 vendor/golang.org/x/oauth2/transport.go delete mode 100644 vendor/golang.org/x/sync/AUTHORS delete mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/sync/LICENSE delete mode 100644 vendor/golang.org/x/sync/PATENTS delete mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go delete mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go delete mode 100644 vendor/golang.org/x/sys/AUTHORS delete mode 100644 vendor/golang.org/x/sys/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/sys/LICENSE delete mode 100644 vendor/golang.org/x/sys/PATENTS delete mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go delete mode 100644 vendor/golang.org/x/sys/plan9/asm.s delete mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_386.s delete mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s delete mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_arm.s delete mode 100644 vendor/golang.org/x/sys/plan9/const_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/dir_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/env_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/errors_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/mkall.sh delete mode 100644 vendor/golang.org/x/sys/plan9/mkerrors.sh delete mode 100644 vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh delete mode 100644 vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/pwd_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/race.go delete mode 100644 vendor/golang.org/x/sys/plan9/race0.go delete mode 100644 vendor/golang.org/x/sys/plan9/str.go delete mode 100644 
vendor/golang.org/x/sys/plan9/syscall.go delete mode 100644 vendor/golang.org/x/sys/plan9/syscall_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go delete mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go delete mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go delete mode 100644 vendor/golang.org/x/sys/plan9/zsysnum_plan9.go delete mode 100644 vendor/golang.org/x/sys/unix/.gitignore delete mode 100644 vendor/golang.org/x/sys/unix/README.md delete mode 100644 vendor/golang.org/x/sys/unix/affinity_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/aliases.go delete mode 100644 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_riscv64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_zos_s390x.s delete mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/constants.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc.go 
delete mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/dirent.go delete mode 100644 vendor/golang.org/x/sys/unix/endian_big.go delete mode 100644 vendor/golang.org/x/sys/unix/endian_little.go delete mode 100644 vendor/golang.org/x/sys/unix/env_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/fcntl.go delete mode 100644 vendor/golang.org/x/sys/unix/fcntl_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go delete mode 100644 vendor/golang.org/x/sys/unix/fdset.go delete mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/gccgo.go delete mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c delete mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ioctl.go delete mode 100644 vendor/golang.org/x/sys/unix/ioctl_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/ioctl_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/mkall.sh delete mode 100644 vendor/golang.org/x/sys/unix/mkerrors.sh delete mode 100644 vendor/golang.org/x/sys/unix/pagesize_unix.go delete mode 100644 
vendor/golang.org/x/sys/unix/pledge_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/ptrace_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/ptrace_ios.go delete mode 100644 vendor/golang.org/x/sys/unix/race.go delete mode 100644 vendor/golang.org/x/sys/unix/race0.go delete mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdents.go delete mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdirentries.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go delete mode 100644 vendor/golang.org/x/sys/unix/str.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go delete mode 100644 
vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_illumos.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go delete mode 100644 
vendor/golang.org/x/sys/unix/syscall_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/timestruct.go delete mode 100644 vendor/golang.org/x/sys/unix/unveil_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/xattr_bsd.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go delete mode 100644 
vendor/golang.org/x/sys/unix/zerrors_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_x86_linux.go delete mode 
100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux.go delete mode 100644 
vendor/golang.org/x/sys/unix/zsyscall_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go delete mode 100644 
vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go delete mode 
100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go delete mode 100644 
vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/windows/aliases.go delete mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/empty.s delete mode 100644 vendor/golang.org/x/sys/windows/env_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/eventlog.go delete mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash delete mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash delete mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/windows/race.go delete mode 100644 
vendor/golang.org/x/sys/windows/race0.go delete mode 100644 vendor/golang.org/x/sys/windows/registry/key.go delete mode 100644 vendor/golang.org/x/sys/windows/registry/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/windows/registry/syscall.go delete mode 100644 vendor/golang.org/x/sys/windows/registry/value.go delete mode 100644 vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/security_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/service.go delete mode 100644 vendor/golang.org/x/sys/windows/setupapierrors_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/str.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/debug/log.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/debug/service.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/event.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/go12.c delete mode 100644 vendor/golang.org/x/sys/windows/svc/go12.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/go13.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/config.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/mgr.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/recovery.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/service.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/security.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/service.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/sys_windows_386.s delete mode 100644 vendor/golang.org/x/sys/windows/svc/sys_windows_amd64.s delete mode 100644 vendor/golang.org/x/sys/windows/svc/sys_windows_arm.s delete mode 100644 vendor/golang.org/x/sys/windows/svc/sys_windows_arm64.s delete mode 100644 vendor/golang.org/x/sys/windows/syscall.go delete mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows.go delete mode 100644 
vendor/golang.org/x/sys/windows/types_windows_386.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go delete mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go delete mode 100644 vendor/golang.org/x/term/AUTHORS delete mode 100644 vendor/golang.org/x/term/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/term/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/term/LICENSE delete mode 100644 vendor/golang.org/x/term/PATENTS delete mode 100644 vendor/golang.org/x/term/README.md delete mode 100644 vendor/golang.org/x/term/term.go delete mode 100644 vendor/golang.org/x/term/term_plan9.go delete mode 100644 vendor/golang.org/x/term/term_solaris.go delete mode 100644 vendor/golang.org/x/term/term_unix.go delete mode 100644 vendor/golang.org/x/term/term_unix_aix.go delete mode 100644 vendor/golang.org/x/term/term_unix_bsd.go delete mode 100644 vendor/golang.org/x/term/term_unix_linux.go delete mode 100644 vendor/golang.org/x/term/term_unix_zos.go delete mode 100644 vendor/golang.org/x/term/term_unsupported.go delete mode 100644 vendor/golang.org/x/term/term_windows.go delete mode 100644 vendor/golang.org/x/term/terminal.go delete mode 100644 vendor/golang.org/x/text/AUTHORS delete mode 100644 vendor/golang.org/x/text/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/text/LICENSE delete mode 100644 vendor/golang.org/x/text/PATENTS delete mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go delete mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go delete mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go delete mode 100644 vendor/golang.org/x/text/transform/transform.go delete mode 100644 
vendor/golang.org/x/text/unicode/bidi/bidi.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/input.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables13.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go delete mode 100644 vendor/golang.org/x/time/AUTHORS delete mode 100644 vendor/golang.org/x/time/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/time/LICENSE delete mode 100644 vendor/golang.org/x/time/PATENTS delete mode 100644 vendor/golang.org/x/time/rate/rate.go delete mode 100644 vendor/golang.org/x/xerrors/LICENSE delete mode 100644 vendor/golang.org/x/xerrors/PATENTS delete mode 100644 
vendor/golang.org/x/xerrors/README delete mode 100644 vendor/golang.org/x/xerrors/adaptor.go delete mode 100644 vendor/golang.org/x/xerrors/codereview.cfg delete mode 100644 vendor/golang.org/x/xerrors/doc.go delete mode 100644 vendor/golang.org/x/xerrors/errors.go delete mode 100644 vendor/golang.org/x/xerrors/fmt.go delete mode 100644 vendor/golang.org/x/xerrors/format.go delete mode 100644 vendor/golang.org/x/xerrors/frame.go delete mode 100644 vendor/golang.org/x/xerrors/internal/internal.go delete mode 100644 vendor/golang.org/x/xerrors/wrap.go delete mode 100644 vendor/google.golang.org/appengine/LICENSE delete mode 100644 vendor/google.golang.org/appengine/internal/api.go delete mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go delete mode 100644 vendor/google.golang.org/appengine/internal/api_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_id.go delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto delete mode 100644 vendor/google.golang.org/appengine/internal/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_flex.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/internal.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/main.go delete mode 100644 vendor/google.golang.org/appengine/internal/main_common.go delete mode 100644 
vendor/google.golang.org/appengine/internal/main_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/metadata.go delete mode 100644 vendor/google.golang.org/appengine/internal/net.go delete mode 100644 vendor/google.golang.org/appengine/internal/regen.sh delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto delete mode 100644 vendor/google.golang.org/appengine/internal/transaction.go delete mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto delete mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go delete mode 100644 vendor/google.golang.org/genproto/LICENSE delete mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go delete mode 100644 vendor/google.golang.org/grpc/.travis.yml delete mode 100644 vendor/google.golang.org/grpc/AUTHORS delete mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md delete mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md delete mode 100644 vendor/google.golang.org/grpc/GOVERNANCE.md delete mode 100644 vendor/google.golang.org/grpc/LICENSE delete mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md delete mode 100644 vendor/google.golang.org/grpc/Makefile delete mode 100644 vendor/google.golang.org/grpc/README.md delete mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go delete mode 100644 vendor/google.golang.org/grpc/backoff.go delete mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go delete mode 100644 vendor/google.golang.org/grpc/balancer.go delete mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go delete mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go delete mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go delete mode 100644 
vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go delete mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go delete mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go delete mode 100644 vendor/google.golang.org/grpc/call.go delete mode 100644 vendor/google.golang.org/grpc/clientconn.go delete mode 100644 vendor/google.golang.org/grpc/codec.go delete mode 100644 vendor/google.golang.org/grpc/codegen.sh delete mode 100644 vendor/google.golang.org/grpc/codes/code_string.go delete mode 100644 vendor/google.golang.org/grpc/codes/codes.go delete mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go delete mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go delete mode 100644 vendor/google.golang.org/grpc/credentials/go12.go delete mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn.go delete mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go delete mode 100644 vendor/google.golang.org/grpc/credentials/tls.go delete mode 100644 vendor/google.golang.org/grpc/dialoptions.go delete mode 100644 vendor/google.golang.org/grpc/doc.go delete mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go delete mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go delete mode 100644 vendor/google.golang.org/grpc/health/client.go delete mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go delete mode 100644 vendor/google.golang.org/grpc/health/regenerate.sh delete mode 100644 vendor/google.golang.org/grpc/health/server.go delete mode 100644 vendor/google.golang.org/grpc/install_gae.sh delete mode 100644 
vendor/google.golang.org/grpc/interceptor.go delete mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go delete mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/util.go delete mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go delete mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go delete mode 100644 vendor/google.golang.org/grpc/internal/internal.go delete mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go delete mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/go113.go delete mode 100644 vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go delete mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go delete mode 
100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/log.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go delete mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go delete mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go delete mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go delete mode 100644 vendor/google.golang.org/grpc/naming/naming.go delete mode 100644 vendor/google.golang.org/grpc/peer/peer.go delete mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/pickfirst.go delete mode 100644 vendor/google.golang.org/grpc/preloader.go delete mode 100644 vendor/google.golang.org/grpc/proxy.go delete mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go delete mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/rpc_util.go delete mode 100644 vendor/google.golang.org/grpc/server.go delete mode 100644 vendor/google.golang.org/grpc/service_config.go delete mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go delete mode 100644 vendor/google.golang.org/grpc/stats/handlers.go delete mode 100644 
vendor/google.golang.org/grpc/stats/stats.go delete mode 100644 vendor/google.golang.org/grpc/status/status.go delete mode 100644 vendor/google.golang.org/grpc/stream.go delete mode 100644 vendor/google.golang.org/grpc/tap/tap.go delete mode 100644 vendor/google.golang.org/grpc/trace.go delete mode 100644 vendor/google.golang.org/grpc/version.go delete mode 100644 vendor/google.golang.org/grpc/vet.sh delete mode 100644 vendor/gopkg.in/inf.v0/LICENSE delete mode 100644 vendor/gopkg.in/inf.v0/dec.go delete mode 100644 vendor/gopkg.in/inf.v0/rounder.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc delete mode 100644 vendor/gopkg.in/square/go-jose.v2/.gitignore delete mode 100644 vendor/gopkg.in/square/go-jose.v2/.travis.yml delete mode 100644 vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md delete mode 100644 vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md delete mode 100644 vendor/gopkg.in/square/go-jose.v2/LICENSE delete mode 100644 vendor/gopkg.in/square/go-jose.v2/README.md delete mode 100644 vendor/gopkg.in/square/go-jose.v2/asymmetric.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/crypter.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/doc.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/encoding.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/LICENSE delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/README.md delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/decode.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/encode.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/indent.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/scanner.go delete mode 100644 
vendor/gopkg.in/square/go-jose.v2/json/stream.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/tags.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/jwe.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/jwk.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/jws.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/opaque.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/shared.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/signing.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/symmetric.go delete mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml delete mode 100644 vendor/gopkg.in/yaml.v2/README.md delete mode 100644 vendor/gopkg.in/yaml.v2/apic.go delete mode 100644 vendor/gopkg.in/yaml.v2/decode.go delete mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go delete mode 100644 vendor/gopkg.in/yaml.v2/encode.go delete mode 100644 vendor/gopkg.in/yaml.v2/parserc.go delete mode 100644 vendor/gopkg.in/yaml.v2/readerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/resolve.go delete mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/sorter.go delete mode 100644 vendor/gopkg.in/yaml.v2/writerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/yaml.go delete mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go delete mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go delete mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml delete mode 100644 vendor/gopkg.in/yaml.v3/LICENSE delete mode 100644 vendor/gopkg.in/yaml.v3/README.md delete mode 100644 vendor/gopkg.in/yaml.v3/apic.go delete mode 100644 vendor/gopkg.in/yaml.v3/decode.go delete mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go delete mode 100644 vendor/gopkg.in/yaml.v3/encode.go delete mode 100644 vendor/gopkg.in/yaml.v3/parserc.go delete mode 100644 vendor/gopkg.in/yaml.v3/readerc.go delete mode 100644 vendor/gopkg.in/yaml.v3/resolve.go 
delete mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go delete mode 100644 vendor/gopkg.in/yaml.v3/sorter.go delete mode 100644 vendor/gopkg.in/yaml.v3/writerc.go delete mode 100644 vendor/gopkg.in/yaml.v3/yaml.go delete mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go delete mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go delete mode 100644 vendor/gotest.tools/v3/assert/assert.go delete mode 100644 vendor/gotest.tools/v3/assert/cmp/compare.go delete mode 100644 vendor/gotest.tools/v3/assert/cmp/result.go delete mode 100644 vendor/gotest.tools/v3/internal/assert/assert.go delete mode 100644 vendor/gotest.tools/v3/internal/assert/result.go delete mode 100644 vendor/gotest.tools/v3/internal/difflib/LICENSE delete mode 100644 vendor/gotest.tools/v3/internal/difflib/difflib.go delete mode 100644 vendor/gotest.tools/v3/internal/format/diff.go delete mode 100644 vendor/gotest.tools/v3/internal/format/format.go delete mode 100644 vendor/gotest.tools/v3/internal/source/defers.go delete mode 100644 vendor/gotest.tools/v3/internal/source/source.go create mode 100644 vendor/k8s.io/api/authentication/v1/doc.go create mode 100644 vendor/k8s.io/api/authentication/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/authentication/v1/generated.proto create mode 100644 vendor/k8s.io/api/authentication/v1/register.go create mode 100644 vendor/k8s.io/api/authentication/v1/types.go create mode 100644 vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go rename vendor/{sigs.k8s.io/yaml => k8s.io/apimachinery/pkg/api/meta}/OWNERS (53%) create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go rename vendor/{gopkg.in/yaml.v2/NOTICE => k8s.io/apimachinery/pkg/api/meta/doc.go} (70%) create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/api/meta/help.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/priority.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go rename vendor/{gotest.tools/v3/LICENSE => k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go} (80%) create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go rename vendor/{gopkg.in/yaml.v3/NOTICE => k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go} (89%) create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go 
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go rename vendor/{golang.org/x/crypto => k8s.io/apimachinery/third_party/forked/golang}/LICENSE (100%) rename vendor/{golang.org/x/crypto => k8s.io/apimachinery/third_party/forked/golang}/PATENTS (100%) create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS rename vendor/{github.com/google/gofuzz => k8s.io/apiserver/pkg/apis/audit}/doc.go (77%) create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/types.go rename vendor/{sigs.k8s.io/structured-merge-diff/v4/value => k8s.io/apiserver/pkg/apis/audit/v1}/doc.go (67%) create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go create mode 
100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/audit/context.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/format.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/request.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/scheme.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/union.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/user.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go create mode 100644 
vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/metrics/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/auditid.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/context.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go create mode 100644 vendor/k8s.io/apiserver/pkg/features/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/features/kube_features.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/version/def.bzl create mode 100644 vendor/k8s.io/client-go/rest/with_retry.go create mode 100644 vendor/k8s.io/component-base/featuregate/feature_gate.go create mode 100644 vendor/k8s.io/component-base/metrics/OWNERS create mode 100644 vendor/k8s.io/component-base/metrics/collector.go create mode 100644 
vendor/k8s.io/component-base/metrics/counter.go create mode 100644 vendor/k8s.io/component-base/metrics/desc.go create mode 100644 vendor/k8s.io/component-base/metrics/gauge.go create mode 100644 vendor/k8s.io/component-base/metrics/histogram.go create mode 100644 vendor/k8s.io/component-base/metrics/http.go create mode 100644 vendor/k8s.io/component-base/metrics/labels.go create mode 100644 vendor/k8s.io/component-base/metrics/legacyregistry/registry.go create mode 100644 vendor/k8s.io/component-base/metrics/metric.go create mode 100644 vendor/k8s.io/component-base/metrics/options.go create mode 100644 vendor/k8s.io/component-base/metrics/opts.go create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime.go create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime_others.go create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime_windows.go create mode 100644 vendor/k8s.io/component-base/metrics/registry.go create mode 100644 vendor/k8s.io/component-base/metrics/summary.go create mode 100644 vendor/k8s.io/component-base/metrics/value.go create mode 100644 vendor/k8s.io/component-base/metrics/version.go create mode 100644 vendor/k8s.io/component-base/metrics/version_parser.go create mode 100644 vendor/k8s.io/component-base/metrics/wrappers.go create mode 100644 vendor/k8s.io/component-base/version/.gitattributes create mode 100644 vendor/k8s.io/component-base/version/base.go create mode 100644 vendor/k8s.io/component-base/version/version.go create mode 100644 vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go create mode 100644 vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto create mode 100644 vendor/k8s.io/cri-api/pkg/apis/runtime/v1/constants.go delete mode 100644 vendor/k8s.io/klog/v2/.gitignore delete mode 100644 vendor/k8s.io/klog/v2/CONTRIBUTING.md delete mode 100644 vendor/k8s.io/klog/v2/LICENSE delete mode 100644 vendor/k8s.io/klog/v2/OWNERS delete mode 100644 vendor/k8s.io/klog/v2/README.md delete mode 
100644 vendor/k8s.io/klog/v2/RELEASE.md delete mode 100644 vendor/k8s.io/klog/v2/SECURITY.md delete mode 100644 vendor/k8s.io/klog/v2/SECURITY_CONTACTS delete mode 100644 vendor/k8s.io/klog/v2/code-of-conduct.md delete mode 100644 vendor/k8s.io/klog/v2/klog.go delete mode 100644 vendor/k8s.io/klog/v2/klog_file.go create mode 100644 vendor/k8s.io/utils/clock/README.md create mode 100644 vendor/k8s.io/utils/clock/clock.go create mode 100644 vendor/k8s.io/utils/clock/testing/fake_clock.go create mode 100644 vendor/k8s.io/utils/clock/testing/simple_interval_clock.go delete mode 100644 vendor/modules.txt delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/allocator.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/fields.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/list.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/listreflect.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/listunstructured.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/map.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/structreflect.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/valuereflect.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/valueunstructured.go delete mode 100644 vendor/sigs.k8s.io/yaml/.gitignore delete mode 100644 
vendor/sigs.k8s.io/yaml/.travis.yml delete mode 100644 vendor/sigs.k8s.io/yaml/CONTRIBUTING.md delete mode 100644 vendor/sigs.k8s.io/yaml/LICENSE delete mode 100644 vendor/sigs.k8s.io/yaml/README.md delete mode 100644 vendor/sigs.k8s.io/yaml/RELEASE.md delete mode 100644 vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS delete mode 100644 vendor/sigs.k8s.io/yaml/code-of-conduct.md delete mode 100644 vendor/sigs.k8s.io/yaml/fields.go delete mode 100644 vendor/sigs.k8s.io/yaml/yaml.go delete mode 100644 vendor/sigs.k8s.io/yaml/yaml_go110.go diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 31b6551..0000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -name: Bug report -about: Create a bug report to help improve containerd -title: '' -labels: kind/bug -assignees: '' ---- - - - -**Description** - - - -**Steps to reproduce the issue:** -1. -2. -3. - -**Describe the results you received:** - - -**Describe the results you expected:** - - -**What version of containerd are you using:** - -``` -$ containerd --version - -``` - -**Any other relevant information (runC version, CRI configuration, OS/Kernel version, etc.):** - - - -
runc --version
-$ runc --version
-
-
- - - -
crictl info
-$ crictl info
-
-
- - -
uname -a
-$ uname -a
-
-
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml new file mode 100644 index 0000000..7e58f03 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -0,0 +1,70 @@ +name: Bug report +description: Create a bug report to help improve containerd +labels: kind/bug +body: + - type: markdown + attributes: + value: | + If you are reporting a new issue, make sure that we do not have any duplicates + already open. You can ensure this by searching the issue list for this + repository. If there is a duplicate, please close your issue and add a comment + to the existing issue instead. + + Please have a look on the following tips before opening the issue: + +
+ * If containerd gets stuck on something and enables debug socket, `ctr pprof goroutines` + dumps the golang stack of containerd, which is helpful! If containerd runs + without debug socket, `kill -SIGUSR1 $(pidof containerd)` also dumps the stack + as well. + + * If there is something about running containerd, like consuming more CPU resources, + `ctr pprof` subcommands will help you to get some useful profiles. Enable debug + socket makes life easier. + + * `ctr` can't be used for testing CRI configs, as it does not use CRI API. +
+ + - type: textarea + attributes: + label: Description + description: | + Briefly describe the problem you are having in a few paragraphs. + validations: + required: true + + - type: textarea + attributes: + label: Steps to reproduce the issue + value: | + 1. + 2. + 3. + + - type: textarea + attributes: + label: Describe the results you received and expected + validations: + required: true + + - type: input + attributes: + label: What version of containerd are you using? + placeholder: $ containerd --version + validations: + required: true + + - type: textarea + attributes: + label: Any other relevant information + description: | + runc version, CRI configuration, OS/Kernel version, etc. + Use the following commands: + $ runc --version + $ crictl info (if you use Kubernetes) + $ uname -a + + - type: textarea + attributes: + label: Show configuration if it is related to CRI plugin. + placeholder: $ cat /etc/containerd/config.toml diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index e123919..0000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for containerd -title: '' -labels: kind/feature -assignees: '' ---- - -**What is the problem you're trying to solve** -A clear and concise description of what the problem is. - -**Describe the solution you'd like** -A clear and concise description of what you'd like to happen. - -**Additional context** -Add any other context about the feature request here. 
\ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml new file mode 100644 index 0000000..161656e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -0,0 +1,25 @@ +name: Feature request +description: Suggest an idea for containerd +labels: kind/feature +body: + - type: textarea + attributes: + label: What is the problem you're trying to solve + description: | + A clear and concise description of what the problem is. + validations: + required: true + + - type: textarea + attributes: + label: Describe the solution you'd like + description: | + A clear and concise description of what you'd like to happen. + validations: + required: true + + - type: textarea + attributes: + label: Additional context + description: | + Add any other context about the feature request here. diff --git a/.github/workflows/build-test-images.yml b/.github/workflows/build-test-images.yml new file mode 100644 index 0000000..940312a --- /dev/null +++ b/.github/workflows/build-test-images.yml @@ -0,0 +1,167 @@ +name: "Build volume test images" +on: + workflow_dispatch: + inputs: + push_to_project: + description: "Project to build images for" + required: true + default: "ghcr.io/containerd" + azure_windows_image_id: + description: Windows image URN to deploy + required: true + default: MicrosoftWindowsServer:WindowsServer:2022-datacenter:20348.350.2111030009 + azure_vm_size: + description: Windows image builder VM size + required: true + default: Standard_D2s_v3 + azure_location: + description: The Azure region to deploy to + required: true + default: westeurope + +permissions: + contents: read + +env: + AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUB_ID }} + DEFAULT_ADMIN_USERNAME: azureuser + SSH_OPTS: "-o ServerAliveInterval=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + AZURE_RESOURCE_GROUP: ctrd-test-image-build-${{ github.run_id }} + +jobs: + images: + permissions: + packages: write 
+ name: "Build volume test images" + runs-on: ubuntu-latest + defaults: + run: + working-directory: src/github.com/containerd/containerd + + steps: + - uses: actions/setup-go@v3 + with: + go-version: "1.20.8" + + - uses: actions/checkout@v3 + with: + path: src/github.com/containerd/containerd + + - name: Set env + shell: bash + run: | + echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV + echo "${{ github.workspace }}/bin" >> $GITHUB_PATH + + - name: Install docker + shell: bash + run: | + sudo apt update + sudo apt install -y ca-certificates curl gnupg lsb-release + curl -fsSL https://download.docker.com/linux/ubuntu/gpg > /tmp/docker.gpg + sudo gpg --yes --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg /tmp/docker.gpg + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt update + sudo apt install -y docker-ce docker-ce-cli containerd.io jq + sudo adduser $USER docker + + - name: Generate ssh key pair + run: | + mkdir -p $HOME/.ssh/ + ssh-keygen -t rsa -b 4096 -C "ci@containerd.com" -f $HOME/.ssh/id_rsa -q -N "" + echo "SSH_PUB_KEY=$(cat ~/.ssh/id_rsa.pub)" >> $GITHUB_ENV + + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDS }} + + - name: Create Azure Resource Group + uses: azure/CLI@v1 + with: + inlinescript: | + az group create -n ${{ env.AZURE_RESOURCE_GROUP }} -l ${{ github.event.inputs.azure_location }} --tags creationTimestamp=$(date +%Y-%m-%dT%T%z) + + - name: Create Windows Helper VM + uses: azure/CLI@v1 + with: + inlinescript: | + PASSWORD="$(/usr/bin/tr -dc "a-zA-Z0-9@#$%^&*()_+?><~\`;" < /dev/urandom | /usr/bin/head -c 24; echo '')" + az vm create -n WinDockerHelper \ + --admin-username ${{ env.DEFAULT_ADMIN_USERNAME }} \ + --public-ip-sku Basic \ + --admin-password "::add-mask::$PASSWORD" \ + --image ${{ 
github.event.inputs.azure_windows_image_id }} \ + -g ${{ env.AZURE_RESOURCE_GROUP }} \ + --size ${{ github.event.inputs.azure_vm_size }} + az vm open-port --resource-group ${{ env.AZURE_RESOURCE_GROUP }} --name WinDockerHelper --port 22 --priority 101 + az vm open-port --resource-group ${{ env.AZURE_RESOURCE_GROUP }} --name WinDockerHelper --port 2376 --priority 102 + + - name: Prepare Windows image helper + uses: azure/CLI@v1 + with: + inlinescript: | + # Installs Windows features, opens SSH and Docker port + az vm run-command invoke \ + --command-id RunPowerShellScript \ + -n WinDockerHelper \ + -g ${{ env.AZURE_RESOURCE_GROUP }} \ + --scripts @$GITHUB_WORKSPACE/src/github.com/containerd/containerd/script/setup/prepare_windows_docker_helper.ps1 + # The prepare_windows_docker_helper.ps1 script reboots the server after enabling the Windows features + # Give it a chance to reboot. Running another run-command via azure CLI should work even without this + # sleep, but we want to avoid the possibility that it may run before the server reboots. 
+ sleep 30 + # Enable SSH and import public key + az vm run-command invoke \ + --command-id RunPowerShellScript \ + -n WinDockerHelper \ + -g ${{ env.AZURE_RESOURCE_GROUP }} \ + --scripts @$GITHUB_WORKSPACE/src/github.com/containerd/containerd/script/setup/enable_ssh_windows.ps1 \ + --parameters 'SSHPublicKey=${{ env.SSH_PUB_KEY }}' + + - name: Get Windows Helper IPs + uses: azure/CLI@v1 + with: + inlinescript: | + VM_DETAILS=$(az vm show -d -g ${{ env.AZURE_RESOURCE_GROUP }} -n WinDockerHelper -o json) + echo "PUBLIC_IP=$(echo $VM_DETAILS | jq -r .publicIps)" >> $GITHUB_ENV + echo "PRIVATE_IP=$(echo $VM_DETAILS | jq -r .privateIps)" >> $GITHUB_ENV + + - name: Enable Docker TLS + shell: bash + run: | + scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} $GITHUB_WORKSPACE/src/github.com/containerd/containerd/script/setup/enable_docker_tls_on_windows.ps1 azureuser@${{ env.PUBLIC_IP }}:/enable_docker_tls_on_windows.ps1 + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.PUBLIC_IP }} "powershell.exe -command { C:/enable_docker_tls_on_windows.ps1 -IPAddresses ${{ env.PUBLIC_IP }},${{ env.PRIVATE_IP }} }" + + - name: Fetch client certificate and key + shell: bash + run: | + mkdir -p $HOME/.docker + scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.PUBLIC_IP }}:/Users/azureuser/.docker/ca.pem $HOME/.docker/ca.pem + scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.PUBLIC_IP }}:/Users/azureuser/.docker/cert.pem $HOME/.docker/cert.pem + scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.PUBLIC_IP }}:/Users/azureuser/.docker/key.pem $HOME/.docker/key.pem + + - name: Login to GitHub Container Registry + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push images + shell: bash + run: | + make -C $GITHUB_WORKSPACE/src/github.com/containerd/containerd/integration/images/volume-copy-up setup-buildx + + make -C 
$GITHUB_WORKSPACE/src/github.com/containerd/containerd/integration/images/volume-copy-up build-registry PROJ=${{ github.event.inputs.push_to_project }} REMOTE_DOCKER_URL=${{ env.PUBLIC_IP }}:2376 + make -C $GITHUB_WORKSPACE/src/github.com/containerd/containerd/integration/images/volume-copy-up push-manifest PROJ=${{ github.event.inputs.push_to_project }} REMOTE_DOCKER_URL=${{ env.PUBLIC_IP }}:2376 + + make -C $GITHUB_WORKSPACE/src/github.com/containerd/containerd/integration/images/volume-ownership build-registry PROJ=${{ github.event.inputs.push_to_project }} REMOTE_DOCKER_URL=${{ env.PUBLIC_IP }}:2376 + make -C $GITHUB_WORKSPACE/src/github.com/containerd/containerd/integration/images/volume-ownership push-manifest PROJ=${{ github.event.inputs.push_to_project }} REMOTE_DOCKER_URL=${{ env.PUBLIC_IP }}:2376 + + - name: Cleanup resources + if: always() + uses: azure/CLI@v1 + with: + inlinescript: | + az group delete -g ${{ env.AZURE_RESOURCE_GROUP }} --yes diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c2944e0..93b4d3a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,77 +2,91 @@ name: CI on: push: branches: - - master + - main - 'release/**' pull_request: branches: - - master + - main - 'release/**' +env: + # Go version we currently use to build containerd across all CI. + # Note: don't forget to update `Binaries` step, as it contains the matrix of all supported Go versions. 
+ GO_VERSION: "1.20.8" + +permissions: # added using https://github.com/step-security/secure-workflows + contents: read + jobs: # # golangci-lint # linters: + permissions: + contents: read # for actions/checkout to fetch code + pull-requests: read # for golangci/golangci-lint-action to fetch pull requests name: Linters runs-on: ${{ matrix.os }} timeout-minutes: 10 strategy: matrix: - go-version: [1.16.12] - os: [ubuntu-18.04, macos-10.15, windows-2019] + os: [ubuntu-20.04, macos-12, windows-2019] steps: - - uses: actions/checkout@v2 - with: - path: src/github.com/containerd/containerd - - - name: Set env - shell: bash + - name: Install dependencies + if: matrix.os == 'ubuntu-20.04' run: | - echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV - echo "${{ github.workspace }}/bin" >> $GITHUB_PATH + sudo apt-get update + sudo apt-get install -y libbtrfs-dev - - uses: golangci/golangci-lint-action@v2 + - uses: actions/setup-go@v3 with: - version: v1.36.0 - working-directory: src/github.com/containerd/containerd - args: --timeout=5m + go-version: ${{ env.GO_VERSION }} + + - uses: actions/checkout@v3 + - uses: golangci/golangci-lint-action@v3 + with: + version: v1.51.1 + skip-cache: true + args: --timeout=8m # # Project checks # project: name: Project Checks - runs-on: ubuntu-18.04 + if: github.repository == 'containerd/containerd' + runs-on: ubuntu-20.04 timeout-minutes: 5 steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: '1.16.12' + go-version: ${{ env.GO_VERSION }} - - shell: bash - run: | - echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV - echo "${{ github.workspace }}/bin" >> $GITHUB_PATH - - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: path: src/github.com/containerd/containerd fetch-depth: 100 - - uses: containerd/project-checks@v1 + - uses: containerd/project-checks@v1.1.0 with: working-directory: src/github.com/containerd/containerd + repo-access-token: ${{ secrets.GITHUB_TOKEN }} + + - name: verify go 
modules and vendor directory + run: | + sudo apt-get install -y jq + make verify-vendor + working-directory: src/github.com/containerd/containerd # # Protobuf checks # protos: name: Protobuf - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 timeout-minutes: 5 defaults: @@ -80,11 +94,11 @@ jobs: working-directory: src/github.com/containerd/containerd steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: '1.16.12' + go-version: ${{ env.GO_VERSION }} - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: path: src/github.com/containerd/containerd @@ -92,7 +106,6 @@ jobs: shell: bash run: | echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV - echo "GO111MODULE=off" >> $GITHUB_ENV echo "${{ github.workspace }}/bin" >> $GITHUB_PATH - name: Install protobuf @@ -109,34 +122,22 @@ jobs: man: name: Manpages - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 timeout-minutes: 5 steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: '1.16.12' - - - name: Set env - shell: bash - run: | - echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV - echo "${{ github.workspace }}/bin" >> $GITHUB_PATH - - - uses: actions/checkout@v2 - with: - path: src/github.com/containerd/containerd - - - run: GO111MODULE=on go get github.com/cpuguy83/go-md2man/v2@v2.0.0 - + go-version: ${{ env.GO_VERSION }} + - uses: actions/checkout@v3 + - run: go install github.com/cpuguy83/go-md2man/v2@v2.0.1 - run: make man - working-directory: src/github.com/containerd/containerd # Make sure binaries compile with other platforms crossbuild: name: Crossbuild Binaries - needs: [project, linters, protos, man] - runs-on: ubuntu-18.04 + needs: [linters, protos, man] + runs-on: ubuntu-20.04 timeout-minutes: 10 strategy: fail-fast: false @@ -150,6 +151,10 @@ jobs: - goos: linux goarch: arm goarm: "5" + - goos: linux + goarch: ppc64le + - goos: linux + goarch: riscv64 - goos: freebsd goarch: amd64 - goos: freebsd @@ -159,21 +164,14 @@ jobs: goarm: 
"7" steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: '1.16.12' - - name: Set env - shell: bash - run: | - echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV - echo "${{ github.workspace }}/bin" >> $GITHUB_PATH - - uses: actions/checkout@v2 - with: - path: src/github.com/containerd/containerd + go-version: ${{ env.GO_VERSION }} + - uses: actions/checkout@v3 - run: | set -e -x - packages="" + packages="libbtrfs-dev" platform="${{matrix.goos}}/${{matrix.goarch}}" if [ -n "${{matrix.goarm}}" ]; then platform+="/v${{matrix.goarm}}" @@ -195,6 +193,16 @@ jobs: echo "CGO_ENABLED=1" >> $GITHUB_ENV echo "CC=aarch64-linux-gnu-gcc" >> $GITHUB_ENV ;; + linux/ppc64le) + packages+=" crossbuild-essential-ppc64el" + echo "CGO_ENABLED=1" >> $GITHUB_ENV + echo "CC=powerpc64le-linux-gnu-gcc" >> $GITHUB_ENV + ;; + linux/riscv64) + packages+=" crossbuild-essential-riscv64" + echo "CGO_ENABLED=1" >> $GITHUB_ENV + echo "CC=riscv64-linux-gnu-gcc" >> $GITHUB_ENV + ;; windows/arm/v7) echo "CGO_ENABLED=0" >> $GITHUB_ENV ;; @@ -203,9 +211,8 @@ jobs: if [ -n "${packages}" ]; then sudo apt-get update && sudo apt-get install -y ${packages} fi - name: install deps + name: Install deps - name: Build - working-directory: src/github.com/containerd/containerd env: GOOS: ${{matrix.goos}} GOARCH: ${{matrix.goarch}} @@ -221,19 +228,20 @@ jobs: name: Binaries runs-on: ${{ matrix.os }} timeout-minutes: 10 - needs: [project, linters, protos, man] + needs: [linters, protos, man] strategy: matrix: - os: [ubuntu-18.04, macos-10.15, windows-2019] - go-version: ['1.16.12'] - include: - # Go 1.13.x is still used by Docker/Moby - - go-version: '1.13.x' - os: ubuntu-18.04 - + os: [ubuntu-20.04, macos-12, windows-2019, windows-2022] + go-version: ["1.20.8", "1.19.12"] steps: - - uses: actions/setup-go@v2 + - name: Install dependencies + if: matrix.os == 'ubuntu-20.04' + run: | + sudo apt-get update + sudo apt-get install -y libbtrfs-dev + + - uses: actions/setup-go@v3 with: 
go-version: ${{ matrix.go-version }} @@ -243,7 +251,7 @@ jobs: echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV echo "${{ github.workspace }}/bin" >> $GITHUB_PATH - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: path: src/github.com/containerd/containerd @@ -258,27 +266,32 @@ jobs: # integration-windows: name: Windows Integration - runs-on: windows-2019 - timeout-minutes: 30 - needs: [project, linters, protos, man] + runs-on: ${{ matrix.os }} + timeout-minutes: 35 + needs: [linters, protos, man] env: GOTEST: gotestsum -- + strategy: + fail-fast: false + matrix: + os: [windows-2019, windows-2022] + defaults: run: shell: bash working-directory: src/github.com/containerd/containerd steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: '1.16.12' + go-version: ${{ env.GO_VERSION }} - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: path: src/github.com/containerd/containerd - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: repository: Microsoft/hcsshim path: src/github.com/Microsoft/hcsshim @@ -321,10 +334,11 @@ jobs: - name: Integration 2 env: TESTFLAGS_PARALLEL: 1 + EXTRA_TESTFLAGS: "-short" CGO_ENABLED: 1 GOTESTSUM_JUNITFILE: ${{github.workspace}}/test-integration-parallel-junit.xml run: mingw32-make.exe integration - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 if: always() with: name: TestResults Windows @@ -333,9 +347,9 @@ jobs: integration-linux: name: Linux Integration - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 timeout-minutes: 40 - needs: [project, linters, protos, man] + needs: [linters, protos, man] strategy: fail-fast: false @@ -351,116 +365,88 @@ jobs: env: GOTEST: gotestsum -- steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: '1.16.12' + go-version: ${{ env.GO_VERSION }} - - uses: actions/checkout@v2 - with: - path: src/github.com/containerd/containerd - - - name: Set env - run: | - echo "GOPATH=${{ 
github.workspace }}" >> $GITHUB_ENV - echo "${{ github.workspace }}/bin" >> $GITHUB_PATH + - uses: actions/checkout@v3 - name: Install containerd dependencies env: RUNC_FLAVOR: ${{ matrix.runc }} run: | - sudo apt-get install -y gperf - sudo -E PATH=$PATH script/setup/install-seccomp - sudo -E PATH=$PATH script/setup/install-runc - sudo -E PATH=$PATH script/setup/install-cni - sudo -E PATH=$PATH script/setup/install-critools - working-directory: src/github.com/containerd/containerd + sudo apt-get install -y gperf libbtrfs-dev + script/setup/install-seccomp + script/setup/install-runc + script/setup/install-cni $(grep containernetworking/plugins go.mod | awk '{print $2}') + script/setup/install-critools + script/setup/install-failpoint-binaries - name: Install criu run: | - sudo apt-get install -y \ - libprotobuf-dev \ - libprotobuf-c-dev \ - protobuf-c-compiler \ - protobuf-compiler \ - python-protobuf \ - libnl-3-dev \ - libnet-dev \ - libcap-dev \ - python-future - wget https://github.com/checkpoint-restore/criu/archive/v3.13.tar.gz -O criu.tar.gz - tar -zxf criu.tar.gz - cd criu-3.13 - sudo make install-criu + sudo add-apt-repository ppa:criu/ppa + sudo apt-get update + sudo apt-get install -y criu - name: Install containerd env: CGO_ENABLED: 1 run: | - make binaries + make binaries GO_BUILD_FLAGS="-mod=vendor" sudo -E PATH=$PATH make install - working-directory: src/github.com/containerd/containerd - - run: sudo -E PATH=$PATH script/setup/install-gotestsum - working-directory: src/github.com/containerd/containerd + - run: script/setup/install-gotestsum - name: Tests env: - GOPROXY: direct GOTESTSUM_JUNITFILE: ${{github.workspace}}/test-unit-root-junit.xml run: | make test sudo -E PATH=$PATH make root-test - working-directory: src/github.com/containerd/containerd - name: Integration 1 env: - GOPROXY: direct TEST_RUNTIME: ${{ matrix.runtime }} RUNC_FLAVOR: ${{ matrix.runc }} GOTESTSUM_JUNITFILE: ${{github.workspace}}/test-integration-serial-junit.xml run: | - 
sudo -E PATH=$PATH make integration EXTRA_TESTFLAGS=-no-criu TESTFLAGS_RACE=-race - working-directory: src/github.com/containerd/containerd + extraflags="" + [ "${RUNC_FLAVOR}" == "crun" ] && { + extraflags="EXTRA_TESTFLAGS=-no-criu"; + } + sudo -E PATH=$PATH make integration ${extraflags} TESTFLAGS_RACE=-race # Run the integration suite a second time. See discussion in github.com/containerd/containerd/pull/1759 - name: Integration 2 env: - GOPROXY: direct TEST_RUNTIME: ${{ matrix.runtime }} RUNC_FLAVOR: ${{ matrix.runc }} GOTESTSUM_JUNITFILE: ${{github.workspace}}/test-integration-parallel-junit.xml run: | - sudo -E PATH=$PATH TESTFLAGS_PARALLEL=1 make integration EXTRA_TESTFLAGS=-no-criu - working-directory: src/github.com/containerd/containerd - - # CRIU wouldn't work with overlay snapshotter yet. - # See https://github.com/containerd/containerd/pull/4708#issuecomment-724322294. - - name: CRIU Integration - env: - GOPROXY: direct - TEST_RUNTIME: ${{ matrix.runtime }} - RUNC_FLAVOR: ${{ matrix.runc }} - GOTESTSUM_JUNITFILE: ${{github.workspace}}/test-integration-criu-junit.xml - # crun doesn't have "checkpoint" command. 
- if: ${{ matrix.runc == 'runc' }} - run: | - sudo -E PATH=$PATH \ - TESTFLAGS_PARALLEL=1 \ - TEST_SNAPSHOTTER=native \ - make integration EXTRA_TESTFLAGS='-run TestCheckpoint' - working-directory: src/github.com/containerd/containerd + extraflags="" + [ "${RUNC_FLAVOR}" == "crun" ] && { + extraflags="EXTRA_TESTFLAGS=-no-criu"; + } + sudo -E PATH=$PATH TESTFLAGS_PARALLEL=1 make integration ${extraflags} - name: CRI Integration Test env: TEST_RUNTIME: ${{ matrix.runtime }} run: | CONTAINERD_RUNTIME=$TEST_RUNTIME make cri-integration - working-directory: src/github.com/containerd/containerd - name: cri-tools critest env: TEST_RUNTIME: ${{ matrix.runtime }} run: | BDIR="$(mktemp -d -p $PWD)" + + function cleanup() { + sudo pkill containerd || true + cat ${BDIR}/containerd-cri.log + sudo -E rm -rf ${BDIR} + } + trap cleanup EXIT + mkdir -p ${BDIR}/{root,state} cat > ${BDIR}/config.toml < ${BDIR}/containerd-cri.log & sudo -E PATH=$PATH /usr/local/bin/ctr -a ${BDIR}/c.sock version sudo -E PATH=$PATH critest --report-dir "${{github.workspace}}/critestreport" --runtime-endpoint=unix:///${BDIR}/c.sock --parallel=8 - TEST_RC=$? 
- test $TEST_RC -ne 0 && cat ${BDIR}/containerd-cri.log - sudo pkill containerd - sudo -E rm -rf ${BDIR} - test $TEST_RC -eq 0 || /bin/false # Log the status of this VM to investigate issues like # https://github.com/containerd/containerd/issues/4969 @@ -486,7 +467,7 @@ jobs: mount df losetup -l - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 if: always() with: name: TestResults ${{ matrix.runtime }} ${{matrix.runc}} @@ -496,65 +477,62 @@ jobs: tests-mac-os: name: MacOS unit tests - runs-on: macos-10.15 + runs-on: macos-12 timeout-minutes: 10 - needs: [project, linters, protos, man] + needs: [linters, protos, man] env: GOTEST: gotestsum -- steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: '1.16.12' - - uses: actions/checkout@v2 - with: - path: src/github.com/containerd/containerd - - - name: Set env - run: | - echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV - echo "${{ github.workspace }}/bin" >> $GITHUB_PATH - - - run: sudo -E PATH=$PATH script/setup/install-gotestsum - working-directory: src/github.com/containerd/containerd + go-version: ${{ env.GO_VERSION }} + - uses: actions/checkout@v3 + - run: script/setup/install-gotestsum - name: Tests env: - GOPROXY: direct GOTESTSUM_JUNITFILE: "${{ github.workspace }}/macos-test-junit.xml" - run: | - make test - working-directory: src/github.com/containerd/containerd - - uses: actions/upload-artifact@v2 + run: make test + - uses: actions/upload-artifact@v3 if: always() with: name: TestResults MacOS path: | *-junit.xml - cgroup2: - name: CGroupsV2 and SELinux Integration + vagrant: + name: Vagrant # nested virtualization is only available on macOS hosts - runs-on: macos-10.15 + runs-on: macos-12 timeout-minutes: 45 - needs: [project, linters, protos, man] + needs: [linters, protos, man] strategy: + fail-fast: false matrix: # Currently crun is disabled to decrease CI flakiness. # We can enable crun again when we get a better CI infra. 
runc: [runc] + # Fedora is for testing cgroup v2 functionality, Rocky Linux is for testing on an enterprise-grade environment + box: ["fedora/37-cloud-base", "rockylinux/8"] env: GOTEST: gotestsum -- steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: "Cache ~/.vagrant.d/boxes" - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: ~/.vagrant.d/boxes key: vagrant-${{ hashFiles('Vagrantfile*') }} - name: Vagrant start + env: + BOX: ${{ matrix.box }} run: | + if [ "$BOX" = "rockylinux/8" ]; then + # The latest version 5.0.0 seems 404 (as of March 30, 2022) + export BOX_VERSION="4.0.0" + fi # Retry if it fails (download.fedoraproject.org returns 404 sometimes) vagrant up || vagrant up @@ -571,6 +549,11 @@ jobs: SELINUX: Enforcing REPORT_DIR: /tmp/critestreport run: vagrant up --provision-with=selinux,install-runc,install-gotestsum,test-cri + + - name: Collect the VM's IP address for Docker Hub's throttling issue + if: failure() + run: vagrant ssh -- curl https://api64.ipify.org/ + - name: Get test reports if: always() run: | @@ -579,10 +562,43 @@ jobs: vagrant plugin install vagrant-scp vagrant scp :/tmp/test-integration-junit.xml "${{ github.workspace }}/" vagrant scp :/tmp/critestreport "${{ github.workspace }}/critestreport" - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 if: always() with: - name: TestResults cgroup2 ${{ matrix.runtime }} ${{matrix.runc}} + # ${{ matrix.box }} cannot be used here due to character limitation + name: TestResults vagrant ${{ github.run_id }} ${{ matrix.runtime }} ${{matrix.runc}} path: | ${{github.workspace}}/*-junit.xml ${{github.workspace}}/critestreport/* + + cgroup2-misc: + name: CGroupsV2 - rootless CRI test + # nested virtualization is only available on macOS hosts + runs-on: macos-12 + timeout-minutes: 45 + needs: [linters, protos, man] + steps: + - uses: actions/checkout@v3 + + - name: "Cache ~/.vagrant.d/boxes" + uses: actions/cache@v3 + with: + path: ~/.vagrant.d/boxes 
+ key: vagrant-${{ hashFiles('Vagrantfile*') }} + + - name: Vagrant start + run: | + # Retry if it fails (download.fedoraproject.org returns 404 sometimes) + vagrant up || vagrant up + + # slow, so separated from the regular cgroup2 task + - name: CRI-in-UserNS test with Rootless Podman + run: | + vagrant up --provision-with=install-rootless-podman + # Execute rootless podman to create the UserNS env + vagrant ssh -- podman build --target cri-in-userns -t cri-in-userns -f /vagrant/contrib/Dockerfile.test /vagrant + vagrant ssh -- podman run --rm --privileged cri-in-userns + + - name: Collect the VM's IP address for Docker Hub's throttling issue + if: failure() + run: vagrant ssh -- curl https://api64.ipify.org/ diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 3eb7bd3..19f5625 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -2,15 +2,24 @@ name: "CodeQL Scan" on: push: - schedule: - - cron: '0 0 * * 0' + branches: + - main + - 'release/**' pull_request: - paths: - - '.github/workflows/codeql.yml' + branches: + - main + - 'release/**' + +permissions: # added using https://github.com/step-security/secure-workflows + contents: read jobs: CodeQL-Build: - + if: github.repository == 'containerd/containerd' + permissions: + actions: read # for github/codeql-action/init to get workflow details + contents: read # for actions/checkout to fetch code + security-events: write # for github/codeql-action/analyze to upload SARIF results strategy: fail-fast: false @@ -20,30 +29,22 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 + + - uses: actions/setup-go@v3 + with: + go-version: 1.20.8 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v1 + uses: github/codeql-action/init@v2 # Override language selection by uncommenting this and choosing your languages # with: # languages: go, javascript, csharp, python, cpp, java - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below). - - name: Autobuild - uses: github/codeql-action/autobuild@v1 - - # ℹ️ Command-line programs to run using the OS shell. - # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release + - run: | + sudo apt-get install -y libseccomp-dev libbtrfs-dev + make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/images.yml b/.github/workflows/images.yml new file mode 100644 index 0000000..8462235 --- /dev/null +++ b/.github/workflows/images.yml @@ -0,0 +1,77 @@ +name: "Mirror Test Image" +on: + workflow_dispatch: + inputs: + upstream: + description: "Upstream image to mirror" + required: true + default: "docker.io/library/busybox:1.32" + image: + description: "Target image name (override)" + +permissions: # added using https://github.com/step-security/secure-workflows + contents: read + +jobs: + mirror: + name: "Mirror Image" + runs-on: ubuntu-latest + permissions: + packages: write + + defaults: + run: + working-directory: src/github.com/containerd/containerd + + steps: + - uses: actions/setup-go@v3 + with: + go-version: "1.20.8" + + - uses: actions/checkout@v3 + with: + path: src/github.com/containerd/containerd + + - name: Set env + shell: bash + run: | + echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV + echo "${{ github.workspace }}/bin" >> $GITHUB_PATH + + - name: Install 
containerd dependencies + env: + RUNC_FLAVOR: ${{ matrix.runc }} + GOFLAGS: -modcacherw + run: | + sudo apt-get install -y gperf + sudo -E PATH=$PATH script/setup/install-seccomp + + - name: Install containerd + env: + CGO_ENABLED: 1 + run: | + make binaries GO_BUILD_FLAGS="-mod=vendor" GO_BUILDTAGS="no_btrfs" + sudo -E PATH=$PATH make install + + - name: Pull and push image + shell: bash + run: | + sudo containerd -l debug & > /tmp/containerd.out + containerd_pid=$! + sleep 5 + + upstream=${{ github.event.inputs.upstream }} + target=${{ github.event.inputs.image }} + if [[ "$target" == "" ]]; then + mirror="ghcr.io/containerd/${upstream##*/}" + else + mirror="ghcr.io/containerd/${target}" + fi + + echo "Mirroring $upstream to $mirror" + + sudo ctr content fetch --all-platforms ${upstream} + sudo ctr images ls + sudo ctr --debug images push -u ${{ github.actor }}:${{ secrets.GITHUB_TOKEN }} ${mirror} ${upstream} + + sudo kill $containerd_pid diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 97320a1..8babdce 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -6,6 +6,12 @@ on: paths: - '.github/workflows/nightly.yml' +env: + GO_VERSION: "1.20.8" + +permissions: # added using https://github.com/step-security/secure-workflows + contents: read + jobs: linux: name: Linux @@ -16,11 +22,11 @@ jobs: working-directory: src/github.com/containerd/containerd steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: '1.16.12' + go-version: ${{ env.GO_VERSION }} - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: path: src/github.com/containerd/containerd @@ -36,12 +42,13 @@ jobs: - name: Install dependencies run: | - sudo add-apt-repository "deb [arch=arm64,s390x,ppc64el] http://ports.ubuntu.com/ubuntu-ports/ $(lsb_release -sc) main" || true - sudo add-apt-repository "deb [arch=arm64,s390x,ppc64el] http://ports.ubuntu.com/ubuntu-ports/ $(lsb_release -sc)-updates main" || true 
+ sudo add-apt-repository "deb [arch=arm64,s390x,ppc64el,riscv64] http://ports.ubuntu.com/ubuntu-ports/ $(lsb_release -sc) main" || true + sudo add-apt-repository "deb [arch=arm64,s390x,ppc64el,riscv64] http://ports.ubuntu.com/ubuntu-ports/ $(lsb_release -sc)-updates main" || true sudo dpkg --add-architecture arm64 sudo dpkg --add-architecture s390x sudo dpkg --add-architecture ppc64el + sudo dpkg --add-architecture riscv64 sudo apt-get update || true @@ -49,14 +56,12 @@ jobs: crossbuild-essential-arm64 \ crossbuild-essential-s390x \ crossbuild-essential-ppc64el \ - libseccomp-dev:amd64 \ - libseccomp-dev:arm64 \ - libseccomp-dev:s390x \ - libseccomp-dev:ppc64el \ + crossbuild-essential-riscv64 \ libbtrfs-dev:amd64 \ libbtrfs-dev:arm64 \ libbtrfs-dev:s390x \ - libbtrfs-dev:ppc64el + libbtrfs-dev:ppc64el \ + libbtrfs-dev:riscv64 - name: Build amd64 env: @@ -96,6 +101,16 @@ jobs: make binaries mv bin bin_ppc64le + - name: Build riscv64 + env: + GOOS: linux + GOARCH: riscv64 + CGO_ENABLED: 1 + CC: riscv64-linux-gnu-gcc + run: | + make binaries + mv bin bin_riscv64 + # # Upload # @@ -124,6 +139,12 @@ jobs: name: linux_ppc64le path: src/github.com/containerd/containerd/bin_ppc64le + - name: Upload artifacts (linux_riscv64) + uses: actions/upload-artifact@v1 + with: + name: linux_riscv64 + path: src/github.com/containerd/containerd/bin_riscv64 + windows: name: Windows runs-on: windows-latest @@ -133,11 +154,11 @@ jobs: working-directory: src/github.com/containerd/containerd steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: '1.16.12' + go-version: ${{ env.GO_VERSION }} - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: path: src/github.com/containerd/containerd diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9ac8310..01082fa 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,17 +5,23 @@ on: name: Containerd Release +env: + GO_VERSION: "1.20.8" + 
+permissions: # added using https://github.com/step-security/secure-workflows + contents: read + jobs: check: name: Check Signed Tag - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 timeout-minutes: 5 outputs: stringver: ${{ steps.contentrel.outputs.stringver }} steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: ref: ${{ github.ref }} path: src/github.com/containerd/containerd @@ -38,218 +44,110 @@ jobs: id: contentrel run: | RELEASEVER=${{ github.ref }} - echo "::set-output name=stringver::${RELEASEVER#refs/tags/v}" + echo "stringver=${RELEASEVER#refs/tags/v}" >> $GITHUB_OUTPUT git tag -l ${RELEASEVER#refs/tags/} -n20000 | tail -n +3 | cut -c 5- >release-notes.md working-directory: src/github.com/containerd/containerd - name: Save release notes - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: containerd-release-notes path: src/github.com/containerd/containerd/release-notes.md build: name: Build Release Binaries - runs-on: ${{ matrix.os }} + runs-on: ubuntu-20.04 needs: [check] - timeout-minutes: 10 - + timeout-minutes: 30 strategy: matrix: - os: [ubuntu-18.04, windows-2019] - + include: + # Choose an old release of Ubuntu to avoid glibc issue https://github.com/containerd/containerd/issues/7255 + - dockerfile-ubuntu: 18.04 + dockerfile-platform: linux/amd64 + - dockerfile-ubuntu: 18.04 + dockerfile-platform: linux/arm64 + - dockerfile-ubuntu: 18.04 + dockerfile-platform: linux/ppc64le + # riscv64 isn't supported by Ubuntu 18.04 + - dockerfile-ubuntu: 22.04 + dockerfile-platform: linux/riscv64 + - dockerfile-ubuntu: 18.04 + dockerfile-platform: windows/amd64 steps: - name: Install Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: - go-version: '1.16.12' - + go-version: ${{ env.GO_VERSION }} - name: Set env shell: bash env: - MOS: ${{ matrix.os }} + MOS: ubuntu-20.04 run: | releasever=${{ github.ref }} releasever="${releasever#refs/tags/}" - os=linux - [[ "${MOS}" =~ "windows" ]] 
&& { - os=windows - } echo "RELEASE_VER=${releasever}" >> $GITHUB_ENV echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV - echo "OS=${os}" >> $GITHUB_ENV - echo "${{ github.workspace }}/bin" >> $GITHUB_PATH - - name: Checkout containerd - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: - repository: containerd/containerd + # Intentionally use github.repository instead of containerd/containerd to + # make this action runnable on forks. + # See https://github.com/containerd/containerd/issues/5098 for the context. + repository: ${{ github.repository }} ref: ${{ github.ref }} path: src/github.com/containerd/containerd - - name: HCS Shim commit - id: hcsshim_commit - if: startsWith(matrix.os, 'windows') - shell: bash - run: echo "::set-output name=sha::$(grep 'Microsoft/hcsshim ' go.mod | awk '{print $2}')" - working-directory: src/github.com/containerd/containerd - - - name: Checkout hcsshim source - if: startsWith(matrix.os, 'windows') - uses: actions/checkout@v2 + - name: Setup buildx instance + uses: docker/setup-buildx-action@v2 with: - repository: Microsoft/hcsshim - ref: ${{ steps.hcsshim_commit.outputs.sha }} - path: src/github.com/Microsoft/hcsshim - + use: true + - uses: crazy-max/ghaction-github-runtime@v2 # sets up needed vars for caching to github - name: Make shell: bash run: | - make build - make binaries - rm bin/containerd-stress* - [[ "${OS}" == "windows" ]] && { - ( - bindir="$(pwd)/bin" - cd ../../Microsoft/hcsshim - GO111MODULE=on go build -mod=vendor -o "${bindir}/containerd-shim-runhcs-v1.exe" ./cmd/containerd-shim-runhcs-v1 - ) - } - TARFILE="containerd-${RELEASE_VER#v}-${OS}-amd64.tar.gz" - tar czf ${TARFILE} bin/ - sha256sum ${TARFILE} >${TARFILE}.sha256sum - working-directory: src/github.com/containerd/containerd - - - name: Save build binaries - uses: actions/upload-artifact@v2 - with: - name: containerd-binaries-${{ matrix.os }} - path: src/github.com/containerd/containerd/*.tar.gz* - - - name: Make cri-containerd tar - shell: 
bash - env: - RUNC_FLAVOR: runc - run: | - if [[ "${OS}" == "linux" ]]; then - sudo apt-get update - sudo apt-get install -y gperf - sudo -E PATH=$PATH script/setup/install-seccomp + cache="--cache-from=type=gha,scope=containerd-release --cache-to=type=gha,scope=containerd-release" + if [[ "${PLATFORM}" =~ "windows" ]]; then + # For Windows the cni build script generates a config but shells out to powershell (and also assume it is running on windows) to get a gateway and subnet. + # The values provided here are taken from packages that we previously generated. + export GATEWAY=172.21.16.1 + export PREFIX_LEN=12 + BUILD_ARGS="--build-arg GATEWAY --build-arg PREFIX_LEN" fi - make cri-cni-release - working-directory: src/github.com/containerd/containerd + docker buildx build ${cache} --build-arg RELEASE_VER --build-arg UBUNTU_VERSION=${{ matrix.dockerfile-ubuntu }} --build-arg GO_VERSION ${BUILD_ARGS} -f .github/workflows/release/Dockerfile --platform=${PLATFORM} -o releases/ . + echo PLATFORM_CLEAN=${PLATFORM/\//-} >> $GITHUB_ENV - - name: Save cri-containerd binaries - uses: actions/upload-artifact@v2 + # Remove symlinks since we don't want these in the release Artifacts + find ./releases/ -maxdepth 1 -type l | xargs rm + working-directory: src/github.com/containerd/containerd + env: + PLATFORM: ${{ matrix.dockerfile-platform }} + - name: Save Artifacts + uses: actions/upload-artifact@v3 with: - name: cri-containerd-binaries-${{ matrix.os }} - path: src/github.com/containerd/containerd/releases/cri-containerd-cni-*.tar.gz* + name: release-tars-${{env.PLATFORM_CLEAN}} + path: src/github.com/containerd/containerd/releases/*.tar.gz* release: name: Create containerd Release - runs-on: ubuntu-18.04 + permissions: + contents: write + runs-on: ubuntu-20.04 timeout-minutes: 10 needs: [build, check] - steps: - name: Download builds and release notes - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: path: builds - - name: Catalog build assets for 
upload - id: catalog - run: | - _filenum=1 - for i in "ubuntu-18.04" "windows-2019"; do - for f in `ls builds/containerd-binaries-${i}`; do - echo "::set-output name=file${_filenum}::${f}" - let "_filenum+=1" - done - for f in `ls builds/cri-containerd-binaries-${i}`; do - echo "::set-output name=file${_filenum}::${f}" - let "_filenum+=1" - done - done - name: Create Release - id: create_release - uses: actions/create-release@v1.1.2 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + uses: softprops/action-gh-release@v1 with: - tag_name: ${{ github.ref }} - release_name: containerd ${{ needs.check.outputs.stringver }} - body_path: ./builds/containerd-release-notes/release-notes.md + token: ${{ secrets.GITHUB_TOKEN }} + fail_on_unmatched_files: true + name: containerd ${{ needs.check.outputs.stringver }} draft: false prerelease: ${{ contains(github.ref, 'beta') || contains(github.ref, 'rc') }} - - name: Upload Linux containerd tarball - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./builds/containerd-binaries-ubuntu-18.04/${{ steps.catalog.outputs.file1 }} - asset_name: ${{ steps.catalog.outputs.file1 }} - asset_content_type: application/gzip - - name: Upload Linux sha256 sum - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./builds/containerd-binaries-ubuntu-18.04/${{ steps.catalog.outputs.file2 }} - asset_name: ${{ steps.catalog.outputs.file2 }} - asset_content_type: text/plain - - name: Upload Linux cri containerd tarball - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./builds/cri-containerd-binaries-ubuntu-18.04/${{ steps.catalog.outputs.file3 }} - asset_name: ${{ 
steps.catalog.outputs.file3 }} - asset_content_type: application/gzip - - name: Upload Linux cri sha256 sum - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./builds/cri-containerd-binaries-ubuntu-18.04/${{ steps.catalog.outputs.file4 }} - asset_name: ${{ steps.catalog.outputs.file4 }} - asset_content_type: text/plain - - name: Upload Windows containerd tarball - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./builds/containerd-binaries-windows-2019/${{ steps.catalog.outputs.file5 }} - asset_name: ${{ steps.catalog.outputs.file5 }} - asset_content_type: application/gzip - - name: Upload Windows sha256 sum - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./builds/containerd-binaries-windows-2019/${{ steps.catalog.outputs.file6 }} - asset_name: ${{ steps.catalog.outputs.file6 }} - asset_content_type: text/plain - - name: Upload Windows cri containerd tarball - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./builds/cri-containerd-binaries-windows-2019/${{ steps.catalog.outputs.file7 }} - asset_name: ${{ steps.catalog.outputs.file7 }} - asset_content_type: application/gzip - - name: Upload Windows cri sha256 sum - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./builds/cri-containerd-binaries-windows-2019/${{ steps.catalog.outputs.file8 }} - asset_name: ${{ steps.catalog.outputs.file8 }} - asset_content_type: text/plain + body_path: 
./builds/containerd-release-notes/release-notes.md + files: | + builds/release-tars-**/* diff --git a/.github/workflows/release/Dockerfile b/.github/workflows/release/Dockerfile new file mode 100644 index 0000000..489087d --- /dev/null +++ b/.github/workflows/release/Dockerfile @@ -0,0 +1,62 @@ +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# UBUNTU_VERSION can be set to 18.04 (bionic), 20.04 (focal), or 22.04 (jammy) +ARG UBUNTU_VERSION=18.04 +ARG BASE_IMAGE=ubuntu:${UBUNTU_VERSION} +ARG GO_VERSION +ARG GO_IMAGE=golang:${GO_VERSION} +FROM --platform=$BUILDPLATFORM $GO_IMAGE AS go +FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.1.0@sha256:76a8510b1798f66fcc87e7ec2f4684aa1b16756df2a397ec307b9efb6023f6c5 AS xx + +FROM --platform=$BUILDPLATFORM ${BASE_IMAGE} AS base +COPY --from=xx / / +SHELL ["/bin/bash", "-xec"] +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && \ + apt-get install -y dpkg-dev git make pkg-config +ARG TARGETPLATFORM +RUN xx-apt-get install -y libseccomp-dev btrfs-progs gcc +RUN if grep -qE 'UBUNTU_CODENAME=(focal|jammy)' /etc/os-release; then xx-apt-get install -y libbtrfs-dev; fi +ENV PATH=/usr/local/go/bin:$PATH +ENV GOPATH=/go +ENV CGO_ENABLED=1 + +FROM base AS linux +FROM base AS windows +# Set variables used by cni script which would otherwise shell out to powershell +ARG GATEWAY +ARG PREFIX_LEN + +FROM ${TARGETOS} AS target +WORKDIR /go/src/github.com/containerd/containerd +COPY . . 
+ARG TARGETPLATFORM +ARG RELEASE_VER +ENV VERSION=$RELEASE_VER +RUN \ + --mount=type=bind,from=go,source=/usr/local/go,target=/usr/local/go \ + --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=cache,target=/go/pkg \ + export CC=$(xx-info)-gcc && xx-go --wrap && \ + make release cri-release cri-cni-release && \ + for f in $(find bin -executable -type f); do xx-verify $f; done + +# check git working tree after build +RUN \ + export GIT_STATUS_OUTPUT=$(git status --porcelain) && \ + test -z $GIT_STATUS_OUTPUT || (echo $GIT_STATUS_OUTPUT && exit 1) + +FROM scratch AS release +COPY --from=target /go/src/github.com/containerd/containerd/releases/ / diff --git a/.github/workflows/windows-periodic-trigger.yml b/.github/workflows/windows-periodic-trigger.yml new file mode 100644 index 0000000..48aad09 --- /dev/null +++ b/.github/workflows/windows-periodic-trigger.yml @@ -0,0 +1,32 @@ +# Workflow intended to periodically run the Windows Integration test workflow. + +name: Windows Periodic Tests + +on: + workflow_dispatch: + schedule: + - cron: "0 1 * * *" + +permissions: # added using https://github.com/step-security/secure-workflows + contents: read + +jobs: + + triggerWinIntegration: + # NOTE: the following permissions are required by `google-github-actions/auth`: + permissions: + contents: 'read' + id-token: 'write' + if: github.repository == 'containerd/containerd' + # NOTE(aznashwan, 11/24/21): GitHub actions do not currently support referencing + # or evaluating any kind of variables in the `uses` clause, but this will + # ideally be added in the future in which case the hardcoded reference to the + # upstream containerd repository should be replaced with the following to + # potentially allow contributors to enable periodic Windows tests on forks as well: + # uses: "${{ github.repository }}/.github/workflows/windows-periodic.yml@${{ github.ref_name }}" + uses: containerd/containerd/.github/workflows/windows-periodic.yml@main + secrets: + AZURE_SUB_ID: 
"${{ secrets.AZURE_SUB_ID }}" + AZURE_CREDS: "${{ secrets.AZURE_CREDS }}" + GCP_SERVICE_ACCOUNT: "${{ secrets.GCP_SERVICE_ACCOUNT }}" + GCP_WORKLOAD_IDENTITY_PROVIDER: "${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}" diff --git a/.github/workflows/windows-periodic.yml b/.github/workflows/windows-periodic.yml new file mode 100644 index 0000000..0ed183f --- /dev/null +++ b/.github/workflows/windows-periodic.yml @@ -0,0 +1,256 @@ +# Workflow intended to run containerd integration tests on Windows. + +name: Windows Integration Tests + +on: + workflow_dispatch: + workflow_call: + secrets: + AZURE_SUB_ID: + required: true + AZURE_CREDS: + required: true + GCP_SERVICE_ACCOUNT: + required: true + GCP_WORKLOAD_IDENTITY_PROVIDER: + required: true + +env: + AZURE_DEFAULT_LOCATION: westeurope + AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUB_ID }} + AZURE_DEFAULT_VM_SIZE: Standard_D2s_v3 + PASSWORD: Passw0rdAdmin # temp for testing, will be generated + DEFAULT_ADMIN_USERNAME: azureuser + SSH_OPTS: "-o ServerAliveInterval=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + REMOTE_VM_BIN_PATH: "c:\\containerd\\bin" + BUSYBOX_TESTING_IMAGE_REF: "registry.k8s.io/e2e-test-images/busybox:1.29-2" + RESOURCE_CONSUMER_TESTING_IMAGE_REF: "registry.k8s.io/e2e-test-images/resource-consumer:1.10" + WEBSERVER_TESTING_IMAGE_REF: "registry.k8s.io/e2e-test-images/nginx:1.14-2" + +permissions: # added using https://github.com/step-security/secure-workflows + contents: read + +jobs: + winIntegration: + # NOTE: the following permissions are required by `google-github-actions/auth`: + permissions: + contents: 'read' + id-token: 'write' + strategy: + matrix: + win_ver: [ltsc2019, ltsc2022] + include: + - win_ver: ltsc2019 + AZURE_IMG: "MicrosoftWindowsServer:WindowsServer:2019-Datacenter-with-Containers-smalldisk:17763.1935.2105080716" + AZURE_RESOURCE_GROUP: ctrd-integration-ltsc2019-${{ github.run_id }} + GOOGLE_BUCKET: "containerd-integration/logs/windows-ltsc2019/" + - win_ver: 
ltsc2022 + AZURE_IMG: "MicrosoftWindowsServer:WindowsServer:2022-datacenter-smalldisk-g2:20348.169.2108120020" + AZURE_RESOURCE_GROUP: ctrd-integration-ltsc2022-${{ github.run_id }} + GOOGLE_BUCKET: "containerd-integration/logs/windows-ltsc2022/" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Install required packages + run: | + sudo apt-get install xmlstarlet -y + + - name: PrepareArtifacts + run: | + STARTED_TIME=$(date +%s) + LOGS_DIR=$HOME/$STARTED_TIME + echo "STARTED_TIME=$STARTED_TIME" >> $GITHUB_ENV + echo "LOGS_DIR=$LOGS_DIR" >> $GITHUB_ENV + mkdir -p $LOGS_DIR/artifacts + + jq -n --arg node temp --arg timestamp $STARTED_TIME '$timestamp|tonumber|{timestamp:.,$node}' > $LOGS_DIR/started.json + + - name: Generate ssh key pair + run: | + mkdir -p $HOME/.ssh/ + ssh-keygen -t rsa -b 4096 -C "ci@containerd.com" -f $HOME/.ssh/id_rsa -q -N "" + echo "SSH_PUB_KEY=$(cat ~/.ssh/id_rsa.pub)" >> $GITHUB_ENV + + - name: AZLogin + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDS }} + + - name: AZResourceGroupCreate + uses: azure/CLI@v1 + with: + inlinescript: | + az group create -n ${{ matrix.AZURE_RESOURCE_GROUP }} -l ${{ env.AZURE_DEFAULT_LOCATION }} --tags creationTimestamp=$(date -u '+%Y-%m-%dT%H:%M:%SZ') + + - name: AZTestVMCreate + uses: azure/CLI@v1 + with: + inlinescript: | + DETAILS=$(az vm create -n winTestVM --admin-username ${{ env.DEFAULT_ADMIN_USERNAME }} --admin-password ${{ env.PASSWORD }} --image ${{ matrix.AZURE_IMG }} -g ${{ matrix.AZURE_RESOURCE_GROUP }} --nsg-rule SSH --size ${{ env.AZURE_DEFAULT_VM_SIZE }} --public-ip-sku Standard -o json) + PUB_IP=$(echo $DETAILS | jq -r .publicIpAddress) + if [ "$PUB_IP" == "null" ] + then + RETRY=0 + while [ "$PUB_IP" == "null" ] || [ $RETRY -le 5 ] + do + sleep 5 + PUB_IP=$(az vm show -d -g ${{ matrix.AZURE_RESOURCE_GROUP }} -n winTestVM -o json --query publicIps | jq -r) + RETRY=$(( $RETRY + 1 )) + done + fi + + if [ "$PUB_IP" == "null" ] + then + echo "failed to fetch 
public IP" + exit 1 + fi + echo "VM_PUB_IP=$PUB_IP" >> $GITHUB_ENV + + - name: EnableAZVMSSH + uses: azure/CLI@v1 + with: + inlinescript: | + az vm run-command invoke --command-id RunPowerShellScript -n winTestVM -g ${{ matrix.AZURE_RESOURCE_GROUP }} --scripts @$GITHUB_WORKSPACE/script/setup/enable_ssh_windows.ps1 --parameters 'SSHPublicKey=${{ env.SSH_PUB_KEY }}' + + - name: TestSSHConnection + run: | + if ! ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "hostname"; + then + exit 1 + fi + + - name: InstallContainerFeatureWS2022 + if: ${{ matrix.win_ver == 'ltsc2022' }} + run: | + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "powershell.exe -command { Install-WindowsFeature -Name 'Containers' -Restart }" + + - name: WaitForVMToRestart + if: ${{ matrix.win_ver == 'ltsc2022' }} + timeout-minutes: 5 + run: | + # give the vm 30 seconds to actually stop. SSH server might actually respond while server is shutting down. + sleep 30 + while [ ! $( ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "hostname") ]; + do + echo "Unable to connect to azurevm" + done + echo "Connection reestablished. VM restarted successfully."
+ + - name: CreateNatNetworkWS2022 + if: ${{ matrix.win_ver == 'ltsc2022' }} + run: | + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "powershell.exe -command { curl.exe -L 'https://raw.githubusercontent.com/microsoft/SDN/master/Kubernetes/windows/hns.psm1' -o hns.psm1 }" + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "powershell.exe -command { Import-Module .\hns.psm1 ; New-HnsNetwork -Type NAT -Name nat -AddressPrefix 172.19.208.0/20 -Gateway 172.19.208.1 }" + + - name: PrepareTestingEnv + run: | + scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} $GITHUB_WORKSPACE/script/setup/prepare_env_windows.ps1 azureuser@${{ env.VM_PUB_IP }}:/prepare_env_windows.ps1 + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "c:\\prepare_env_windows.ps1" + + - name: MakeContainerDBins + run: | + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "git clone http://github.com/containerd/containerd c:\\containerd " + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "cd c:\containerd ; make binaries" + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "git clone http://github.com/Microsoft/hcsshim c:\containerd\hcsshim " + + # Get shim commit from containerd local repo + SHIM_COMMIT=$(grep 'Microsoft/hcsshim' go.mod | awk '{ print $2 }'); + + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "cd c:\containerd\hcsshim; git fetch --tags origin $SHIM_COMMIT ; \ + git checkout $SHIM_COMMIT ; go build -mod=vendor -o ${{ env.REMOTE_VM_BIN_PATH }}\containerd-shim-runhcs-v1.exe .\cmd\containerd-shim-runhcs-v1" + + - name: RunIntegrationTests + run: | + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "sh.exe -s" << EOF + cd /c/containerd + export EXTRA_TESTFLAGS="-timeout=20m" + make integration | tee /c/Logs/integration.log + EOF + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} 
azureuser@${{ env.VM_PUB_IP }} "sh.exe -c 'cat /c/Logs/integration.log | go-junit-report.exe > /c/Logs/junit_00.xml'" + + - name: PrepareRepoList + run: | + cat > repolist.toml << EOF + busybox = "${{ env.BUSYBOX_TESTING_IMAGE_REF }}" + ResourceConsumer = "${{ env.RESOURCE_CONSUMER_TESTING_IMAGE_REF }}" + EOF + + cat > cri-test-images.yaml << EOF + defaultTestContainerImage: ${{ env.BUSYBOX_TESTING_IMAGE_REF }} + webServerTestImage: ${{ env.WEBSERVER_TESTING_IMAGE_REF }} + EOF + + scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} repolist.toml azureuser@${{ env.VM_PUB_IP }}:c:/repolist.toml + scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} cri-test-images.yaml azureuser@${{ env.VM_PUB_IP }}:c:/cri-test-images.yaml + + - name: RunCRIIntegrationTests + run: | + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "sh.exe -s" < c:/Logs/junit_01.xml' " + + - name: GetCritestRepo + run: | + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "git clone https://github.com/kubernetes-sigs/cri-tools c:/cri-tools" + + - name: BuildCritest + run: | + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "sh.exe -c 'cd /c/cri-tools && make critest'" + + - name: RunCritest + run: | + ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "powershell.exe -command { Start-Process -FilePath C:\containerd\bin\containerd.exe -NoNewWindow -RedirectStandardError true -PassThru ; get-process | sls containerd ; start-sleep 5 ; c:\cri-tools\build\bin\critest.exe --runtime-endpoint=\"npipe:\\\\.\\pipe\\containerd-containerd\" --test-images-file='c:\cri-test-images.yaml' --report-dir='c:\Logs' }" + + - name: PullLogsFromWinNode + run: | + scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }}:c:/Logs/*.xml ${{ env.LOGS_DIR }}/artifacts/ + for f in $(ls ${{ env.LOGS_DIR }}/artifacts/*.xml); do + xmlstarlet ed -d "/testsuites/testsuite/properties" $f > ${{ env.LOGS_DIR }}/$(basename $f) + 
mv ${{ env.LOGS_DIR }}/$(basename $f) $f + done + + - name: FinishJob + run: | + jq -n --arg result SUCCESS --arg timestamp $(date +%s) '$timestamp|tonumber|{timestamp:.,$result}' > ${{ env.LOGS_DIR }}/finished.json + echo "${{ env.STARTED_TIME }}" > ${{ github.workspace }}/latest-build.txt + + - name: AssignGcpCreds + id: AssignGcpCreds + run: | + echo 'GCP_SERVICE_ACCOUNT=${{ secrets.GCP_SERVICE_ACCOUNT }}' >> $GITHUB_OUTPUT + echo 'GCP_WORKLOAD_IDENTITY_PROVIDER=${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}' >> $GITHUB_OUTPUT + + - name: AuthGcp + uses: google-github-actions/auth@v0 + if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER + with: + service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }} + workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} + + - name: UploadJobReport + uses: google-github-actions/upload-cloud-storage@v0 + if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER + with: + path: ${{ github.workspace }}/latest-build.txt + destination: ${{ matrix.GOOGLE_BUCKET }} + parent: false + + - name: UploadLogsDir + uses: google-github-actions/upload-cloud-storage@v0 + if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER + with: + path: ${{ env.LOGS_DIR }} + destination: ${{ matrix.GOOGLE_BUCKET }}${{ env.STARTED_TIME}} + parent: false + + - name: ResourceCleanup + if: always() + uses: azure/CLI@v1 + with: + inlinescript: | + az group delete -g ${{ matrix.AZURE_RESOURCE_GROUP }} --yes diff --git a/.golangci.yml b/.golangci.yml index 9fa9f44..e162f0a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,27 +1,55 @@ linters: enable: - - structcheck - - varcheck - - staticcheck - - unconvert + - exportloopref # Checks for pointers to enclosing loop variables - gofmt - goimports - - golint + - gosec - ineffassign - - vet - - unused - misspell + - 
nolintlint + - revive + - staticcheck + - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 + - unconvert + - unused + - vet + - dupword # Checks for duplicate words in the source code disable: - errcheck issues: include: - EXC0002 + max-issues-per-linter: 0 + max-same-issues: 0 + + # Only using / doesn't work due to https://github.com/golangci/golangci-lint/issues/1398. + exclude-rules: + - path: 'archive[\\/]tarheader[\\/]' + # conversion is necessary on Linux, unnecessary on macOS + text: "unnecessary conversion" + +linters-settings: + gosec: + # The following issues surfaced when `gosec` linter + # was enabled. They are temporarily excluded to unblock + # the existing workflow, but still to be addressed by + # future works. + excludes: + - G204 + - G305 + - G306 + - G402 + - G404 run: - timeout: 3m + timeout: 8m skip-dirs: - api + - cluster - design - docs - docs/man + - releases + - reports + - test # e2e scripts diff --git a/.mailmap b/.mailmap index b6ce972..3988d47 100644 --- a/.mailmap +++ b/.mailmap @@ -29,13 +29,17 @@ Eric Ernst Eric Ren Eric Ren Eric Ren +Fabiano Fidêncio Fahed Dorgaa Frank Yang Fupan Li Fupan Li +Fupan Li +Furkan Türkal Georgia Panoutsakopoulou Guangming Wang Haiyan Meng +haoyun Harry Zhang Hu Shuai Hu Shuai @@ -53,15 +57,18 @@ John Howard John Howard Lorenz Brun Luc Perkins +Jiajun Jiang Julien Balestra Jun Lin Chen <1913688+mc256@users.noreply.github.com> Justin Cormack Justin Terry Justin Terry +Kante Kenfe-Mickaël Laventure Kevin Kern Kevin Parsons Kevin Xu +Kitt Hsu Kohei Tokunaga Krasi Georgiev Lantao Liu @@ -69,16 +76,22 @@ Lantao Liu Li Yuxuan Lifubang Lu Jingxiao +Maksym Pavlenko <865334+mxpv@users.noreply.github.com> Maksym Pavlenko Maksym Pavlenko Mario Hros Mario Hros Mario Macias Mark Gordon +Marvin Giessing Michael Crosby Michael Katsoulis Mike Brown Mohammad Asif Siddiqui +Nabeel Rana +Ng Yang +Ning Li +ningmingxiao Nishchay Kumar Oliver Stenbom Phil Estes @@ -89,6 +102,7 @@ Ross Boucher Ruediger Maass Rui 
Cao Sakeven Jiang +Samuel Karp Samuel Karp Seth Pellegrino <30441101+sethp-nr@users.noreply.github.com> Shaobao Feng @@ -104,16 +118,21 @@ Stephen J Day Sudeesh John Su Fei Su Xiaolin +Takumasa Sakao Ted Yu Tõnis Tiigi Wade Lee Wade Lee Wade Lee <21621232@zju.edu.cn> +Wang Bing wanglei +wanglei +wangzhan Wei Fu Wei Fu Xiaodong Zhang Xuean Yan +Yang Yang Yue Zhang Yuxing Liu Zhang Wei @@ -124,4 +143,7 @@ Zhiyu Li <404977848@qq.com> Zhongming Chang Zhoulin Xie Zhoulin Xie <42261994+JoeWrightss@users.noreply.github.com> +zounengren 张潇 +Kazuyoshi Kato +Andrey Epifanov diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index 8c84572..0000000 --- a/.zuul.yaml +++ /dev/null @@ -1,35 +0,0 @@ -- project: - name: containerd/containerd - merge-mode: merge - check: - jobs: - - containerd-build-arm64 - - containerd-test-arm64 - - containerd-integration-test-arm64 - -- job: - name: containerd-build-arm64 - parent: init-test - description: | - Containerd build in openlab cluster. - run: .zuul/playbooks/containerd-build/run.yaml - nodeset: ubuntu-xenial-arm64-openlab - voting: false - -- job: - name: containerd-test-arm64 - parent: init-test - description: | - Containerd unit tests in openlab cluster. - run: .zuul/playbooks/containerd-build/unit-test.yaml - nodeset: ubuntu-xenial-arm64-openlab - voting: false - -- job: - name: containerd-integration-test-arm64 - parent: init-test - description: | - Containerd unit tests in openlab cluster. 
- run: .zuul/playbooks/containerd-build/integration-test.yaml - nodeset: ubuntu-xenial-arm64-openlab - voting: false diff --git a/.zuul/playbooks/containerd-build/integration-test.yaml b/.zuul/playbooks/containerd-build/integration-test.yaml deleted file mode 100644 index 9496ece..0000000 --- a/.zuul/playbooks/containerd-build/integration-test.yaml +++ /dev/null @@ -1,96 +0,0 @@ -- hosts: all - become: yes - roles: - - role: config-golang - go_version: '1.16.12' - arch: arm64 - tasks: - - name: Install pre-requisites - shell: - cmd: | - set -xe - set -o pipefail - apt-get update - apt-get install -y btrfs-tools libseccomp-dev git pkg-config lsof gperf apparmor - - go version - chdir: '{{ zuul.project.src_dir }}' - executable: /bin/bash - environment: '{{ global_env }}' - - name: Install containerd and cri dependencies - shell: - cmd: | - set -xe - make install-deps - chdir: '{{ zuul.project.src_dir }}' - executable: /bin/bash - environment: '{{ global_env }}' - - name: Install criu - shell: - cmd: | - set -xe - apt-get install -y \ - libprotobuf-dev \ - libprotobuf-c-dev \ - protobuf-c-compiler \ - protobuf-compiler \ - python-protobuf \ - libnl-3-dev \ - libnet-dev \ - libcap-dev \ - python-future - wget https://github.com/checkpoint-restore/criu/archive/v3.13.tar.gz -O criu.tar.gz - tar -zxf criu.tar.gz - cd criu-3.13 - make install-criu - chdir: '{{ zuul.project.src_dir }}' - executable: /bin/bash - environment: '{{ global_env }}' - - name: Install containerd - shell: - cmd: | - set -xe - make binaries - make install | tee $LOGS_PATH/make_install.log - chdir: '{{ zuul.project.src_dir }}' - executable: /bin/bash - environment: '{{ global_env }}' - - name: Tests - shell: - cmd: | - make test | tee $LOGS_PATH/make_test.log - make root-test | tee $LOGS_PATH/make_root-test.log - chdir: '{{ zuul.project.src_dir }}' - executable: /bin/bash - environment: '{{ global_env }}' - - name: Integration 1 - shell: - cmd: | - make integration EXTRA_TESTFLAGS=-no-criu 
TESTFLAGS_RACE=-race | tee $LOGS_PATH/make_integration-test.log - chdir: '{{ zuul.project.src_dir }}' - executable: /bin/bash - environment: '{{ global_env }}' - - name: Integration 2 - shell: - cmd: | - TESTFLAGS_PARALLEL=1 make integration EXTRA_TESTFLAGS=-no-criu | tee $LOGS_PATH/make_integration-test.log - chdir: '{{ zuul.project.src_dir }}' - executable: /bin/bash - environment: '{{ global_env }}' - - name: CRI Integration Test - shell: - cmd: | - CONTAINERD_RUNTIME="io.containerd.runc.v2" make cri-integration | tee $LOGS_PATH/make_cri-integration-test.log - chdir: '{{ zuul.project.src_dir }}' - executable: /bin/bash - environment: '{{ global_env }}' - - name: CRI Integration Test - shell: - cmd: | - if grep -q "FAIL:" $LOGS_PATH/*.log; then - echo "FAILURE" - exit 1 - fi - chdir: '{{ zuul.project.src_dir }}' - executable: /bin/bash - environment: '{{ global_env }}' diff --git a/.zuul/playbooks/containerd-build/run.yaml b/.zuul/playbooks/containerd-build/run.yaml deleted file mode 100644 index a0ac088..0000000 --- a/.zuul/playbooks/containerd-build/run.yaml +++ /dev/null @@ -1,22 +0,0 @@ -- hosts: all - become: yes - roles: - - role: config-golang - go_version: '1.16.12' - arch: arm64 - tasks: - - name: Build containerd - shell: - cmd: | - set -xe - set -o pipefail - apt-get update - apt-get install -y btrfs-tools libseccomp-dev git pkg-config - - go version - make | tee $LOGS_PATH/make.txt - - cp -r ./bin $RESULTS_PATH - chdir: '{{ zuul.project.src_dir }}' - executable: /bin/bash - environment: '{{ global_env }}' diff --git a/.zuul/playbooks/containerd-build/unit-test.yaml b/.zuul/playbooks/containerd-build/unit-test.yaml deleted file mode 100644 index be354a3..0000000 --- a/.zuul/playbooks/containerd-build/unit-test.yaml +++ /dev/null @@ -1,20 +0,0 @@ -- hosts: all - become: yes - roles: - - role: config-golang - go_version: '1.16.12' - arch: arm64 - tasks: - - name: Build and test containerd - shell: - cmd: | - set -xe - set -o pipefail - apt-get update - 
apt-get install -y btrfs-tools libseccomp-dev git pkg-config - - go version - make build test | tee $LOGS_PATH/make_test.txt - chdir: '{{ zuul.project.src_dir }}' - executable: /bin/bash - environment: '{{ global_env }}' diff --git a/ADOPTERS.md b/ADOPTERS.md index 4d70cca..bbf99e7 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -12,10 +12,14 @@ including the Balena project listed below. **_[IBM Cloud Private (ICP)](https://www.ibm.com/cloud/private)_** - IBM's on-premises cloud offering has containerd as a "tech preview" CRI runtime for the Kubernetes offered within this product for the past two releases, and plans to fully migrate to containerd in a future release. -**_[Google Cloud Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/)_** - offers containerd as the CRI runtime in **beta** for recent versions of Kubernetes. +**_[Google Cloud Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/)_** - containerd has been offered in GKE since version 1.14 and has been the default runtime since version 1.19. It is also the only supported runtime for GKE Autopilot from the launch. [More details](https://cloud.google.com/kubernetes-engine/docs/concepts/using-containerd) **_[AWS Fargate](https://aws.amazon.com/fargate)_** - uses containerd + Firecracker (noted below) as the runtime and isolation technology for containers run in the Fargate platform. Fargate is a serverless, container-native compute offering from Amazon Web Services. +**_[Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/)_** - EKS optionally offers containerd as a CRI runtime starting with Kubernetes version 1.21. In Kubernetes 1.22 the default CRI runtime will be containerd. + +**_[Bottlerocket](https://aws.amazon.com/bottlerocket/)_** - Bottlerocket is a Linux distribution from Amazon Web Services purpose-built for containers using containerd as the core system runtime. 
+ **_Cloud Foundry_** - The [Guardian container manager](https://github.com/cloudfoundry/guardian) for CF has been using OCI runC directly with additional code from CF managing the container image and filesystem interactions, but have recently migrated to use containerd as a replacement for the extra code they had written around runC. **_Alibaba's PouchContainer_** - The Alibaba [PouchContainer](https://github.com/alibaba/pouch) project uses containerd as its runtime for a cloud native offering that has unique isolation and image distribution capabilities. @@ -32,7 +36,7 @@ including the Balena project listed below. **_BuildKit_** - The Moby project's [BuildKit](https://github.com/moby/buildkit) can use either runC or containerd as build execution backends for building container images. BuildKit support has also been built into the Docker engine in recent releases, making BuildKit provide the backend to the `docker build` command. -**_Azure acs-engine_** - Microsoft Azure's [acs-engine](https://github.com/Azure/acs-engine) open source project has customizable deployment of Kubernetes clusters, where containerd is a selectable container runtime. At some point in the future Azure's AKS service will default to use containerd as the CRI runtime for deployed Kubernetes clusters. +**_[Azure Kubernetes Service (AKS)](https://azure.microsoft.com/services/kubernetes-service)_** - Microsoft's managed Kubernetes offering uses containerd for Linux nodes running v1.19 or greater. Containerd for Windows nodes is currently in public preview. [More Details](https://docs.microsoft.com/azure/aks/cluster-configuration#container-runtime-configuration) **_Amazon Firecracker_** - The AWS [Firecracker VMM project](http://firecracker-microvm.io/) has extended containerd with a new snapshotter and v2 shim to allow containerd to drive virtualized container processes via their VMM implementation. 
More details on their containerd integration are available in [their GitHub project](https://github.com/firecracker-microvm/firecracker-containerd). @@ -42,6 +46,12 @@ including the Balena project listed below. **_Inclavare Containers_** - [Inclavare Containers](https://github.com/alibaba/inclavare-containers) is an innovation of container runtime with the novel approach for launching protected containers in hardware-assisted Trusted Execution Environment (TEE) technology, aka Enclave, which can prevent the untrusted entity, such as Cloud Service Provider (CSP), from accessing the sensitive and confidential assets in use. +**_VMware TKG_** - [Tanzu Kubernetes Grid](https://tanzu.vmware.com/kubernetes-grid) VMware's Multicloud Kubernetes offering uses containerd as the default CRI runtime. + +**_VMware TCE_** - [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) VMware's fully-featured, easy to manage, Kubernetes platform for learners and users. It is a freely available, community supported, and open source distribution of VMware Tanzu. It uses containerd as the default CRI runtime. + +**_[Talos Linux](https://www.talos.dev/)_** - Talos Linux is Linux designed for Kubernetes – secure, immutable, and minimal. Talos Linux is using containerd as the core system runtime and CRI implementation. + **_Other Projects_** - While the above list provides a cross-section of well known uses of containerd, the simplicity and clear API layer for containerd has inspired many smaller projects around providing simple container management platforms. 
Several examples of building higher layer functionality on top of the containerd base have come from various containerd community participants: - Michael Crosby's [boss](https://github.com/crosbymichael/boss) project, - Evan Hazlett's [stellar](https://github.com/ehazlett/stellar) project, diff --git a/BUILDING.md b/BUILDING.md index b50c780..286164d 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -14,8 +14,8 @@ This doc includes: To build the `containerd` daemon, and the `ctr` simple test client, the following build system dependencies are required: -* Go 1.13.x or above except 1.14.x -* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases)) +* Go 1.19.x or above +* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/protocolbuffers/protobuf/releases)) * Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via the build tag `no_btrfs`, removing this dependency. ## Build the development environment @@ -32,9 +32,9 @@ git clone https://github.com/containerd/containerd For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.11.4 release for a 64-bit Linux host: -``` -$ wget -c https://github.com/google/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip -$ sudo unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local +```sh +wget -c https://github.com/protocolbuffers/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip +sudo unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local ``` `containerd` uses [Btrfs](https://en.wikipedia.org/wiki/Btrfs) it means that you @@ -46,38 +46,20 @@ need to satisfy these dependencies in your system: At this point you are ready to build `containerd` yourself! 
-## Build runc +## Runc -`runc` is the default container runtime used by `containerd` and is required to -run containerd. While it is okay to download a runc binary and install that on +Runc is the default container runtime used by `containerd` and is required to +run containerd. While it is okay to download a `runc` binary and install that on the system, sometimes it is necessary to build runc directly when working with -container runtime development. You can skip this step if you already have the -correct version of `runc` installed. - -`runc` requires `libseccomp`. You may need to install the missing dependencies: - -* CentOS/Fedora: `yum install libseccomp libseccomp-devel` -* Debian/Ubuntu: `apt-get install libseccomp libseccomp-dev` - - -For the quick and dirty installation, you can use the following: - -``` -git clone https://github.com/opencontainers/runc -cd runc -make -sudo make install -``` - -Make sure to follow the guidelines for versioning in [RUNC.md](/docs/RUNC.md) for the -best results. +container runtime development. Make sure to follow the guidelines for versioning +in [RUNC.md](/docs/RUNC.md) for the best results. ## Build containerd `containerd` uses `make` to create a repeatable build flow. It means that you can run: -``` +```sh cd containerd make ``` @@ -86,22 +68,44 @@ This is going to build all the project binaries in the `./bin/` directory. You can move them in your global path, `/usr/local/bin` with: -```sudo +```sh sudo make install ``` +The install prefix can be changed by passing the `PREFIX` variable (defaults +to `/usr/local`). + +Note: if you set one of these vars, set them to the same values on all make stages +(build as well as install). + +If you want to prepend an additional prefix on actual installation (eg. 
packaging or chroot install), +you can pass it via `DESTDIR` variable: + +```sh +sudo make install DESTDIR=/tmp/install-x973234/ +``` + +The above command installs the `containerd` binary to `/tmp/install-x973234/usr/local/bin/containerd` + +The current `DESTDIR` convention is supported since containerd v1.6. +Older releases were using `DESTDIR` for a different purpose that is similar to `PREFIX`. + + When making any changes to the gRPC API, you can use the installed `protoc` compiler to regenerate the API generated code packages with: -```sudo +```sh make generate ``` > *Note*: Several build tags are currently available: -> * `no_btrfs`: A build tag disables building the btrfs snapshot driver. > * `no_cri`: A build tag disables building Kubernetes [CRI](http://blog.kubernetes.io/2016/12/container-runtime-interface-cri-in-kubernetes.html) support into containerd. > See [here](https://github.com/containerd/cri-containerd#build-tags) for build tags of CRI plugin. -> * `no_devmapper`: A build tag disables building the device mapper snapshot driver. +> * snapshotters (alphabetical order) +> * `no_aufs`: A build tag disables building the aufs snapshot driver. +> * `no_btrfs`: A build tag disables building the Btrfs snapshot driver. +> * `no_devmapper`: A build tag disables building the device mapper snapshot driver. +> * `no_zfs`: A build tag disables building the ZFS snapshot driver. > > For example, adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries** > Makefile target will disable the btrfs driver within the containerd Go build. 
@@ -117,26 +121,25 @@ Please refer to [RUNC.md](/docs/RUNC.md) for the currently supported version of You can build static binaries by providing a few variables to `make`: -```sudo -make EXTRA_FLAGS="-buildmode pie" \ - EXTRA_LDFLAGS='-linkmode external -extldflags "-fno-PIC -static"' \ - BUILDTAGS="netgo osusergo static_build" +```sh +make STATIC=1 ``` > *Note*: > - static build is discouraged > - static containerd binary does not support loading shared object plugins (`*.so`) +> - static build binaries are not position-independent # Via Docker container The following instructions assume you are at the parent directory of containerd source directory. -## Build containerd +## Build containerd in a container You can build `containerd` via a Linux-based Docker container. You can build an image from this `Dockerfile`: -``` +```dockerfile FROM golang RUN apt-get update && \ @@ -158,10 +161,11 @@ This mounts `containerd` repository You are now ready to [build](#build-containerd): ```sh - make && make install +make && make install ``` -## Build containerd and runc +## Build containerd and runc in a container + To have complete core container runtime, you will need both `containerd` and `runc`. It is possible to build both of these via Docker container. You can use `git` to checkout `runc`: @@ -177,7 +181,6 @@ FROM golang RUN apt-get update && \ apt-get install -y libbtrfs-dev libseccomp-dev - ``` In our Docker container we will build `runc` build, which includes @@ -246,6 +249,7 @@ go test -v -run . -test.root ``` Example output from directly running `go test` to execute the `TestContainerList` test: + ```sh sudo go test -v -run "TestContainerList" . -test.root INFO[0000] running tests against containerd revision=f2ae8a020a985a8d9862c9eb5ab66902c2888361 version=v1.0.0-beta.2-49-gf2ae8a0 @@ -255,6 +259,10 @@ PASS ok github.com/containerd/containerd 4.778s ``` +> *Note*: in order to run `sudo go` you need to +> - either keep user PATH environment variable. 
ex: `sudo "PATH=$PATH" env go test ` +> - or use `go test -exec` ex: `go test -exec sudo -v -run "TestTarWithXattr" ./archive/ -test.root` + ## Additional tools ### containerd-stress diff --git a/Makefile b/Makefile index 6728380..f1b28ce 100644 --- a/Makefile +++ b/Makefile @@ -15,16 +15,22 @@ # Go command to use for build GO ?= go +INSTALL ?= install # Root directory of the project (absolute path). ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # Base path used to install. -DESTDIR ?= /usr/local +# The files will be installed under `$(DESTDIR)/$(PREFIX)`. +# The convention of `DESTDIR` was changed in containerd v1.6. +PREFIX ?= /usr/local +DATADIR ?= $(PREFIX)/share +MANDIR ?= $(DATADIR)/man + TEST_IMAGE_LIST ?= # Used to populate variables in version package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) +VERSION ?= $(shell git describe --match 'v[0-9]*' --dirty='.m' --always) REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) PACKAGE=github.com/containerd/containerd SHIM_CGO_ENABLED ?= 0 @@ -57,6 +63,7 @@ else endif ifndef GODEBUG + EXTRA_LDFLAGS += -s -w DEBUG_GO_GCFLAGS := DEBUG_TAGS := else @@ -67,7 +74,7 @@ endif WHALE = "🇩" ONI = "👹" -RELEASE=containerd-$(VERSION:v%=%).${GOOS}-${GOARCH} +RELEASE=containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH} CRIRELEASE=cri-containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH} CRICNIRELEASE=cri-containerd-cni-$(VERSION:v%=%)-${GOOS}-${GOARCH} @@ -82,12 +89,23 @@ ifdef BUILDTAGS endif GO_BUILDTAGS ?= GO_BUILDTAGS += ${DEBUG_TAGS} -GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(GO_BUILDTAGS)",) -GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) $(EXTRA_LDFLAGS)' +ifneq ($(STATIC),) + GO_BUILDTAGS += osusergo netgo static_build +endif +GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(strip $(GO_BUILDTAGS))",) + +GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X 
$(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) $(EXTRA_LDFLAGS) +ifneq ($(STATIC),) + GO_LDFLAGS += -extldflags "-static" +endif +GO_LDFLAGS+=' + SHIM_GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) -extldflags "-static" $(EXTRA_LDFLAGS)' # Project packages. PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration) +API_PACKAGES=$(shell (cd api && $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration)) +NON_API_PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration | grep -v "containerd/api") TEST_REQUIRES_ROOT_PACKAGES=$(filter \ ${PACKAGES}, \ $(shell \ @@ -132,6 +150,9 @@ CRIDIR=$(OUTPUTDIR)/cri .PHONY: clean all AUTHORS build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release mandir install-man genman install-cri-deps cri-release cri-cni-release cri-integration install-deps bin/cri-integration.test .DEFAULT: default +# Forcibly set the default goal to all, in case an include above brought in a rule definition. +.DEFAULT_GOAL := all + all: binaries check: proto-fmt ## run all linters @@ -149,7 +170,13 @@ generate: protos protos: bin/protoc-gen-gogoctrd ## generate protobuf @echo "$(WHALE) $@" - @PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${PACKAGES} + @find . 
-path ./vendor -prune -false -o -name '*.pb.go' | xargs rm + $(eval TMPDIR := $(shell mktemp -d)) + @mv ${ROOTDIR}/vendor ${TMPDIR} + @(cd ${ROOTDIR}/api && PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${API_PACKAGES}) + @(PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${NON_API_PACKAGES}) + @mv ${TMPDIR}/vendor ${ROOTDIR} + @rm -rf ${TMPDIR} check-protos: protos ## check if protobufs needs to be generated again @echo "$(WHALE) $@" @@ -193,9 +220,19 @@ bin/cri-integration.test: cri-integration: binaries bin/cri-integration.test ## run cri integration tests @echo "$(WHALE) $@" - @./script/test/cri-integration.sh + @bash -x ./script/test/cri-integration.sh @rm -rf bin/cri-integration.test +# build runc shimv2 with failpoint control, only used by integration test +bin/containerd-shim-runc-fp-v1: integration/failpoint/cmd/containerd-shim-runc-fp-v1 FORCE + @echo "$(WHALE) $@" + @CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./integration/failpoint/cmd/containerd-shim-runc-fp-v1 + +# build CNI bridge plugin wrapper with failpoint support, only used by integration test +bin/cni-bridge-fp: integration/failpoint/cmd/cni-bridge-fp FORCE + @echo "$(WHALE) $@" + @$(GO) build ${GO_BUILD_FLAGS} -o $@ ./integration/failpoint/cmd/cni-bridge-fp + benchmark: ## run benchmarks tests @echo "$(WHALE) $@" @$(GO) test ${TESTFLAGS} -bench . 
-run Benchmark -test.root @@ -212,16 +249,16 @@ bin/%: cmd/% FORCE $(call BUILD_BINARY) bin/containerd-shim: cmd/containerd-shim FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220 - @echo "$(WHALE) bin/containerd-shim" - @CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim + @echo "$(WHALE) $@" + @CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim bin/containerd-shim-runc-v1: cmd/containerd-shim-runc-v1 FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220 - @echo "$(WHALE) bin/containerd-shim-runc-v1" - @CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runc-v1 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v1 + @echo "$(WHALE) $@" + @CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v1 bin/containerd-shim-runc-v2: cmd/containerd-shim-runc-v2 FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220 - @echo "$(WHALE) bin/containerd-shim-runc-v2" - @CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runc-v2 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v2 + @echo "$(WHALE) $@" + @CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v2 binaries: $(BINARIES) ## build binaries @echo "$(WHALE) $@" @@ -237,30 +274,31 @@ genman: man/containerd.8 man/ctr.8 man/containerd.8: FORCE @echo "$(WHALE) $@" - $(GO) run cmd/gen-manpages/main.go $(@F) $(@D) + $(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D) man/ctr.8: FORCE @echo "$(WHALE) $@" - $(GO) run cmd/gen-manpages/main.go 
$(@F) $(@D) + $(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D) man/%: docs/man/%.md FORCE @echo "$(WHALE) $@" go-md2man -in "$<" -out "$@" define installmanpage -mkdir -p $(DESTDIR)/man/man$(2); -gzip -c $(1) >$(DESTDIR)/man/man$(2)/$(3).gz; +$(INSTALL) -d $(DESTDIR)$(MANDIR)/man$(2); +gzip -c $(1) >$(DESTDIR)$(MANDIR)/man$(2)/$(3).gz; endef -install-man: +install-man: man @echo "$(WHALE) $@" $(foreach manpage,$(addprefix man/,$(MANPAGES)), $(call installmanpage,$(manpage),$(subst .,,$(suffix $(manpage))),$(notdir $(manpage)))) + releases/$(RELEASE).tar.gz: $(BINARIES) @echo "$(WHALE) $@" @rm -rf releases/$(RELEASE) releases/$(RELEASE).tar.gz - @install -d releases/$(RELEASE)/bin - @install $(BINARIES) releases/$(RELEASE)/bin + @$(INSTALL) -d releases/$(RELEASE)/bin + @$(INSTALL) $(BINARIES) releases/$(RELEASE)/bin @tar -czf releases/$(RELEASE).tar.gz -C releases/$(RELEASE) bin @rm -rf releases/$(RELEASE) @@ -271,18 +309,18 @@ release: releases/$(RELEASE).tar.gz # install of cri deps into release output directory ifeq ($(GOOS),windows) install-cri-deps: $(BINARIES) - mkdir -p $(CRIDIR) + $(INSTALL) -d $(CRIDIR) DESTDIR=$(CRIDIR) script/setup/install-cni-windows cp bin/* $(CRIDIR) else install-cri-deps: $(BINARIES) @rm -rf ${CRIDIR} - @install -d ${CRIDIR}/usr/local/bin - @install -D -m 755 bin/* ${CRIDIR}/usr/local/bin - @install -d ${CRIDIR}/opt/containerd/cluster + @$(INSTALL) -d ${CRIDIR}/usr/local/bin + @$(INSTALL) -D -m 755 bin/* ${CRIDIR}/usr/local/bin + @$(INSTALL) -d ${CRIDIR}/opt/containerd/cluster @cp -r contrib/gce ${CRIDIR}/opt/containerd/cluster/ - @install -d ${CRIDIR}/etc/systemd/system - @install -m 644 containerd.service ${CRIDIR}/etc/systemd/system + @$(INSTALL) -d ${CRIDIR}/etc/systemd/system + @$(INSTALL) -m 644 containerd.service ${CRIDIR}/etc/systemd/system echo "CONTAINERD_VERSION: '$(VERSION:v%=%)'" | tee ${CRIDIR}/opt/containerd/cluster/version DESTDIR=$(CRIDIR) script/setup/install-runc @@ -290,26 +328,30 @@ 
install-cri-deps: $(BINARIES) DESTDIR=$(CRIDIR) script/setup/install-critools DESTDIR=$(CRIDIR) script/setup/install-imgcrypt - @install -d $(CRIDIR)/bin - @install $(BINARIES) $(CRIDIR)/bin + @$(INSTALL) -d $(CRIDIR)/bin + @$(INSTALL) $(BINARIES) $(CRIDIR)/bin endif +$(CRIDIR)/cri-containerd.DEPRECATED.txt: + @mkdir -p $(CRIDIR) + @$(INSTALL) -m 644 releases/cri-containerd.DEPRECATED.txt $@ + ifeq ($(GOOS),windows) -releases/$(CRIRELEASE).tar.gz: install-cri-deps +releases/$(CRIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt @echo "$(WHALE) $@" @cd $(CRIDIR) && tar -czf ../../releases/$(CRIRELEASE).tar.gz * -releases/$(CRICNIRELEASE).tar.gz: install-cri-deps +releases/$(CRICNIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt @echo "$(WHALE) $@" @cd $(CRIDIR) && tar -czf ../../releases/$(CRICNIRELEASE).tar.gz * else -releases/$(CRIRELEASE).tar.gz: install-cri-deps +releases/$(CRIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt @echo "$(WHALE) $@" - @tar -czf releases/$(CRIRELEASE).tar.gz -C $(CRIDIR) etc/crictl.yaml etc/systemd usr opt/containerd + @tar -czf releases/$(CRIRELEASE).tar.gz -C $(CRIDIR) cri-containerd.DEPRECATED.txt etc/crictl.yaml etc/systemd usr opt/containerd -releases/$(CRICNIRELEASE).tar.gz: install-cri-deps +releases/$(CRICNIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt @echo "$(WHALE) $@" - @tar -czf releases/$(CRICNIRELEASE).tar.gz -C $(CRIDIR) etc usr opt + @tar -czf releases/$(CRICNIRELEASE).tar.gz -C $(CRIDIR) cri-containerd.DEPRECATED.txt etc usr opt endif cri-release: releases/$(CRIRELEASE).tar.gz @@ -341,15 +383,17 @@ clean-test: ## clean up debris from previously failed tests @rm -rf /run/containerd/fifo/* @rm -rf /run/containerd-test/* @rm -rf bin/cri-integration.test + @rm -rf bin/cni-bridge-fp + @rm -rf bin/containerd-shim-runc-fp-v1 install: ## install binaries @echo "$(WHALE) $@ $(BINARIES)" - @mkdir -p $(DESTDIR)/bin - @install 
$(BINARIES) $(DESTDIR)/bin + @$(INSTALL) -d $(DESTDIR)$(PREFIX)/bin + @$(INSTALL) $(BINARIES) $(DESTDIR)$(PREFIX)/bin uninstall: @echo "$(WHALE) $@" - @rm -f $(addprefix $(DESTDIR)/bin/,$(notdir $(BINARIES))) + @rm -f $(addprefix $(DESTDIR)$(PREFIX)/bin/,$(notdir $(BINARIES))) ifeq ($(GOOS),windows) install-deps: @@ -393,10 +437,23 @@ root-coverage: ## generate coverage profiles for unit tests that require root fi; \ done ) -vendor: ## vendor +vendor: ## ensure all the go.mod/go.sum files are up-to-date including vendor/ directory @echo "$(WHALE) $@" @$(GO) mod tidy @$(GO) mod vendor + @$(GO) mod verify + @(cd ${ROOTDIR}/integration/client && ${GO} mod tidy) + +verify-vendor: ## verify if all the go.mod/go.sum files are up-to-date + @echo "$(WHALE) $@" + $(eval TMPDIR := $(shell mktemp -d)) + @cp -R ${ROOTDIR} ${TMPDIR} + @(cd ${TMPDIR}/containerd && ${GO} mod tidy) + @(cd ${TMPDIR}/containerd/integration/client && ${GO} mod tidy) + @diff -r -u -q ${ROOTDIR} ${TMPDIR}/containerd + @rm -rf ${TMPDIR} + @${ROOTDIR}/script/verify-go-modules.sh integration/client + help: ## this help @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort diff --git a/Makefile.linux b/Makefile.linux index c338531..0541400 100644 --- a/Makefile.linux +++ b/Makefile.linux @@ -20,8 +20,10 @@ COMMANDS += containerd-shim containerd-shim-runc-v1 containerd-shim-runc-v2 # check GOOS for cross compile builds ifeq ($(GOOS),linux) - ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64 riscv64)) - GO_GCFLAGS += -buildmode=pie + ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64)) + ifeq ($(STATIC),) + GO_GCFLAGS += -buildmode=pie + endif endif endif diff --git a/Makefile.windows b/Makefile.windows index 56164e4..6e62a87 100644 --- a/Makefile.windows +++ b/Makefile.windows @@ -22,7 +22,11 @@ ifeq ($(GOARCH),amd64) TESTFLAGS_RACE= -race endif -BINARIES:=$(addsuffix .exe,$(BINARIES)) 
+WINDOWS_SHIM=bin/containerd-shim-runhcs-v1.exe +BINARIES := $(addsuffix .exe,$(BINARIES)) $(WINDOWS_SHIM) + +$(WINDOWS_SHIM): script/setup/install-runhcs-shim go.mod + DESTDIR=$(CURDIR)/bin $< bin/%.exe: cmd/% FORCE $(BUILD_BINARY) diff --git a/Protobuild.toml b/Protobuild.toml index d88fcd6..ccc4e79 100644 --- a/Protobuild.toml +++ b/Protobuild.toml @@ -31,28 +31,11 @@ plugins = ["grpc", "fieldpath"] "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types" "google/rpc/status.proto" = "github.com/gogo/googleapis/google/rpc" -[[overrides]] -prefixes = ["github.com/containerd/containerd/api/events"] -plugins = ["fieldpath"] # disable grpc for this package - -[[overrides]] -prefixes = ["github.com/containerd/containerd/api/services/ttrpc/events/v1"] -plugins = ["ttrpc", "fieldpath"] - [[overrides]] # enable ttrpc and disable fieldpath and grpc for the shim prefixes = ["github.com/containerd/containerd/runtime/v1/shim/v1", "github.com/containerd/containerd/runtime/v2/task"] plugins = ["ttrpc"] -# Aggregrate the API descriptors to lock down API changes. 
-[[descriptors]] -prefix = "github.com/containerd/containerd/api" -target = "api/next.pb.txt" -ignore_files = [ - "google/protobuf/descriptor.proto", - "gogoproto/gogo.proto" -] - # Lock down runc config [[descriptors]] prefix = "github.com/containerd/containerd/runtime/linux/runctypes" diff --git a/README.md b/README.md index 1ecec1f..f876079 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,9 @@ -![containerd banner](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/color/containerd-horizontal-color.png) +![containerd banner light mode](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/color/containerd-horizontal-color.png#gh-light-mode-only) +![containerd banner dark mode](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/white/containerd-horizontal-white.png#gh-dark-mode-only) [![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/containerd)](https://pkg.go.dev/github.com/containerd/containerd) [![Build Status](https://github.com/containerd/containerd/workflows/CI/badge.svg)](https://github.com/containerd/containerd/actions?query=workflow%3ACI) [![Nightlies](https://github.com/containerd/containerd/workflows/Nightly/badge.svg)](https://github.com/containerd/containerd/actions?query=workflow%3ANightly) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield) [![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1271/badge)](https://bestpractices.coreinfrastructure.org/projects/1271) @@ -21,7 +21,7 @@ We are a large inclusive OSS project that is welcoming help of any kind shape or * Documentation help is needed 
to make the product easier to consume and extend. * We need OSS community outreach / organizing help to get the word out; manage and create messaging and educational content; and to help with social media, community forums/groups, and google groups. -* We are actively inviting new [security advisors](https://github.com/containerd/project/blob/master/GOVERNANCE.md#security-advisors) to join the team. +* We are actively inviting new [security advisors](https://github.com/containerd/project/blob/main/GOVERNANCE.md#security-advisors) to join the team. * New sub-projects are being created, core and non-core that could use additional development help. * Each of the [containerd projects](https://github.com/containerd) has a list of issues currently being worked on or that need help resolving. - If the issue has not already been assigned to someone, or has not made recent progress and you are interested, please inquire. @@ -41,7 +41,7 @@ If you are interested in trying out containerd see our example at [Getting Start ## Nightly builds There are nightly builds available for download [here](https://github.com/containerd/containerd/actions?query=workflow%3ANightly). -Binaries are generated from `master` branch every night for `Linux` and `Windows`. +Binaries are generated from `main` branch every night for `Linux` and `Windows`. Please be aware: nightly builds might have critical bugs, it's not recommended for use in production and no support provided. @@ -68,6 +68,14 @@ your system. See more details in [Checkpoint and Restore](#checkpoint-and-restor Build requirements for developers are listed in [BUILDING](BUILDING.md). + +## Supported Registries + +Any registry which is compliant with the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec) +is supported by containerd. 
+ +For configuring registries, see [registry host configuration documentation](docs/hosts.md) + ## Features ### Client @@ -77,8 +85,11 @@ containerd offers a full client package to help you integrate containerd into yo ```go import ( + "context" + "github.com/containerd/containerd" "github.com/containerd/containerd/cio" + "github.com/containerd/containerd/namespaces" ) @@ -269,7 +280,7 @@ loaded for the user's shell environment. `cri` is a native plugin of containerd. Since containerd 1.1, the cri plugin is built into the release binaries and enabled by default. > **Note:** As of containerd 1.5, the `cri` plugin is merged into the containerd/containerd repo. For example, the source code previously stored under [`containerd/cri/pkg`](https://github.com/containerd/cri/tree/release/1.4/pkg) -was moved to [`containerd/containerd/pkg/cri` package](https://github.com/containerd/containerd/tree/master/pkg/cri). +was moved to [`containerd/containerd/pkg/cri` package](https://github.com/containerd/containerd/tree/main/pkg/cri). The `cri` plugin has reached GA status, representing that it is: * Feature complete @@ -289,7 +300,7 @@ A Kubernetes incubator project, [cri-tools](https://github.com/kubernetes-sigs/c * [CRI Plugin Testing Guide](./docs/cri/testing.md) * [Debugging Pods, Containers, and Images with `crictl`](./docs/cri/crictl.md) * [Configuring `cri` Plugins](./docs/cri/config.md) -* [Configuring containerd](https://github.com/containerd/containerd/blob/master/docs/man/containerd-config.8.md) +* [Configuring containerd](https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.8.md) ### Communication @@ -315,14 +326,14 @@ copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by ## Project details -**containerd** is the primary open source project within the broader containerd GitHub repository. +**containerd** is the primary open source project within the broader containerd GitHub organization. 
However, all projects within the repo have common maintainership, governance, and contributing guidelines which are stored in a `project` repository commonly for all containerd projects. Please find all these core project documents, including the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/RELEASES.md b/RELEASES.md index 3fda996..2476248 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -27,7 +27,7 @@ considered "pre-releases". ### Major and Minor Releases -Major and minor releases of containerd will be made from master. Releases of +Major and minor releases of containerd will be made from main. Releases of containerd will be marked with GPG signed tags and announced at https://github.com/containerd/containerd/releases. The tag will be of the format `v..` and should be made with the command `git tag @@ -43,7 +43,7 @@ done against that branch. Pre-releases, such as alphas, betas and release candidates will be conducted from their source branch. For major and minor releases, these releases will be -done from master. For patch releases, these pre-releases should be done within +done from main. For patch releases, these pre-releases should be done within the corresponding release branch. While pre-releases are done to assist in the stabilization process, no @@ -89,7 +89,7 @@ whichever is longer. 
Additionally, releases may have an extended security support period after the end of the active period to accept security backports. This timeframe will be decided by maintainers before the end of the active status. -The current state is available in the following table: +The current state is available in the following tables: | Release | Status | Start | End of Life | |---------|-------------|------------------|-------------------| @@ -100,12 +100,27 @@ The current state is available in the following table: | [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 | | [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 | | [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 | -| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.4) | Active | August 17, 2020 | max(August 17, 2021, release of 1.5.0 + 6 months) | -| [1.5](https://github.com/containerd/containerd/milestone/30) | Next | TBD | max(TBD+1 year, release of 1.6.0 + 6 months) | +| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.12) | Extended | August 17, 2020 | March 3, 2022 (Extended) | +| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.9) | Active | May 3, 2021 | October 28, 2022 | +| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.0) | Active | February 15, 2022 | max(February 15, 2023 or release of 1.7.0 + 6 months) | +| [1.7](https://github.com/containerd/containerd/milestone/42) | Next | TBD | TBD | Note that branches and release from before 1.0 may not follow these rules. -This table should be updated as part of the release preparation process. 
+| CRI-Containerd Version | Containerd Version | Kubernetes Version | CRI Version | +|------------------------|--------------------|--------------------|--------------| +| v1.0.0-alpha.x | | 1.7, 1.8 | v1alpha1 | +| v1.0.0-beta.x | | 1.9 | v1alpha1 | +| End-Of-Life | v1.1 (End-Of-Life) | 1.10+ | v1alpha2 | +| | v1.2 (End-Of-Life) | 1.10+ | v1alpha2 | +| | v1.3 (End-Of-Life) | 1.12+ | v1alpha2 | +| | v1.4 | 1.19+ | v1alpha2 | +| | v1.5 | 1.20+ | v1alpha2 | +| | v1.6 | 1.23+ | v1, v1alpha2 | + +**Note:** The support table above specifies the Kubernetes Version that was supported at time of release of the containerd - cri integration and Kubernetes only supports n-3 minor release versions. + +These tables should be updated as part of the release preparation process. ### Backporting @@ -115,11 +130,11 @@ will be features for the next _minor_ or _major_ release. For the most part, this process is straightforward and we are here to help make it as smooth as possible. -If there are important fixes that need to be backported, please let use know in +If there are important fixes that need to be backported, please let us know in one of three ways: 1. Open an issue. -2. Open a PR with cherry-picked change from master. +2. Open a PR with cherry-picked change from main. 3. Open a PR with a ported fix. __If you are reporting a security issue, please reach out discreetly at security@containerd.io__. @@ -127,10 +142,10 @@ Remember that backported PRs must follow the versioning guidelines from this doc Any release that is "active" can accept backports. Opening a backport PR is fairly straightforward. The steps differ depending on whether you are pulling -a fix from master or need to draft a new commit specific to a particular +a fix from main or need to draft a new commit specific to a particular branch. -To cherry pick a straightforward commit from master, simply use the cherry pick +To cherry pick a straightforward commit from main, simply use the cherry pick process: 1. 
Pick the branch to which you want backported, usually in the format @@ -154,7 +169,7 @@ process: ``` Make sure to replace `stevvooe` with whatever fork you are using to open - the PR. When you open the PR, make sure to switch `master` with whatever + the PR. When you open the PR, make sure to switch `main` with whatever release branch you are targeting with the fix. Make sure the PR title has `[]` prefixed. e.g.: @@ -162,11 +177,11 @@ process: [release/1.4] Fix foo in bar ``` -If there is no existing fix in master, you should first fix the bug in master, +If there is no existing fix in main, you should first fix the bug in main, or ask us a maintainer or contributor to do it via an issue. Once that PR is completed, open a PR using the process above. -Only when the bug is not seen in master and must be made for the specific +Only when the bug is not seen in main and must be made for the specific release branch should you open a PR with new code. ## Public API Stability @@ -177,12 +192,12 @@ containerd versions: | Component | Status | Stabilized Version | Links | |------------------|----------|--------------------|---------------| -| GRPC API | Stable | 1.0 | [api/](api) | +| GRPC API | Stable | 1.0 | [gRPC API](#grpc-api) | | Metrics API | Stable | 1.0 | - | | Runtime Shim API | Stable | 1.2 | - | -| Daemon Config | Stable | 1.0 | - | +| Daemon Config | Stable | 1.0 | - | +| CRI GRPC API | Stable | 1.6 (_CRI v1_) | [cri-api](https://github.com/kubernetes/cri-api/tree/master/pkg/apis/runtime/v1) | | Go client API | Unstable | _future_ | [godoc](https://godoc.org/github.com/containerd/containerd) | -| CRI GRPC API | Unstable | v1alpha2 _current_ | [cri-api](https://github.com/kubernetes/cri-api/tree/master/pkg/apis/runtime/v1alpha2) | | `ctr` tool | Unstable | Out of scope | - | From the version stated in the above table, that component must adhere to the @@ -201,7 +216,7 @@ version jump. 
To ensure compatibility, we have collected the entire GRPC API symbol set into a single file. At each _minor_ release of containerd, we will move the current `next.pb.txt` file to a file named for the minor version, such as `1.0.pb.txt`, -enumerating the support services and messages. See [api/](api) for details. +enumerating the support services and messages. Note that new services may be added in _minor_ releases. New service methods and new fields on messages may be added if they are optional. @@ -321,9 +336,10 @@ against total impact. The deprecated features are shown in the following table: -| Component | Deprecation release | Target release for removal | Recommendation | -|----------------------------------------------------------------------|---------------------|----------------------------|-------------------------------| -| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | -| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | -| config.toml `version = 1` | containerd v1.5 | containerd v2.0 | Use config.toml `version = 2` | -| Built-in `aufs` snapshotter | containerd v1.5 | containerd v2.0 | Use `overlayfs` snapshotter | +| Component | Deprecation release | Target release for removal | Recommendation | +|----------------------------------------------------------------------|---------------------|----------------------------|-----------------------------------| +| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | +| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | +| config.toml `version = 1` | containerd v1.5 | containerd v2.0 | Use config.toml `version = 2` | +| Built-in `aufs` snapshotter | containerd v1.5 | containerd v2.0 
| Use `overlayfs` snapshotter | +| `cri-containerd-*.tar.gz` release bundles | containerd v1.6 | containerd v2.0 | Use `containerd-*.tar.gz` bundles | diff --git a/Vagrantfile b/Vagrantfile index 2d790a7..71946f0 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -15,9 +15,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Vagrantfile for cgroup2 and SELinux +# Vagrantfile for Fedora and EL Vagrant.configure("2") do |config| - config.vm.box = "fedora/34-cloud-base" + config.vm.box = ENV["BOX"] || "fedora/37-cloud-base" + config.vm.box_version = ENV["BOX_VERSION"] memory = 4096 cpus = 2 config.vm.provider :virtualbox do |v| @@ -29,6 +30,8 @@ Vagrant.configure("2") do |config| v.cpus = cpus end + config.vm.synced_folder ".", "/vagrant", type: "rsync" + # Disabled by default. To run: # vagrant up --provision-with=upgrade-packages # To upgrade only specific packages: @@ -67,30 +70,41 @@ Vagrant.configure("2") do |config| libselinux-devel \ lsof \ make \ + strace \ ${INSTALL_PACKAGES} SHELL end + # EL does not have /usr/local/{bin,sbin} in the PATH by default + config.vm.provision "setup-etc-environment", type: "shell", run: "once" do |sh| + sh.upload_path = "/tmp/vagrant-setup-etc-environment" + sh.inline = <<~SHELL + #!/usr/bin/env bash + set -eux -o pipefail + cat >> /etc/environment <> /etc/environment <> /etc/profile.d/sh.local < /tmp/containerd.log + cat /tmp/containerd.log systemctl stop containerd } selinux=$(getenforce) @@ -253,7 +270,32 @@ EOF fi trap cleanup EXIT ctr version - critest --parallel=$(nproc) --report-dir="${REPORT_DIR}" --ginkgo.skip='HostIpc is true' + critest --parallel=$[$(nproc)+2] --ginkgo.skip='HostIpc is true' --report-dir="${REPORT_DIR}" + SHELL + end + + # Rootless Podman is used for testing CRI-in-UserNS + # (We could use rootless nerdctl, but we are using Podman here because it is available in dnf) + config.vm.provision "install-rootless-podman", type: "shell", run: "never" do 
|sh| + sh.upload_path = "/tmp/vagrant-install-rootless-podman" + sh.inline = <<~SHELL + #!/usr/bin/env bash + set -eux -o pipefail + # Delegate cgroup v2 controllers to rootless + mkdir -p /etc/systemd/system/user@.service.d + cat > /etc/systemd/system/user@.service.d/delegate.conf << EOF +[Service] +Delegate=yes +EOF + systemctl daemon-reload + # Install Podman + dnf install -y podman + # Configure Podman to resolve `golang` to `docker.io/library/golang` + mkdir -p /etc/containers + cat > /etc/containers/registries.conf < 0 { + i -= len(m.RuntimePath) + copy(dAtA[i:], m.RuntimePath) + i = encodeVarintTasks(dAtA, i, uint64(len(m.RuntimePath))) + i-- + dAtA[i] = 0x52 + } if m.Options != nil { { size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) @@ -3198,6 +3208,10 @@ func (m *CreateTaskRequest) Size() (n int) { l = m.Options.Size() n += 1 + l + sovTasks(uint64(l)) } + l = len(m.RuntimePath) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3747,6 +3761,7 @@ func (this *CreateTaskRequest) String() string { `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, `Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "types.Descriptor", 1) + `,`, `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`, + `RuntimePath:` + fmt.Sprintf("%v", this.RuntimePath) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -4385,6 +4400,38 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTasks + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTasks(dAtA[iNdEx:]) diff --git a/api/services/tasks/v1/tasks.proto b/api/services/tasks/v1/tasks.proto index 2fe72c6..6299c76 100644 --- a/api/services/tasks/v1/tasks.proto +++ b/api/services/tasks/v1/tasks.proto @@ -88,6 +88,8 @@ message CreateTaskRequest { containerd.types.Descriptor checkpoint = 8; google.protobuf.Any options = 9; + + string runtime_path = 10; } message CreateTaskResponse { diff --git a/api/services/ttrpc/events/v1/doc.go b/api/services/ttrpc/events/v1/doc.go index b7f86da..d3d9839 100644 --- a/api/services/ttrpc/events/v1/doc.go +++ b/api/services/ttrpc/events/v1/doc.go @@ -14,5 +14,5 @@ limitations under the License. */ -// Package events defines the event pushing and subscription service. +// Package events defines the ttrpc event service. package events diff --git a/api/services/version/v1/doc.go b/api/services/version/v1/doc.go new file mode 100644 index 0000000..c5c0b85 --- /dev/null +++ b/api/services/version/v1/doc.go @@ -0,0 +1,18 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package version defines the version service. 
+package version diff --git a/api/types/task/doc.go b/api/types/task/doc.go new file mode 100644 index 0000000..e10c7a4 --- /dev/null +++ b/api/types/task/doc.go @@ -0,0 +1,18 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package task defines the task service. +package task diff --git a/archive/compression/compression.go b/archive/compression/compression.go index a883e4d..ceceb21 100644 --- a/archive/compression/compression.go +++ b/archive/compression/compression.go @@ -21,15 +21,16 @@ import ( "bytes" "compress/gzip" "context" + "encoding/binary" "fmt" "io" "os" - "os/exec" "strconv" "sync" "github.com/containerd/containerd/log" "github.com/klauspost/compress/zstd" + exec "golang.org/x/sys/execabs" ) type ( @@ -125,17 +126,52 @@ func (r *bufferedReader) Peek(n int) ([]byte, error) { return r.buf.Peek(n) } +const ( + zstdMagicSkippableStart = 0x184D2A50 + zstdMagicSkippableMask = 0xFFFFFFF0 +) + +var ( + gzipMagic = []byte{0x1F, 0x8B, 0x08} + zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} +) + +type matcher = func([]byte) bool + +func magicNumberMatcher(m []byte) matcher { + return func(source []byte) bool { + return bytes.HasPrefix(source, m) + } +} + +// zstdMatcher detects zstd compression algorithm. +// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. +// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. 
+func zstdMatcher() matcher { + return func(source []byte) bool { + if bytes.HasPrefix(source, zstdMagic) { + // Zstandard frame + return true + } + // skippable frame + if len(source) < 8 { + return false + } + // magic number from 0x184D2A50 to 0x184D2A5F. + if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { + return true + } + return false + } +} + // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Gzip: {0x1F, 0x8B, 0x08}, - Zstd: {0x28, 0xb5, 0x2f, 0xfd}, + for compression, fn := range map[Compression]matcher{ + Gzip: magicNumberMatcher(gzipMagic), + Zstd: zstdMatcher(), } { - if len(source) < len(m) { - // Len too short - continue - } - if bytes.Equal(m, source[:len(m)]) { + if fn(source) { return compression } } diff --git a/archive/compression/compression_test.go b/archive/compression/compression_test.go index 7f00e9f..78daf38 100644 --- a/archive/compression/compression_test.go +++ b/archive/compression/compression_test.go @@ -20,15 +20,15 @@ import ( "bytes" "compress/gzip" "context" + "crypto/rand" "io" - "io/ioutil" - "math/rand" "os" - "os/exec" "path/filepath" "runtime" "strings" "testing" + + exec "golang.org/x/sys/execabs" ) func TestMain(m *testing.M) { @@ -79,7 +79,7 @@ func testCompressDecompress(t *testing.T, size int, compression Compression) Dec if err != nil { t.Fatal(err) } - decompressed, err := ioutil.ReadAll(decompressor) + decompressed, err := io.ReadAll(decompressor) if err != nil { t.Fatal(err) } @@ -122,10 +122,7 @@ func TestCompressDecompressUncompressed(t *testing.T) { func TestDetectPigz(t *testing.T) { // Create fake PATH with unpigz executable, make sure detectPigz can find it - tempPath, err := ioutil.TempDir("", "containerd_temp_") - if err != nil { - t.Fatal(err) - } + tempPath := t.TempDir() filename := "unpigz" if runtime.GOOS == "windows" { @@ -134,15 
+131,11 @@ func TestDetectPigz(t *testing.T) { fullPath := filepath.Join(tempPath, filename) - if err := ioutil.WriteFile(fullPath, []byte(""), 0111); err != nil { + if err := os.WriteFile(fullPath, []byte(""), 0111); err != nil { t.Fatal(err) } - defer os.RemoveAll(tempPath) - - oldPath := os.Getenv("PATH") - os.Setenv("PATH", tempPath) - defer os.Setenv("PATH", oldPath) + t.Setenv("PATH", tempPath) if pigzPath := detectPigz(); pigzPath == "" { t.Fatal("failed to detect pigz path") @@ -150,8 +143,7 @@ func TestDetectPigz(t *testing.T) { t.Fatalf("wrong pigz found: %s != %s", pigzPath, fullPath) } - os.Setenv(disablePigzEnv, "1") - defer os.Unsetenv(disablePigzEnv) + t.Setenv(disablePigzEnv, "1") if pigzPath := detectPigz(); pigzPath != "" { t.Fatalf("disable via %s doesn't work", disablePigzEnv) @@ -164,7 +156,7 @@ func TestCmdStream(t *testing.T) { t.Fatal(err) } - buf, err := ioutil.ReadAll(out) + buf, err := io.ReadAll(out) if err != nil { t.Fatalf("failed to read from stdout: %s", err) } @@ -180,7 +172,7 @@ func TestCmdStreamBad(t *testing.T) { t.Fatalf("failed to start command: %v", err) } - if buf, err := ioutil.ReadAll(out); err == nil { + if buf, err := io.ReadAll(out); err == nil { t.Fatal("command should have failed") } else if err.Error() != "exit status 1: bad result\n" { t.Fatalf("wrong error: %s", err.Error()) @@ -188,3 +180,39 @@ func TestCmdStreamBad(t *testing.T) { t.Fatalf("wrong output: %s", string(buf)) } } + +func TestDetectCompressionZstd(t *testing.T) { + for _, tc := range []struct { + source []byte + expected Compression + }{ + { + // test zstd compression without skippable frames. + source: []byte{ + 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 + 0x04, 0x00, 0x31, 0x00, 0x00, // frame header + 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" + 0x16, 0x0e, 0x21, 0xc3, // content checksum + }, + expected: Zstd, + }, + { + // test zstd compression with skippable frames. 
+ source: []byte{ + 0x50, 0x2a, 0x4d, 0x18, // magic number of skippable frame: 0x184D2A50 to 0x184D2A5F + 0x04, 0x00, 0x00, 0x00, // frame size + 0x5d, 0x00, 0x00, 0x00, // user data + 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 + 0x04, 0x00, 0x31, 0x00, 0x00, // frame header + 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" + 0x16, 0x0e, 0x21, 0xc3, // content checksum + }, + expected: Zstd, + }, + } { + compression := DetectCompression(tc.source) + if compression != tc.expected { + t.Fatalf("Unexpected compression %v, expected %v", compression, tc.expected) + } + } +} diff --git a/archive/issues_test.go b/archive/issues_test.go index 9d54c7c..de6d090 100644 --- a/archive/issues_test.go +++ b/archive/issues_test.go @@ -19,7 +19,6 @@ package archive import ( "bytes" "context" - "io/ioutil" "os" "path/filepath" "strings" @@ -37,11 +36,7 @@ func TestPrefixHeaderReadable(t *testing.T) { // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") - tmpDir, err := ioutil.TempDir("", "prefix-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() r, err := compression.DecompressStream(bytes.NewReader(testFile)) if err != nil { diff --git a/archive/tar.go b/archive/tar.go index 78a2873..2023db3 100644 --- a/archive/tar.go +++ b/archive/tar.go @@ -19,6 +19,8 @@ package archive import ( "archive/tar" "context" + "errors" + "fmt" "io" "os" "path/filepath" @@ -28,9 +30,10 @@ import ( "syscall" "time" + "github.com/containerd/containerd/archive/tarheader" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/userns" 
"github.com/containerd/continuity/fs" - "github.com/pkg/errors" ) var bufPool = &sync.Pool{ @@ -48,12 +51,15 @@ var errInvalidArchive = errors.New("invalid archive") // Produces a tar using OCI style file markers for deletions. Deleted // files will be prepended with the prefix ".wh.". This style is // based off AUFS whiteouts. -// See https://github.com/opencontainers/image-spec/blob/master/layer.md +// See https://github.com/opencontainers/image-spec/blob/main/layer.md func Diff(ctx context.Context, a, b string) io.ReadCloser { r, w := io.Pipe() go func() { err := WriteDiff(ctx, w, a, b) + if err != nil { + log.G(ctx).WithError(err).Debugf("write diff failed") + } if err = w.CloseWithError(err); err != nil { log.G(ctx).WithError(err).Debugf("closing tar pipe failed") } @@ -68,12 +74,12 @@ func Diff(ctx context.Context, a, b string) io.ReadCloser { // Produces a tar using OCI style file markers for deletions. Deleted // files will be prepended with the prefix ".wh.". This style is // based off AUFS whiteouts. -// See https://github.com/opencontainers/image-spec/blob/master/layer.md +// See https://github.com/opencontainers/image-spec/blob/main/layer.md func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffOpt) error { var options WriteDiffOptions for _, opt := range opts { if err := opt(&options); err != nil { - return errors.Wrap(err, "failed to apply option") + return fmt.Errorf("failed to apply option: %w", err) } } if options.writeDiffFunc == nil { @@ -89,12 +95,12 @@ func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffO // Produces a tar using OCI style file markers for deletions. Deleted // files will be prepended with the prefix ".wh.". This style is // based off AUFS whiteouts. 
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md +// See https://github.com/opencontainers/image-spec/blob/main/layer.md func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, _ WriteDiffOptions) error { - cw := newChangeWriter(w, b) + cw := NewChangeWriter(w, b) err := fs.Changes(ctx, a, b, cw.HandleChange) if err != nil { - return errors.Wrap(err, "failed to create diff tar stream") + return fmt.Errorf("failed to create diff tar stream: %w", err) } return cw.Close() } @@ -102,7 +108,7 @@ func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, _ WriteDiffOp const ( // whiteoutPrefix prefix means file is a whiteout. If this is followed by a // filename this means that file has been removed from the base layer. - // See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts + // See https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts whiteoutPrefix = ".wh." // whiteoutMetaPrefix prefix means whiteout has a special meaning and is not @@ -115,17 +121,19 @@ const ( whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq" paxSchilyXattr = "SCHILY.xattr." + + userXattrPrefix = "user." ) // Apply applies a tar stream of an OCI style diff tar. 
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets +// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int64, error) { root = filepath.Clean(root) var options ApplyOptions for _, opt := range opts { if err := opt(&options); err != nil { - return 0, errors.Wrap(err, "failed to apply option") + return 0, fmt.Errorf("failed to apply option: %w", err) } } if options.Filter == nil { @@ -140,7 +148,7 @@ func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int // applyNaive applies a tar stream of an OCI style diff tar to a directory // applying each file as either a whole file or whiteout. -// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets +// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) { var ( dirs []*tar.Header @@ -233,7 +241,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti ppath, base := filepath.Split(hdr.Name) ppath, err = fs.RootPath(root, ppath) if err != nil { - return 0, errors.Wrap(err, "failed to get root path") + return 0, fmt.Errorf("failed to get root path: %w", err) } // Join to root before joining to parent path to ensure relative links are @@ -263,7 +271,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti } writeFile, err := convertWhiteout(hdr, path) if err != nil { - return 0, errors.Wrapf(err, "failed to convert whiteout file %q", hdr.Name) + return 0, fmt.Errorf("failed to convert whiteout file %q: %w", hdr.Name, err) } if !writeFile { continue @@ -324,6 +332,7 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header } } + //nolint:staticcheck // TypeRegA is deprecated but we may 
still receive an external tar with TypeRegA case tar.TypeReg, tar.TypeRegA: file, err := openFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdrInfo.Mode()) if err != nil { @@ -370,12 +379,16 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header return nil default: - return errors.Errorf("unhandled tar header type %d\n", hdr.Typeflag) + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if runtime.GOOS != "windows" { if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { + err = fmt.Errorf("failed to Lchown %q for UID %d, GID %d: %w", path, hdr.Uid, hdr.Gid, err) + if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { + err = fmt.Errorf("%w (Hint: try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)", err) + } return err } } @@ -384,11 +397,19 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header if strings.HasPrefix(key, paxSchilyXattr) { key = key[len(paxSchilyXattr):] if err := setxattr(path, key, value); err != nil { + if errors.Is(err, syscall.EPERM) && strings.HasPrefix(key, userXattrPrefix) { + // In the user.* namespace, only regular files and directories can have extended attributes. + // See https://man7.org/linux/man-pages/man7/xattr.7.html for details. + if fi, err := os.Lstat(path); err == nil && (!fi.Mode().IsRegular() && !fi.Mode().IsDir()) { + log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key) + continue + } + } if errors.Is(err, syscall.ENOTSUP) { log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key) continue } - return err + return fmt.Errorf("failed to setxattr %q for key %q: %w", path, key, err) } } } @@ -461,7 +482,17 @@ func mkparent(ctx context.Context, path, root string, parents []string) error { return nil } -type changeWriter struct { +// ChangeWriter provides tar stream from filesystem change information. +// The privided tar stream is styled as an OCI layer. 
Change information +// (add/modify/delete/unmodified) for each file needs to be passed to this +// writer through HandleChange method. +// +// This should be used combining with continuity's diff computing functionality +// (e.g. `fs.Change` of github.com/containerd/continuity/fs). +// +// See also https://github.com/opencontainers/image-spec/blob/main/layer.md for details +// about OCI layers +type ChangeWriter struct { tw *tar.Writer source string whiteoutT time.Time @@ -470,8 +501,11 @@ type changeWriter struct { addedDirs map[string]struct{} } -func newChangeWriter(w io.Writer, source string) *changeWriter { - return &changeWriter{ +// NewChangeWriter returns ChangeWriter that writes tar stream of the source directory +// to the privided writer. Change information (add/modify/delete/unmodified) for each +// file needs to be passed through HandleChange method. +func NewChangeWriter(w io.Writer, source string) *ChangeWriter { + return &ChangeWriter{ tw: tar.NewWriter(w), source: source, whiteoutT: time.Now(), @@ -481,7 +515,10 @@ func newChangeWriter(w io.Writer, source string) *changeWriter { } } -func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error { +// HandleChange receives filesystem change information and reflect that information to +// the result tar stream. This function implements `fs.ChangeFunc` of continuity +// (github.com/containerd/continuity/fs) and should be used with that package. 
+func (cw *ChangeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error { if err != nil { return err } @@ -501,7 +538,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e return err } if err := cw.tw.WriteHeader(hdr); err != nil { - return errors.Wrap(err, "failed to write whiteout header") + return fmt.Errorf("failed to write whiteout header: %w", err) } } else { var ( @@ -519,7 +556,8 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e } } - hdr, err := tar.FileInfoHeader(f, link) + // Use FileInfoHeaderNoLookups to avoid propagating user names and group names from the host + hdr, err := tarheader.FileInfoHeaderNoLookups(f, link) if err != nil { return err } @@ -536,12 +574,12 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e if strings.HasPrefix(name, string(filepath.Separator)) { name, err = filepath.Rel(string(filepath.Separator), name) if err != nil { - return errors.Wrap(err, "failed to make path relative") + return fmt.Errorf("failed to make path relative: %w", err) } } name, err = tarName(name) if err != nil { - return errors.Wrap(err, "cannot canonicalize path") + return fmt.Errorf("cannot canonicalize path: %w", err) } // suffix with '/' for directories if f.IsDir() && !strings.HasSuffix(name, "/") { @@ -550,7 +588,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e hdr.Name = name if err := setHeaderForSpecialDevice(hdr, name, f); err != nil { - return errors.Wrap(err, "failed to set device headers") + return fmt.Errorf("failed to set device headers: %w", err) } // additionalLinks stores file names which must be linked to @@ -578,8 +616,8 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e } if capability, err := getxattr(source, "security.capability"); err != nil { - return errors.Wrap(err, "failed to get capabilities xattr") - } else if capability != nil { + return 
fmt.Errorf("failed to get capabilities xattr: %w", err) + } else if len(capability) > 0 { if hdr.PAXRecords == nil { hdr.PAXRecords = map[string]string{} } @@ -590,19 +628,19 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e return err } if err := cw.tw.WriteHeader(hdr); err != nil { - return errors.Wrap(err, "failed to write file header") + return fmt.Errorf("failed to write file header: %w", err) } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { file, err := open(source) if err != nil { - return errors.Wrapf(err, "failed to open path: %v", source) + return fmt.Errorf("failed to open path: %v: %w", source, err) } defer file.Close() n, err := copyBuffered(context.TODO(), cw.tw, file) if err != nil { - return errors.Wrap(err, "failed to copy") + return fmt.Errorf("failed to copy: %w", err) } if n != hdr.Size { return errors.New("short write copying file") @@ -621,7 +659,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e return err } if err := cw.tw.WriteHeader(hdr); err != nil { - return errors.Wrap(err, "failed to write file header") + return fmt.Errorf("failed to write file header: %w", err) } } } @@ -629,14 +667,15 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e return nil } -func (cw *changeWriter) Close() error { +// Close closes this writer. 
+func (cw *ChangeWriter) Close() error { if err := cw.tw.Close(); err != nil { - return errors.Wrap(err, "failed to close tar writer") + return fmt.Errorf("failed to close tar writer: %w", err) } return nil } -func (cw *changeWriter) includeParents(hdr *tar.Header) error { +func (cw *ChangeWriter) includeParents(hdr *tar.Header) error { if cw.addedDirs == nil { return nil } @@ -744,7 +783,7 @@ func validateWhiteout(path string) error { dir += string(filepath.Separator) } if !strings.HasPrefix(originalPath, dir) { - return errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base) + return fmt.Errorf("invalid whiteout name: %v: %w", base, errInvalidArchive) } } return nil diff --git a/archive/tar_freebsd.go b/archive/tar_freebsd.go index ce4dffd..fb5abff 100644 --- a/archive/tar_freebsd.go +++ b/archive/tar_freebsd.go @@ -1,5 +1,3 @@ -// +build freebsd - /* Copyright The containerd Authors. diff --git a/archive/tar_linux_test.go b/archive/tar_linux_test.go index 2ef8d43..bfe056f 100644 --- a/archive/tar_linux_test.go +++ b/archive/tar_linux_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -23,7 +21,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "strings" "testing" @@ -34,17 +31,12 @@ import ( "github.com/containerd/containerd/snapshots/overlay/overlayutils" "github.com/containerd/continuity/fs" "github.com/containerd/continuity/fs/fstest" - "github.com/pkg/errors" ) func TestOverlayApply(t *testing.T) { testutil.RequiresRoot(t) - base, err := ioutil.TempDir("", "test-ovl-diff-apply-") - if err != nil { - t.Fatalf("unable to create temp dir: %+v", err) - } - defer os.RemoveAll(base) + base := t.TempDir() if err := overlayutils.Supported(base); err != nil { t.Skipf("skipping because overlay is not supported %v", err) @@ -59,11 +51,7 @@ func TestOverlayApply(t *testing.T) { func TestOverlayApplyNoParents(t *testing.T) { testutil.RequiresRoot(t) - base, err := ioutil.TempDir("", "test-ovl-diff-apply-") - if err != nil { - t.Fatalf("unable to create temp dir: %+v", err) - } - defer os.RemoveAll(base) + base := t.TempDir() if err := overlayutils.Supported(base); err != nil { t.Skipf("skipping because overlay is not supported %v", err) @@ -71,11 +59,11 @@ func TestOverlayApplyNoParents(t *testing.T) { fstest.FSSuite(t, overlayDiffApplier{ tmp: base, diff: func(ctx context.Context, w io.Writer, a, b string, _ ...WriteDiffOpt) error { - cw := newChangeWriter(w, b) + cw := NewChangeWriter(w, b) cw.addedDirs = nil err := fs.Changes(ctx, a, b, cw.HandleChange) if err != nil { - return errors.Wrap(err, "failed to create diff tar stream") + return fmt.Errorf("failed to create diff tar stream: %w", err) } return cw.Close() }, @@ -98,9 +86,9 @@ type overlayContext struct { type contextKey struct{} func (d overlayDiffApplier) TestContext(ctx context.Context) (context.Context, func(), error) { - merged, err := ioutil.TempDir(d.tmp, "merged") + merged, err := os.MkdirTemp(d.tmp, "merged") if err != nil { - return ctx, nil, errors.Wrap(err, "failed to make merged dir") + return ctx, nil, fmt.Errorf("failed to make merged dir: %w", err) } oc := 
&overlayContext{ @@ -119,9 +107,9 @@ func (d overlayDiffApplier) TestContext(ctx context.Context) (context.Context, f func (d overlayDiffApplier) Apply(ctx context.Context, a fstest.Applier) (string, func(), error) { oc := ctx.Value(contextKey{}).(*overlayContext) - applyCopy, err := ioutil.TempDir(d.tmp, "apply-copy-") + applyCopy, err := os.MkdirTemp(d.tmp, "apply-copy-") if err != nil { - return "", nil, errors.Wrap(err, "failed to create temp dir") + return "", nil, fmt.Errorf("failed to create temp dir: %w", err) } defer os.RemoveAll(applyCopy) @@ -131,33 +119,33 @@ func (d overlayDiffApplier) Apply(ctx context.Context, a fstest.Applier) (string } if err = fs.CopyDir(applyCopy, base); err != nil { - return "", nil, errors.Wrap(err, "failed to copy base") + return "", nil, fmt.Errorf("failed to copy base: %w", err) } if err := a.Apply(applyCopy); err != nil { - return "", nil, errors.Wrap(err, "failed to apply changes to copy of base") + return "", nil, fmt.Errorf("failed to apply changes to copy of base: %w", err) } buf := bytes.NewBuffer(nil) if err := d.diff(ctx, buf, base, applyCopy); err != nil { - return "", nil, errors.Wrap(err, "failed to create diff") + return "", nil, fmt.Errorf("failed to create diff: %w", err) } if oc.mounted { if err := mount.Unmount(oc.merged, 0); err != nil { - return "", nil, errors.Wrap(err, "failed to unmount") + return "", nil, fmt.Errorf("failed to unmount: %w", err) } oc.mounted = false } - next, err := ioutil.TempDir(d.tmp, "lower-") + next, err := os.MkdirTemp(d.tmp, "lower-") if err != nil { - return "", nil, errors.Wrap(err, "failed to create temp dir") + return "", nil, fmt.Errorf("failed to create temp dir: %w", err) } if _, err = Apply(ctx, next, buf, WithConvertWhiteout(OverlayConvertWhiteout), WithParents(oc.lowers)); err != nil { - return "", nil, errors.Wrap(err, "failed to apply tar stream") + return "", nil, fmt.Errorf("failed to apply tar stream: %w", err) } oc.lowers = append([]string{next}, oc.lowers...) 
@@ -175,7 +163,7 @@ func (d overlayDiffApplier) Apply(ctx context.Context, a fstest.Applier) (string } if err := m.Mount(oc.merged); err != nil { - return "", nil, errors.Wrapf(err, "failed to mount: %v", m) + return "", nil, fmt.Errorf("failed to mount: %v: %w", m, err) } oc.mounted = true diff --git a/archive/tar_mostunix.go b/archive/tar_mostunix.go index 9cd1f0f..d2d9703 100644 --- a/archive/tar_mostunix.go +++ b/archive/tar_mostunix.go @@ -1,3 +1,4 @@ +//go:build !windows && !freebsd // +build !windows,!freebsd /* diff --git a/archive/tar_opts_linux.go b/archive/tar_opts_linux.go index 38ef9e9..f88d826 100644 --- a/archive/tar_opts_linux.go +++ b/archive/tar_opts_linux.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. diff --git a/archive/tar_opts_windows.go b/archive/tar_opts_windows.go index d3b8f4f..0ba3cd0 100644 --- a/archive/tar_opts_windows.go +++ b/archive/tar_opts_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -26,7 +24,7 @@ import ( ) // applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer -// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets +// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets func applyWindowsLayer(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) { return ociwclayer.ImportLayerFromTar(ctx, r, root, options.Parents) } @@ -47,7 +45,7 @@ func AsWindowsContainerLayer() ApplyOpt { // Produces a tar using OCI style file markers for deletions. Deleted // files will be prepended with the prefix ".wh.". This style is // based off AUFS whiteouts. 
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md +// See https://github.com/opencontainers/image-spec/blob/main/layer.md func writeDiffWindowsLayers(ctx context.Context, w io.Writer, _, layer string, options WriteDiffOptions) error { return ociwclayer.ExportLayerToTar(ctx, w, layer, options.ParentLayers) } diff --git a/archive/tar_test.go b/archive/tar_test.go index dcea0b4..3ffb697 100644 --- a/archive/tar_test.go +++ b/archive/tar_test.go @@ -1,3 +1,4 @@ +//go:build !windows && !darwin // +build !windows,!darwin /* @@ -22,22 +23,20 @@ import ( "archive/tar" "bytes" "context" + _ "crypto/sha256" + "errors" "fmt" "io" - "io/ioutil" "os" - "os/exec" "path/filepath" "testing" "time" - _ "crypto/sha256" - "github.com/containerd/containerd/archive/tartest" "github.com/containerd/containerd/pkg/testutil" "github.com/containerd/continuity/fs" "github.com/containerd/continuity/fs/fstest" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" ) const tarCmd = "tar" @@ -58,7 +57,7 @@ var baseApplier = fstest.Apply( func TestUnpack(t *testing.T) { requireTar(t) - if err := testApply(baseApplier); err != nil { + if err := testApply(t, baseApplier); err != nil { t.Fatalf("Test apply failed: %+v", err) } } @@ -66,7 +65,7 @@ func TestUnpack(t *testing.T) { func TestBaseDiff(t *testing.T) { requireTar(t) - if err := testBaseDiff(baseApplier); err != nil { + if err := testBaseDiff(t, baseApplier); err != nil { t.Fatalf("Test base diff failed: %+v", err) } } @@ -102,7 +101,7 @@ func TestRelativeSymlinks(t *testing.T) { } for _, bo := range breakoutLinks { - if err := testDiffApply(bo); err != nil { + if err := testDiffApply(t, bo); err != nil { t.Fatalf("Test apply failed: %+v", err) } } @@ -179,7 +178,7 @@ func TestSymlinks(t *testing.T) { } for i, l := range links { - if err := testDiffApply(l[0], l[1]); err != nil { + if err := testDiffApply(t, l[0], l[1]); err != nil { t.Fatalf("Test[%d] apply failed: %+v", i+1, err) } } @@ -233,21 +232,17 @@ func 
TestBreakouts(t *testing.T) { tc := tartest.TarContext{}.WithUIDGID(os.Getuid(), os.Getgid()).WithModTime(time.Now().UTC()) expected := "unbroken" unbrokenCheck := func(root string) error { - b, err := ioutil.ReadFile(filepath.Join(root, "etc", "unbroken")) + b, err := os.ReadFile(filepath.Join(root, "etc", "unbroken")) if err != nil { - return errors.Wrap(err, "failed to read unbroken") + return fmt.Errorf("failed to read unbroken: %w", err) } if string(b) != expected { - return errors.Errorf("/etc/unbroken: unexpected value %s, expected %s", b, expected) + return fmt.Errorf("/etc/unbroken: unexpected value %s, expected %s", b, expected) } return nil } errFileDiff := errors.New("files differ") - td, err := ioutil.TempDir("", "test-breakouts-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(td) + td := t.TempDir() isSymlinkFile := func(f string) func(string) error { return func(root string) error { @@ -257,7 +252,7 @@ func TestBreakouts(t *testing.T) { } if got := fi.Mode() & os.ModeSymlink; got != os.ModeSymlink { - return errors.Errorf("%s should be symlink", fi.Name()) + return fmt.Errorf("%s should be symlink", fi.Name()) } return nil } @@ -285,7 +280,7 @@ func TestBreakouts(t *testing.T) { } if t1 != t2 { - return errors.Wrapf(errFileDiff, "%#v and %#v", t1, t2) + return fmt.Errorf("%#v and %#v: %w", t1, t2, errFileDiff) } return nil } @@ -310,7 +305,7 @@ func TestBreakouts(t *testing.T) { return err } if !os.SameFile(s1, s2) { - return errors.Wrapf(errFileDiff, "%#v and %#v", s1, s2) + return fmt.Errorf("%#v and %#v: %w", s1, s2, errFileDiff) } return nil } @@ -330,12 +325,12 @@ func TestBreakouts(t *testing.T) { } fileValue := func(f1 string, content []byte) func(string) error { return func(root string) error { - b, err := ioutil.ReadFile(filepath.Join(root, f1)) + b, err := os.ReadFile(filepath.Join(root, f1)) if err != nil { return err } if !bytes.Equal(b, content) { - return errors.Errorf("content differs: expected %v, got %v", content, b) + 
return fmt.Errorf("content differs: expected %v, got %v", content, b) } return nil } @@ -420,12 +415,12 @@ func TestBreakouts(t *testing.T) { tc.File("/localetc/emptied", []byte{}, 0644), ), validator: func(root string) error { - b, err := ioutil.ReadFile(filepath.Join(root, "etc", "emptied")) + b, err := os.ReadFile(filepath.Join(root, "etc", "emptied")) if err != nil { - return errors.Wrap(err, "failed to read unbroken") + return fmt.Errorf("failed to read unbroken: %w", err) } if len(b) > 0 { - return errors.Errorf("/etc/emptied: non-empty") + return errors.New("/etc/emptied: non-empty") } return nil }, @@ -754,11 +749,11 @@ func TestBreakouts(t *testing.T) { name: "HardlinkSymlinkChmod", w: func() tartest.WriterToTar { p := filepath.Join(td, "perm400") - if err := ioutil.WriteFile(p, []byte("..."), 0400); err != nil { + if err := os.WriteFile(p, []byte("..."), 0400); err != nil { t.Fatal(err) } ep := filepath.Join(td, "also-exists-outside-root") - if err := ioutil.WriteFile(ep, []byte("..."), 0640); err != nil { + if err := os.WriteFile(ep, []byte("..."), 0640); err != nil { t.Fatal(err) } @@ -774,7 +769,7 @@ func TestBreakouts(t *testing.T) { return err } if perm := fi.Mode() & os.ModePerm; perm != 0400 { - return errors.Errorf("%s perm changed from 0400 to %04o", p, perm) + return fmt.Errorf("%s perm changed from 0400 to %04o", p, perm) } return nil }, @@ -800,7 +795,7 @@ func TestApplyTar(t *testing.T) { return err } if _, err := os.Stat(p); err != nil { - return errors.Wrapf(err, "failure checking existence for %v", d) + return fmt.Errorf("failure checking existence for %v: %w", d, err) } } return nil @@ -841,26 +836,18 @@ func TestApplyTar(t *testing.T) { } } -func testApply(a fstest.Applier) error { - td, err := ioutil.TempDir("", "test-apply-") - if err != nil { - return errors.Wrap(err, "failed to create temp dir") - } - defer os.RemoveAll(td) - dest, err := ioutil.TempDir("", "test-apply-dest-") - if err != nil { - return errors.Wrap(err, "failed to 
create temp dir") - } - defer os.RemoveAll(dest) +func testApply(t *testing.T, a fstest.Applier) error { + td := t.TempDir() + dest := t.TempDir() if err := a.Apply(td); err != nil { - return errors.Wrap(err, "failed to apply filesystem changes") + return fmt.Errorf("failed to apply filesystem changes: %w", err) } tarArgs := []string{"c", "-C", td} names, err := readDirNames(td) if err != nil { - return errors.Wrap(err, "failed to read directory names") + return fmt.Errorf("failed to read directory names: %w", err) } tarArgs = append(tarArgs, names...) @@ -868,34 +855,26 @@ func testApply(a fstest.Applier) error { arch, err := cmd.StdoutPipe() if err != nil { - return errors.Wrap(err, "failed to create stdout pipe") + return fmt.Errorf("failed to create stdout pipe: %w", err) } if err := cmd.Start(); err != nil { - return errors.Wrap(err, "failed to start command") + return fmt.Errorf("failed to start command: %w", err) } if _, err := Apply(context.Background(), dest, arch); err != nil { - return errors.Wrap(err, "failed to apply tar stream") + return fmt.Errorf("failed to apply tar stream: %w", err) } return fstest.CheckDirectoryEqual(td, dest) } -func testBaseDiff(a fstest.Applier) error { - td, err := ioutil.TempDir("", "test-base-diff-") - if err != nil { - return errors.Wrap(err, "failed to create temp dir") - } - defer os.RemoveAll(td) - dest, err := ioutil.TempDir("", "test-base-diff-dest-") - if err != nil { - return errors.Wrap(err, "failed to create temp dir") - } - defer os.RemoveAll(dest) +func testBaseDiff(t *testing.T, a fstest.Applier) error { + td := t.TempDir() + dest := t.TempDir() if err := a.Apply(td); err != nil { - return errors.Wrap(err, "failed to apply filesystem changes") + return fmt.Errorf("failed to apply filesystem changes: %w", err) } arch := Diff(context.Background(), "", td) @@ -903,27 +882,19 @@ func testBaseDiff(a fstest.Applier) error { cmd := exec.Command(tarCmd, "x", "-C", dest) cmd.Stdin = arch if err := cmd.Run(); err != nil 
{ - return errors.Wrap(err, "tar command failed") + return fmt.Errorf("tar command failed: %w", err) } return fstest.CheckDirectoryEqual(td, dest) } -func testDiffApply(appliers ...fstest.Applier) error { - td, err := ioutil.TempDir("", "test-diff-apply-") - if err != nil { - return errors.Wrap(err, "failed to create temp dir") - } - defer os.RemoveAll(td) - dest, err := ioutil.TempDir("", "test-diff-apply-dest-") - if err != nil { - return errors.Wrap(err, "failed to create temp dir") - } - defer os.RemoveAll(dest) +func testDiffApply(t *testing.T, appliers ...fstest.Applier) error { + td := t.TempDir() + dest := t.TempDir() for _, a := range appliers { if err := a.Apply(td); err != nil { - return errors.Wrap(err, "failed to apply filesystem changes") + return fmt.Errorf("failed to apply filesystem changes: %w", err) } } @@ -931,18 +902,18 @@ func testDiffApply(appliers ...fstest.Applier) error { if len(appliers) > 1 { for _, a := range appliers[:len(appliers)-1] { if err := a.Apply(dest); err != nil { - return errors.Wrap(err, "failed to apply base filesystem changes") + return fmt.Errorf("failed to apply base filesystem changes: %w", err) } } } - diffBytes, err := ioutil.ReadAll(Diff(context.Background(), dest, td)) + diffBytes, err := io.ReadAll(Diff(context.Background(), dest, td)) if err != nil { - return errors.Wrap(err, "failed to create diff") + return fmt.Errorf("failed to create diff: %w", err) } if _, err := Apply(context.Background(), dest, bytes.NewReader(diffBytes)); err != nil { - return errors.Wrap(err, "failed to apply tar stream") + return fmt.Errorf("failed to apply tar stream: %w", err) } return fstest.CheckDirectoryEqual(td, dest) @@ -950,11 +921,7 @@ func testDiffApply(appliers ...fstest.Applier) error { func makeWriterToTarTest(wt tartest.WriterToTar, a fstest.Applier, validate func(string) error, applyErr error) func(*testing.T) { return func(t *testing.T) { - td, err := ioutil.TempDir("", "test-writer-to-tar-") - if err != nil { - 
t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(td) + td := t.TempDir() if a != nil { if err := a.Apply(td); err != nil { @@ -1194,10 +1161,10 @@ func dirEntry(name string, mode int) tarEntryValidator { return errors.New("not directory type") } if hdr.Name != name { - return errors.Errorf("wrong name %q, expected %q", hdr.Name, name) + return fmt.Errorf("wrong name %q, expected %q", hdr.Name, name) } if hdr.Mode != int64(mode) { - return errors.Errorf("wrong mode %o, expected %o", hdr.Mode, mode) + return fmt.Errorf("wrong mode %o, expected %o", hdr.Mode, mode) } return nil } @@ -1209,13 +1176,13 @@ func fileEntry(name string, expected []byte, mode int) tarEntryValidator { return errors.New("not file type") } if hdr.Name != name { - return errors.Errorf("wrong name %q, expected %q", hdr.Name, name) + return fmt.Errorf("wrong name %q, expected %q", hdr.Name, name) } if hdr.Mode != int64(mode) { - return errors.Errorf("wrong mode %o, expected %o", hdr.Mode, mode) + return fmt.Errorf("wrong mode %o, expected %o", hdr.Mode, mode) } if !bytes.Equal(b, expected) { - return errors.Errorf("different file content") + return errors.New("different file content") } return nil } @@ -1227,10 +1194,10 @@ func linkEntry(name, link string) tarEntryValidator { return errors.New("not link type") } if hdr.Name != name { - return errors.Errorf("wrong name %q, expected %q", hdr.Name, name) + return fmt.Errorf("wrong name %q, expected %q", hdr.Name, name) } if hdr.Linkname != link { - return errors.Errorf("wrong link %q, expected %q", hdr.Linkname, link) + return fmt.Errorf("wrong link %q, expected %q", hdr.Linkname, link) } return nil } @@ -1243,10 +1210,10 @@ func whiteoutEntry(name string) tarEntryValidator { return func(hdr *tar.Header, b []byte) error { if hdr.Typeflag != tar.TypeReg { - return errors.Errorf("not file type: %q", hdr.Typeflag) + return fmt.Errorf("not file type: %q", hdr.Typeflag) } if hdr.Name != whiteOut { - return errors.Errorf("wrong name 
%q, expected whiteout %q", hdr.Name, name) + return fmt.Errorf("wrong name %q, expected whiteout %q", hdr.Name, name) } return nil } @@ -1254,20 +1221,12 @@ func whiteoutEntry(name string) tarEntryValidator { func makeDiffTarTest(validators []tarEntryValidator, a, b fstest.Applier) func(*testing.T) { return func(t *testing.T) { - ad, err := ioutil.TempDir("", "test-make-diff-tar-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer os.RemoveAll(ad) + ad := t.TempDir() if err := a.Apply(ad); err != nil { t.Fatalf("failed to apply a: %v", err) } - bd, err := ioutil.TempDir("", "test-make-diff-tar-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer os.RemoveAll(bd) + bd := t.TempDir() if err := fs.CopyDir(bd, ad); err != nil { t.Fatalf("failed to copy dir: %v", err) } @@ -1289,7 +1248,7 @@ func makeDiffTarTest(validators []tarEntryValidator, a, b fstest.Applier) func(* } var b []byte if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - b, err = ioutil.ReadAll(tr) + b, err = io.ReadAll(tr) if err != nil { t.Fatalf("tar read file error: %v", err) } @@ -1307,9 +1266,9 @@ func makeDiffTarTest(validators []tarEntryValidator, a, b fstest.Applier) func(* type diffApplier struct{} func (d diffApplier) TestContext(ctx context.Context) (context.Context, func(), error) { - base, err := ioutil.TempDir("", "test-diff-apply-") + base, err := os.MkdirTemp("", "test-diff-apply-") if err != nil { - return ctx, nil, errors.Wrap(err, "failed to create temp dir") + return ctx, nil, fmt.Errorf("failed to create temp dir: %w", err) } return context.WithValue(ctx, d, base), func() { os.RemoveAll(base) @@ -1319,32 +1278,32 @@ func (d diffApplier) TestContext(ctx context.Context) (context.Context, func(), func (d diffApplier) Apply(ctx context.Context, a fstest.Applier) (string, func(), error) { base := ctx.Value(d).(string) - applyCopy, err := ioutil.TempDir("", "test-diffapply-apply-copy-") + applyCopy, err := os.MkdirTemp("", 
"test-diffapply-apply-copy-") if err != nil { - return "", nil, errors.Wrap(err, "failed to create temp dir") + return "", nil, fmt.Errorf("failed to create temp dir: %w", err) } defer os.RemoveAll(applyCopy) if err = fs.CopyDir(applyCopy, base); err != nil { - return "", nil, errors.Wrap(err, "failed to copy base") + return "", nil, fmt.Errorf("failed to copy base: %w", err) } if err := a.Apply(applyCopy); err != nil { - return "", nil, errors.Wrap(err, "failed to apply changes to copy of base") + return "", nil, fmt.Errorf("failed to apply changes to copy of base: %w", err) } - diffBytes, err := ioutil.ReadAll(Diff(ctx, base, applyCopy)) + diffBytes, err := io.ReadAll(Diff(ctx, base, applyCopy)) if err != nil { - return "", nil, errors.Wrap(err, "failed to create diff") + return "", nil, fmt.Errorf("failed to create diff: %w", err) } if _, err = Apply(ctx, base, bytes.NewReader(diffBytes)); err != nil { - return "", nil, errors.Wrap(err, "failed to apply tar stream") + return "", nil, fmt.Errorf("failed to apply tar stream: %w", err) } return base, nil, nil } func readDirNames(p string) ([]string, error) { - fis, err := ioutil.ReadDir(p) + fis, err := os.ReadDir(p) if err != nil { return nil, err } diff --git a/archive/tar_unix.go b/archive/tar_unix.go index cd2be74..d84dfd8 100644 --- a/archive/tar_unix.go +++ b/archive/tar_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,14 +21,16 @@ package archive import ( "archive/tar" + "errors" + "fmt" "os" + "runtime" "strings" "syscall" "github.com/containerd/containerd/pkg/userns" "github.com/containerd/continuity/fs" "github.com/containerd/continuity/sysx" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -40,13 +43,26 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { } func setHeaderForSpecialDevice(hdr *tar.Header, name string, fi os.FileInfo) error { + // Devmajor and Devminor are only needed for special devices. 
+ + // In FreeBSD, RDev for regular files is -1 (unless overridden by FS): + // https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531 + // (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241). + + // ZFS in particular does not override the default: + // https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027 + + // Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1). + // Such large values cannot be encoded in a tar header. + if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar { + return nil + } s, ok := fi.Sys().(*syscall.Stat_t) if !ok { return errors.New("unsupported stat type") } - // Rdev is int32 on darwin/bsd, int64 on linux/solaris - rdev := uint64(s.Rdev) // nolint: unconvert + rdev := uint64(s.Rdev) //nolint:nolintlint,unconvert // rdev is int32 on darwin/bsd, int64 on linux/solaris // Currently go does not fill in the major/minors if s.Mode&syscall.S_IFBLK != 0 || @@ -69,6 +85,7 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { } // Call chmod to avoid permission mask if err := os.Chmod(name, perm); err != nil { + f.Close() return nil, err } return f, err @@ -122,7 +139,7 @@ func getxattr(path, attr string) ([]byte, error) { func setxattr(path, key, value string) error { // Do not set trusted attributes if strings.HasPrefix(key, "trusted.") { - return errors.Wrap(unix.ENOTSUP, "admin attributes from archive not supported") + return fmt.Errorf("admin attributes from archive not supported: %w", unix.ENOTSUP) } return unix.Lsetxattr(path, key, []byte(value), 0) } @@ -142,12 +159,12 @@ func copyDirInfo(fi os.FileInfo, path string) error { } } if err != nil { - return errors.Wrapf(err, "failed to chown %s", path) + return fmt.Errorf("failed to chown %s: %w", path, err) } } if err := os.Chmod(path, fi.Mode()); err != nil { - return errors.Wrapf(err, "failed to chmod %s", path) 
+ return fmt.Errorf("failed to chmod %s: %w", path, err) } timespec := []unix.Timespec{ @@ -155,7 +172,7 @@ func copyDirInfo(fi os.FileInfo, path string) error { unix.NsecToTimespec(syscall.TimespecToNsec(fs.StatMtime(st))), } if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrapf(err, "failed to utime %s", path) + return fmt.Errorf("failed to utime %s: %w", path, err) } return nil @@ -167,7 +184,7 @@ func copyUpXAttrs(dst, src string) error { if err == unix.ENOTSUP || err == sysx.ENODATA { return nil } - return errors.Wrapf(err, "failed to list xattrs on %s", src) + return fmt.Errorf("failed to list xattrs on %s: %w", src, err) } for _, xattr := range xattrKeys { // Do not copy up trusted attributes @@ -179,10 +196,10 @@ func copyUpXAttrs(dst, src string) error { if err == unix.ENOTSUP || err == sysx.ENODATA { continue } - return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + return fmt.Errorf("failed to get xattr %q on %s: %w", xattr, src, err) } if err := lsetxattrCreate(dst, xattr, data); err != nil { - return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + return fmt.Errorf("failed to set xattr %q on %s: %w", xattr, dst, err) } } diff --git a/archive/tar_windows.go b/archive/tar_windows.go index 3184070..4b71c1e 100644 --- a/archive/tar_windows.go +++ b/archive/tar_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -20,12 +18,12 @@ package archive import ( "archive/tar" + "errors" "fmt" "os" "strings" "github.com/containerd/containerd/sys" - "github.com/pkg/errors" ) // tarName returns platform-specific filepath @@ -114,7 +112,7 @@ func setxattr(path, key, value string) error { func copyDirInfo(fi os.FileInfo, path string) error { if err := os.Chmod(path, fi.Mode()); err != nil { - return errors.Wrapf(err, "failed to chmod %s", path) + return fmt.Errorf("failed to chmod %s: %w", path, err) } return nil } diff --git a/archive/tarheader/tarheader.go b/archive/tarheader/tarheader.go new file mode 100644 index 0000000..2f93842 --- /dev/null +++ b/archive/tarheader/tarheader.go @@ -0,0 +1,82 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Portions from https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L419-L464 + Copyright (C) Docker/Moby authors. + Licensed under the Apache License, Version 2.0 + NOTICE: https://github.com/moby/moby/blob/v23.0.1/NOTICE +*/ + +package tarheader + +import ( + "archive/tar" + "os" +) + +// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to +// prevent tar.FileInfoHeader from introspecting it and potentially calling into +// glibc. +// +// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L419-L434 . 
+type nosysFileInfo struct { + os.FileInfo +} + +func (fi nosysFileInfo) Sys() interface{} { + // A Sys value of type *tar.Header is safe as it is system-independent. + // The tar.FileInfoHeader function copies the fields into the returned + // header without performing any OS lookups. + if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { + return sys + } + return nil +} + +// sysStat, if non-nil, populates hdr from system-dependent fields of fi. +// +// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L436-L437 . +var sysStat func(fi os.FileInfo, hdr *tar.Header) error + +// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. +// +// Compared to the archive/tar.FileInfoHeader function, this function is safe to +// call from a chrooted process as it does not populate fields which would +// require operating system lookups. It behaves identically to +// tar.FileInfoHeader when fi is a FileInfo value returned from +// tar.Header.FileInfo(). +// +// When fi is a FileInfo for a native file, such as returned from os.Stat() and +// os.Lstat(), the returned Header value differs from one returned from +// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not +// set as OS lookups would be required to populate them. The AccessTime and +// ChangeTime fields are not currently set (not yet implemented) although that +// is subject to change. Callers which require the AccessTime or ChangeTime +// fields to be zeroed should explicitly zero them out in the returned Header +// value to avoid any compatibility issues in the future. +// +// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L439-L464 . 
+func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) + if err != nil { + return nil, err + } + if sysStat != nil { + return hdr, sysStat(fi, hdr) + } + return hdr, nil +} diff --git a/archive/tarheader/tarheader_unix.go b/archive/tarheader/tarheader_unix.go new file mode 100644 index 0000000..98ad8f9 --- /dev/null +++ b/archive/tarheader/tarheader_unix.go @@ -0,0 +1,59 @@ +//go:build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Portions from https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive_unix.go#L52-L70 + Copyright (C) Docker/Moby authors. + Licensed under the Apache License, Version 2.0 + NOTICE: https://github.com/moby/moby/blob/v23.0.1/NOTICE +*/ + +package tarheader + +import ( + "archive/tar" + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +func init() { + sysStat = statUnix +} + +// statUnix populates hdr from system-dependent fields of fi without performing +// any OS lookups. 
+// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive_unix.go#L52-L70 +func statUnix(fi os.FileInfo, hdr *tar.Header) error { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + hdr.Uid = int(s.Uid) + hdr.Gid = int(s.Gid) + + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) + } + + return nil +} diff --git a/archive/time_unix.go b/archive/time_unix.go index e05ca71..043e374 100644 --- a/archive/time_unix.go +++ b/archive/time_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,11 +20,10 @@ package archive import ( + "fmt" "time" "golang.org/x/sys/unix" - - "github.com/pkg/errors" ) func chtimes(path string, atime, mtime time.Time) error { @@ -32,7 +32,7 @@ func chtimes(path string, atime, mtime time.Time) error { utimes[1] = unix.NsecToTimespec(mtime.UnixNano()) if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path) + return fmt.Errorf("failed call to UtimesNanoAt for %s: %w", path, err) } return nil diff --git a/cio/io_test.go b/cio/io_test.go index 3f4ceb9..3df46a7 100644 --- a/cio/io_test.go +++ b/cio/io_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -22,7 +23,6 @@ import ( "bytes" "context" "io" - "io/ioutil" "net/url" "os" "path/filepath" @@ -49,9 +49,7 @@ func TestNewFIFOSetInDir(t *testing.T) { t.Skip("NewFIFOSetInDir has different behaviour on windows") } - root, err := ioutil.TempDir("", "test-new-fifo-set") - assert.NilError(t, err) - defer os.RemoveAll(root) + root := t.TempDir() fifos, err := NewFIFOSetInDir(root, "theid", true) assert.NilError(t, err) @@ -68,12 +66,12 @@ func TestNewFIFOSetInDir(t *testing.T) { } assert.Assert(t, is.DeepEqual(fifos, expected, cmpFIFOSet)) - files, err := ioutil.ReadDir(root) + files, err := os.ReadDir(root) 
assert.NilError(t, err) assert.Check(t, is.Len(files, 1)) assert.NilError(t, fifos.Close()) - files, err = ioutil.ReadDir(root) + files, err = os.ReadDir(root) assert.NilError(t, err) assert.Check(t, is.Len(files, 0)) } @@ -101,19 +99,19 @@ func TestNewAttach(t *testing.T) { fifos, err := NewFIFOSetInDir("", "theid", false) assert.NilError(t, err) - io, err := attacher(fifos) + attachedFifos, err := attacher(fifos) assert.NilError(t, err) - defer io.Close() + defer attachedFifos.Close() - producers := setupFIFOProducers(t, io.Config()) + producers := setupFIFOProducers(t, attachedFifos.Config()) initProducers(t, producers, expectedStdout, expectedStderr) - actualStdin, err := ioutil.ReadAll(producers.Stdin) + actualStdin, err := io.ReadAll(producers.Stdin) assert.NilError(t, err) - io.Wait() - io.Cancel() - assert.NilError(t, io.Close()) + attachedFifos.Wait() + attachedFifos.Cancel() + assert.NilError(t, attachedFifos.Close()) assert.Check(t, is.Equal(expectedStdout, stdout.String())) assert.Check(t, is.Equal(expectedStderr, stderr.String())) diff --git a/cio/io_unix.go b/cio/io_unix.go index 8b60067..5606cc8 100644 --- a/cio/io_unix.go +++ b/cio/io_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,15 +21,14 @@ package cio import ( "context" + "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" "syscall" "github.com/containerd/fifo" - "github.com/pkg/errors" ) // NewFIFOSetInDir returns a new FIFOSet with paths in a temporary directory under root @@ -38,7 +38,7 @@ func NewFIFOSetInDir(root, id string, terminal bool) (*FIFOSet, error) { return nil, err } } - dir, err := ioutil.TempDir(root, "") + dir, err := os.MkdirTemp(root, "") if err != nil { return nil, err } @@ -112,7 +112,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { if fifos.Stdin != "" { if f.Stdin, retErr = fifo.OpenFifo(ctx, fifos.Stdin, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, 
errors.Wrapf(retErr, "failed to open stdin fifo") + return f, fmt.Errorf("failed to open stdin fifo: %w", retErr) } defer func() { if retErr != nil && f.Stdin != nil { @@ -122,7 +122,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { } if fifos.Stdout != "" { if f.Stdout, retErr = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stdout fifo") + return f, fmt.Errorf("failed to open stdout fifo: %w", retErr) } defer func() { if retErr != nil && f.Stdout != nil { @@ -132,7 +132,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { } if !fifos.Terminal && fifos.Stderr != "" { if f.Stderr, retErr = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stderr fifo") + return f, fmt.Errorf("failed to open stderr fifo: %w", retErr) } } return f, nil diff --git a/cio/io_unix_test.go b/cio/io_unix_test.go index 6e0a9fc..d4e0a70 100644 --- a/cio/io_unix_test.go +++ b/cio/io_unix_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,9 +21,6 @@ package cio import ( "context" - "fmt" - "io/ioutil" - "os" "path/filepath" "testing" @@ -65,11 +63,7 @@ func TestOpenFifosWithTerminal(t *testing.T) { var ctx, cancel = context.WithCancel(context.Background()) defer cancel() - ioFifoDir, err := ioutil.TempDir("", fmt.Sprintf("cio-%s", t.Name())) - if err != nil { - t.Fatalf("unexpected error during creating temp dir: %v", err) - } - defer os.RemoveAll(ioFifoDir) + ioFifoDir := t.TempDir() cfg := Config{ Stdout: filepath.Join(ioFifoDir, "test-stdout"), diff --git a/cio/io_windows.go b/cio/io_windows.go index ded4757..f3d736a 100644 --- a/cio/io_windows.go +++ b/cio/io_windows.go @@ -23,7 +23,6 @@ import ( winio "github.com/Microsoft/go-winio" "github.com/containerd/containerd/log" - 
"github.com/pkg/errors" ) const pipeRoot = `\\.\pipe` @@ -54,7 +53,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stdin != "" { l, err := winio.ListenPipe(fifos.Stdin, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdin) + return nil, fmt.Errorf("failed to create stdin pipe %s: %w", fifos.Stdin, err) } cios.closers = append(cios.closers, l) @@ -77,7 +76,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stdout != "" { l, err := winio.ListenPipe(fifos.Stdout, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout) + return nil, fmt.Errorf("failed to create stdout pipe %s: %w", fifos.Stdout, err) } cios.closers = append(cios.closers, l) @@ -100,7 +99,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stderr != "" { l, err := winio.ListenPipe(fifos.Stderr, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to create stderr pipe %s", fifos.Stderr) + return nil, fmt.Errorf("failed to create stderr pipe %s: %w", fifos.Stderr, err) } cios.closers = append(cios.closers, l) diff --git a/cio/io_windows_test.go b/cio/io_windows_test.go index 92fa1ff..e34457d 100644 --- a/cio/io_windows_test.go +++ b/cio/io_windows_test.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
diff --git a/client.go b/client.go index ace9bf6..1c2202e 100644 --- a/client.go +++ b/client.go @@ -21,7 +21,6 @@ import ( "context" "encoding/json" "fmt" - "net/http" "runtime" "strconv" "strings" @@ -62,10 +61,10 @@ import ( ptypes "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "golang.org/x/sync/semaphore" "google.golang.org/grpc" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/health/grpc_health_v1" ) @@ -119,31 +118,33 @@ func New(address string, opts ...ClientOpt) (*Client, error) { } gopts := []grpc.DialOption{ grpc.WithBlock(), - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.FailOnNonTempDialError(true), grpc.WithConnectParams(connParams), grpc.WithContextDialer(dialer.ContextDialer), - - // TODO(stevvooe): We may need to allow configuration of this on the client. 
- grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)), - grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)), + grpc.WithReturnConnectionError(), } if len(copts.dialOptions) > 0 { gopts = copts.dialOptions } + gopts = append(gopts, grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize), + grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize))) + if len(copts.callOptions) > 0 { + gopts = append(gopts, grpc.WithDefaultCallOptions(copts.callOptions...)) + } if copts.defaultns != "" { unary, stream := newNSInterceptors(copts.defaultns) - gopts = append(gopts, - grpc.WithUnaryInterceptor(unary), - grpc.WithStreamInterceptor(stream), - ) + gopts = append(gopts, grpc.WithChainUnaryInterceptor(unary)) + gopts = append(gopts, grpc.WithChainStreamInterceptor(stream)) } + connector := func() (*grpc.ClientConn, error) { ctx, cancel := context.WithTimeout(context.Background(), copts.timeout) defer cancel() conn, err := grpc.DialContext(ctx, dialer.DialAddress(address), gopts...) 
if err != nil { - return nil, errors.Wrapf(err, "failed to dial %q", address) + return nil, fmt.Errorf("failed to dial %q: %w", address, err) } return conn, nil } @@ -154,7 +155,7 @@ func New(address string, opts ...ClientOpt) (*Client, error) { c.conn, c.connector = conn, connector } if copts.services == nil && c.conn == nil { - return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available") + return nil, fmt.Errorf("no grpc connection or services is available: %w", errdefs.ErrUnavailable) } // check namespace labels for default runtime @@ -214,7 +215,7 @@ type Client struct { // Reconnect re-establishes the GRPC connection to the containerd daemon func (c *Client) Reconnect() error { if c.connector == nil { - return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available") + return fmt.Errorf("unable to reconnect to containerd, no connector available: %w", errdefs.ErrUnavailable) } c.connMu.Lock() defer c.connMu.Unlock() @@ -242,7 +243,7 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) { c.connMu.Lock() if c.conn == nil { c.connMu.Unlock() - return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") + return false, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable) } c.connMu.Unlock() r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true)) @@ -265,8 +266,8 @@ func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container return out, nil } -// NewContainer will create a new container in container with the provided id -// the id must be unique within the namespace +// NewContainer will create a new container with the provided id. +// The id must be unique within the namespace. 
func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) { ctx, done, err := c.WithLease(ctx) if err != nil { @@ -369,9 +370,7 @@ type RemoteContext struct { func defaultRemoteContext() *RemoteContext { return &RemoteContext{ - Resolver: docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, - }), + Resolver: docker.NewResolver(docker.ResolverOptions{}), } } @@ -386,7 +385,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag } if fetchCtx.Unpack { - return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull") + return images.Image{}, fmt.Errorf("unpack on fetch not supported, try pull: %w", errdefs.ErrNotImplemented) } if fetchCtx.PlatformMatcher == nil { @@ -397,7 +396,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag for _, s := range fetchCtx.Platforms { p, err := platforms.Parse(s) if err != nil { - return images.Image{}, errors.Wrapf(err, "invalid platform %s", s) + return images.Image{}, fmt.Errorf("invalid platform %s: %w", s, err) } ps = append(ps, p) } @@ -433,7 +432,7 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, for _, platform := range pushCtx.Platforms { p, err := platforms.Parse(platform) if err != nil { - return errors.Wrapf(err, "invalid platform %s", platform) + return fmt.Errorf("invalid platform %s: %w", platform, err) } ps = append(ps, p) } @@ -716,7 +715,7 @@ func (c *Client) Version(ctx context.Context) (Version, error) { c.connMu.Lock() if c.conn == nil { c.connMu.Unlock() - return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") + return Version{}, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable) } c.connMu.Unlock() response, err := c.VersionService().Version(ctx, &ptypes.Empty{}) @@ -739,7 +738,7 @@ func (c *Client) Server(ctx context.Context) (ServerInfo, error) { 
c.connMu.Lock() if c.conn == nil { c.connMu.Unlock() - return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") + return ServerInfo{}, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable) } c.connMu.Unlock() @@ -777,7 +776,7 @@ func (c *Client) getSnapshotter(ctx context.Context, name string) (snapshots.Sna s := c.SnapshotService(name) if s == nil { - return nil, errors.Wrapf(errdefs.ErrNotFound, "snapshotter %s was not found", name) + return nil, fmt.Errorf("snapshotter %s was not found: %w", name, errdefs.ErrNotFound) } return s, nil diff --git a/client_opts.go b/client_opts.go index 44feaa3..2ef7575 100644 --- a/client_opts.go +++ b/client_opts.go @@ -34,6 +34,7 @@ type clientOpts struct { defaultPlatform platforms.MatchComparer services *services dialOptions []grpc.DialOption + callOptions []grpc.CallOption timeout time.Duration } @@ -75,6 +76,14 @@ func WithDialOpts(opts []grpc.DialOption) ClientOpt { } } +// WithCallOpts allows grpc.CallOptions to be set on the connection +func WithCallOpts(opts []grpc.CallOption) ClientOpt { + return func(c *clientOpts) error { + c.callOptions = opts + return nil + } +} + // WithServices sets services used by the client. 
func WithServices(opts ...ServicesOpt) ClientOpt { return func(c *clientOpts) error { diff --git a/cmd/containerd-shim-runc-v1/main.go b/cmd/containerd-shim-runc-v1/main.go index 1b1b106..9db5092 100644 --- a/cmd/containerd-shim-runc-v1/main.go +++ b/cmd/containerd-shim-runc-v1/main.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/cmd/containerd-shim-runc-v2/main.go b/cmd/containerd-shim-runc-v2/main.go index 4f5d804..c94942e 100644 --- a/cmd/containerd-shim-runc-v2/main.go +++ b/cmd/containerd-shim-runc-v2/main.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -19,10 +20,13 @@ package main import ( - v2 "github.com/containerd/containerd/runtime/v2/runc/v2" + "context" + + "github.com/containerd/containerd/runtime/v2/runc/manager" + _ "github.com/containerd/containerd/runtime/v2/runc/task/plugin" "github.com/containerd/containerd/runtime/v2/shim" ) func main() { - shim.Run("io.containerd.runc.v2", v2.New) + shim.RunManager(context.Background(), manager.NewShimManager("io.containerd.runc.v2")) } diff --git a/cmd/containerd-shim/main_unix.go b/cmd/containerd-shim/main_unix.go index a778f0a..024611b 100644 --- a/cmd/containerd-shim/main_unix.go +++ b/cmd/containerd-shim/main_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -26,7 +27,6 @@ import ( "io" "net" "os" - "os/exec" "os/signal" "runtime" "runtime/debug" @@ -45,8 +45,8 @@ import ( "github.com/containerd/ttrpc" "github.com/containerd/typeurl" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) @@ -153,7 +153,7 @@ func executeShim() error { } server, err := newServer() if err != nil { - return errors.Wrap(err, "failed creating server") + return fmt.Errorf("failed creating server: %w", err) } sv, err := shim.NewService( shim.Config{ @@ -211,7 +211,7 @@ func serve(ctx context.Context, server *ttrpc.Server, path string) error { p = abstractSocketPrefix + p } if 
len(p) > socketPathLimit { - return errors.Errorf("%q: unix socket path too long (> %d)", p, socketPathLimit) + return fmt.Errorf("%q: unix socket path too long (> %d)", p, socketPathLimit) } l, err = net.Listen("unix", p) } @@ -307,12 +307,12 @@ func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event if err != nil { return err } - status, err := reaper.Default.Wait(cmd, c) + status, err := reaper.Default.WaitTimeout(cmd, c, 30*time.Second) if err != nil { - return errors.Wrapf(err, "failed to publish event: %s", b.String()) + return fmt.Errorf("failed to publish event: %s: %w", b.String(), err) } if status != 0 { - return errors.Errorf("failed to publish event: %s", b.String()) + return fmt.Errorf("failed to publish event: %s", b.String()) } return nil } diff --git a/cmd/containerd-shim/shim_darwin.go b/cmd/containerd-shim/shim_darwin.go index d6dc230..7d652da 100644 --- a/cmd/containerd-shim/shim_darwin.go +++ b/cmd/containerd-shim/shim_darwin.go @@ -1,5 +1,3 @@ -// +build darwin - /* Copyright The containerd Authors. diff --git a/cmd/containerd-shim/shim_freebsd.go b/cmd/containerd-shim/shim_freebsd.go index 1fd8854..5cafaef 100644 --- a/cmd/containerd-shim/shim_freebsd.go +++ b/cmd/containerd-shim/shim_freebsd.go @@ -1,5 +1,3 @@ -// +build freebsd - /* Copyright The containerd Authors. 
diff --git a/cmd/containerd-stress/density.go b/cmd/containerd-stress/density.go index 27b537d..8006a6d 100644 --- a/cmd/containerd-stress/density.go +++ b/cmd/containerd-stress/density.go @@ -21,7 +21,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "os" "os/signal" "path/filepath" @@ -53,6 +52,7 @@ var densityCommand = cli.Command{ Duration: cliContext.GlobalDuration("duration"), Concurrency: cliContext.GlobalInt("concurrent"), Exec: cliContext.GlobalBool("exec"), + Image: cliContext.GlobalString("image"), JSON: cliContext.GlobalBool("json"), Metrics: cliContext.GlobalString("metrics"), Snapshotter: cliContext.GlobalString("snapshotter"), @@ -66,8 +66,8 @@ var densityCommand = cli.Command{ if err := cleanup(ctx, client); err != nil { return err } - logrus.Infof("pulling %s", imageName) - image, err := client.Pull(ctx, imageName, containerd.WithPullUnpack, containerd.WithPullSnapshotter(config.Snapshotter)) + logrus.Infof("pulling %s", config.Image) + image, err := client.Pull(ctx, config.Image, containerd.WithPullUnpack, containerd.WithPullSnapshotter(config.Snapshotter)) if err != nil { return err } @@ -76,9 +76,6 @@ var densityCommand = cli.Command{ s := make(chan os.Signal, 1) signal.Notify(s, syscall.SIGTERM, syscall.SIGINT) - if err != nil { - return err - } var ( pids []uint32 count = cliContext.Int("count") @@ -172,7 +169,7 @@ func getMaps(pid int) (map[string]int, error) { } func getppid(pid int) (int, error) { - bytes, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + bytes, err := os.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) if err != nil { return 0, err } diff --git a/cmd/containerd-stress/exec_worker.go b/cmd/containerd-stress/exec_worker.go index 9810554..9b990d1 100644 --- a/cmd/containerd-stress/exec_worker.go +++ b/cmd/containerd-stress/exec_worker.go @@ -63,6 +63,12 @@ func (w *execWorker) exec(ctx, tctx context.Context) { logrus.WithError(err).Error("wait exec container's task") return } + + 
if err := task.Start(ctx); err != nil { + logrus.WithError(err).Error("exec container start failure") + return + } + spec, err := c.Spec(ctx) if err != nil { logrus.WithError(err).Error("failed to get spec") diff --git a/cmd/containerd-stress/main.go b/cmd/containerd-stress/main.go index 2706b82..1218b2a 100644 --- a/cmd/containerd-stress/main.go +++ b/cmd/containerd-stress/main.go @@ -36,8 +36,6 @@ import ( "github.com/urfave/cli" ) -const imageName = "docker.io/library/alpine:latest" - var ( ct metrics.LabeledTimer execTimer metrics.LabeledTimer @@ -136,6 +134,11 @@ func main() { Name: "exec", Usage: "add execs to the stress tests", }, + cli.StringFlag{ + Name: "image,i", + Value: "docker.io/library/alpine:latest", + Usage: "image to be utilized for testing", + }, cli.BoolFlag{ Name: "json,j", Usage: "output results in json format", @@ -173,6 +176,7 @@ func main() { Duration: context.GlobalDuration("duration"), Concurrency: context.GlobalInt("concurrent"), Exec: context.GlobalBool("exec"), + Image: context.GlobalString("image"), JSON: context.GlobalBool("json"), Metrics: context.GlobalString("metrics"), Runtime: context.GlobalString("runtime"), @@ -194,6 +198,7 @@ type config struct { Duration time.Duration Address string Exec bool + Image string JSON bool Metrics string Runtime string @@ -206,7 +211,12 @@ func (c config) newClient() (*containerd.Client, error) { func serve(c config) error { go func() { - if err := http.ListenAndServe(c.Metrics, metrics.Handler()); err != nil { + srv := &http.Server{ + Addr: c.Metrics, + Handler: metrics.Handler(), + ReadHeaderTimeout: 5 * time.Minute, // "G112: Potential Slowloris Attack (gosec)"; not a real concern for our use, so setting a long timeout. 
+ } + if err := srv.ListenAndServe(); err != nil { logrus.WithError(err).Error("listen and serve") } }() @@ -228,8 +238,8 @@ func test(c config) error { if err := cleanup(ctx, client); err != nil { return err } - logrus.Infof("pulling %s", imageName) - image, err := client.Pull(ctx, imageName, containerd.WithPullUnpack, containerd.WithPullSnapshotter(c.Snapshotter)) + logrus.Infof("pulling %s", c.Image) + image, err := client.Pull(ctx, c.Image, containerd.WithPullUnpack, containerd.WithPullSnapshotter(c.Snapshotter)) if err != nil { return err } diff --git a/cmd/containerd-stress/rlimit_freebsd.go b/cmd/containerd-stress/rlimit_freebsd.go index a1b39de..92b299a 100644 --- a/cmd/containerd-stress/rlimit_freebsd.go +++ b/cmd/containerd-stress/rlimit_freebsd.go @@ -1,5 +1,3 @@ -// +build freebsd - /* Copyright The containerd Authors. diff --git a/cmd/containerd-stress/rlimit_unix.go b/cmd/containerd-stress/rlimit_unix.go index 492f604..e8fa749 100644 --- a/cmd/containerd-stress/rlimit_unix.go +++ b/cmd/containerd-stress/rlimit_unix.go @@ -1,3 +1,4 @@ +//go:build !windows && !freebsd // +build !windows,!freebsd /* diff --git a/cmd/containerd-stress/rlimit_windows.go b/cmd/containerd-stress/rlimit_windows.go index 22678ad..d98ec36 100644 --- a/cmd/containerd-stress/rlimit_windows.go +++ b/cmd/containerd-stress/rlimit_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
diff --git a/cmd/containerd/builtins.go b/cmd/containerd/builtins.go index b120b60..8c6f1fe 100644 --- a/cmd/containerd/builtins.go +++ b/cmd/containerd/builtins.go @@ -19,8 +19,10 @@ package main // register containerd builtins here import ( _ "github.com/containerd/containerd/diff/walking/plugin" + _ "github.com/containerd/containerd/events/plugin" _ "github.com/containerd/containerd/gc/scheduler" _ "github.com/containerd/containerd/runtime/restart/monitor" + _ "github.com/containerd/containerd/runtime/v2" _ "github.com/containerd/containerd/services/containers" _ "github.com/containerd/containerd/services/content" _ "github.com/containerd/containerd/services/diff" @@ -34,4 +36,5 @@ import ( _ "github.com/containerd/containerd/services/snapshots" _ "github.com/containerd/containerd/services/tasks" _ "github.com/containerd/containerd/services/version" + _ "github.com/containerd/containerd/tracing/plugin" ) diff --git a/cmd/containerd/builtins_aufs_linux.go b/cmd/containerd/builtins_aufs_linux.go index b6a9773..f06e01f 100644 --- a/cmd/containerd/builtins_aufs_linux.go +++ b/cmd/containerd/builtins_aufs_linux.go @@ -1,3 +1,4 @@ +//go:build !no_aufs // +build !no_aufs /* diff --git a/cmd/containerd/builtins_btrfs_linux.go b/cmd/containerd/builtins_btrfs_linux.go index 7eb7095..a6b11ca 100644 --- a/cmd/containerd/builtins_btrfs_linux.go +++ b/cmd/containerd/builtins_btrfs_linux.go @@ -1,3 +1,4 @@ +//go:build !no_btrfs && cgo // +build !no_btrfs,cgo /* diff --git a/cmd/containerd/builtins_cri.go b/cmd/containerd/builtins_cri.go index 4d5129d..c2d2825 100644 --- a/cmd/containerd/builtins_cri.go +++ b/cmd/containerd/builtins_cri.go @@ -1,3 +1,4 @@ +//go:build (linux && !no_cri) || (windows && !no_cri) // +build linux,!no_cri windows,!no_cri /* diff --git a/cmd/containerd/builtins_devmapper_linux.go b/cmd/containerd/builtins_devmapper_linux.go index 2d22d8c..0c03624 100644 --- a/cmd/containerd/builtins_devmapper_linux.go +++ b/cmd/containerd/builtins_devmapper_linux.go 
@@ -1,3 +1,4 @@ +//go:build !no_devmapper // +build !no_devmapper /* diff --git a/cmd/containerd/builtins_linux.go b/cmd/containerd/builtins_linux.go index 6505144..bb0defd 100644 --- a/cmd/containerd/builtins_linux.go +++ b/cmd/containerd/builtins_linux.go @@ -20,7 +20,6 @@ import ( _ "github.com/containerd/containerd/metrics/cgroups" _ "github.com/containerd/containerd/metrics/cgroups/v2" _ "github.com/containerd/containerd/runtime/v1/linux" - _ "github.com/containerd/containerd/runtime/v2" _ "github.com/containerd/containerd/runtime/v2/runc/options" _ "github.com/containerd/containerd/snapshots/native/plugin" _ "github.com/containerd/containerd/snapshots/overlay/plugin" diff --git a/cmd/containerd/builtins_unix.go b/cmd/containerd/builtins_unix.go index 143b249..2e5c1fa 100644 --- a/cmd/containerd/builtins_unix.go +++ b/cmd/containerd/builtins_unix.go @@ -1,3 +1,4 @@ +//go:build darwin || freebsd || solaris // +build darwin freebsd solaris /* diff --git a/cmd/containerd/builtins_windows.go b/cmd/containerd/builtins_windows.go index 3fdea10..8861ca3 100644 --- a/cmd/containerd/builtins_windows.go +++ b/cmd/containerd/builtins_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -21,7 +19,6 @@ package main import ( _ "github.com/containerd/containerd/diff/lcow" _ "github.com/containerd/containerd/diff/windows" - _ "github.com/containerd/containerd/runtime/v2" _ "github.com/containerd/containerd/snapshots/lcow" _ "github.com/containerd/containerd/snapshots/windows" ) diff --git a/cmd/containerd/builtins_zfs_linux.go b/cmd/containerd/builtins_zfs_linux.go index 8bc26e5..bde126a 100644 --- a/cmd/containerd/builtins_zfs_linux.go +++ b/cmd/containerd/builtins_zfs_linux.go @@ -1,3 +1,4 @@ +//go:build !no_zfs // +build !no_zfs /* diff --git a/cmd/containerd/command/config_unsupported.go b/cmd/containerd/command/config_unsupported.go index 8dc92bc..3d935de 100644 --- a/cmd/containerd/command/config_unsupported.go +++ b/cmd/containerd/command/config_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !windows && !solaris // +build !linux,!windows,!solaris /* diff --git a/cmd/containerd/command/main.go b/cmd/containerd/command/main.go index ca6acfe..42a09dc 100644 --- a/cmd/containerd/command/main.go +++ b/cmd/containerd/command/main.go @@ -19,7 +19,7 @@ package command import ( gocontext "context" "fmt" - "io/ioutil" + "io" "net" "os" "os/signal" @@ -30,12 +30,13 @@ import ( "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" + _ "github.com/containerd/containerd/metrics" // import containerd build info "github.com/containerd/containerd/mount" "github.com/containerd/containerd/services/server" srvconfig "github.com/containerd/containerd/services/server/config" "github.com/containerd/containerd/sys" + "github.com/containerd/containerd/tracing" "github.com/containerd/containerd/version" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" "google.golang.org/grpc/grpclog" @@ -53,7 +54,7 @@ high performance container runtime func init() { // Discard grpc logs so that they don't mess with our stdio - 
grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) + grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, io.Discard)) cli.VersionPrinter = func(c *cli.Context) { fmt.Println(c.App.Name, version.Package, c.App.Version, version.Revision) @@ -108,13 +109,15 @@ can be used and modified as necessary as a custom configuration.` } app.Action = func(context *cli.Context) error { var ( - start = time.Now() - signals = make(chan os.Signal, 2048) - serverC = make(chan *server.Server, 1) - ctx = gocontext.Background() - config = defaultConfig() + start = time.Now() + signals = make(chan os.Signal, 2048) + serverC = make(chan *server.Server, 1) + ctx, cancel = gocontext.WithCancel(gocontext.Background()) + config = defaultConfig() ) + defer cancel() + // Only try to load the config if it either exists, or the user explicitly // told us to load this path. configPath := context.GlobalString("config") @@ -138,20 +141,20 @@ can be used and modified as necessary as a custom configuration.` // Stop if we are registering or unregistering against Windows SCM. stop, err := registerUnregisterService(config.Root) if err != nil { - logrus.Fatal(err) + log.L.Fatal(err) } if stop { return nil } - done := handleSignals(ctx, signals, serverC) + done := handleSignals(ctx, signals, serverC, cancel) // start the signal handler as soon as we can to make sure that // we don't miss any signals during boot signal.Notify(signals, handledSignals...) 
// cleanup temp mounts if err := mount.SetTempMountLocation(filepath.Join(config.Root, "tmpmounts")); err != nil { - return errors.Wrap(err, "creating temp mount location") + return fmt.Errorf("creating temp mount location: %w", err) } // unmount all temp mounts on boot for the server warnings, err := mount.CleanupTempMounts(0) @@ -163,7 +166,7 @@ can be used and modified as necessary as a custom configuration.` } if config.GRPC.Address == "" { - return errors.Wrap(errdefs.ErrInvalidArgument, "grpc address cannot be empty") + return fmt.Errorf("grpc address cannot be empty: %w", errdefs.ErrInvalidArgument) } if config.TTRPC.Address == "" { // If TTRPC was not explicitly configured, use defaults based on GRPC. @@ -176,27 +179,66 @@ can be used and modified as necessary as a custom configuration.` "revision": version.Revision, }).Info("starting containerd") - server, err := server.New(ctx, config) - if err != nil { - return err + type srvResp struct { + s *server.Server + err error } - // Launch as a Windows Service if necessary - if err := launchService(server, done); err != nil { - logrus.Fatal(err) + // run server initialization in a goroutine so we don't end up blocking important things like SIGTERM handling + // while the server is initializing. + // As an example opening the bolt database will block forever if another containerd is already running and containerd + // will have to be `kill -9`'ed to recover. 
+ chsrv := make(chan srvResp) + go func() { + defer close(chsrv) + + server, err := server.New(ctx, config) + if err != nil { + select { + case chsrv <- srvResp{err: err}: + case <-ctx.Done(): + } + return + } + + // Launch as a Windows Service if necessary + if err := launchService(server, done); err != nil { + log.L.Fatal(err) + } + select { + case <-ctx.Done(): + server.Stop() + case chsrv <- srvResp{s: server}: + } + }() + + var server *server.Server + select { + case <-ctx.Done(): + return ctx.Err() + case r := <-chsrv: + if r.err != nil { + return r.err + } + server = r.s } - serverC <- server + // We don't send the server down serverC directly in the goroutine above because we need it lower down. + select { + case <-ctx.Done(): + return ctx.Err() + case serverC <- server: + } if config.Debug.Address != "" { var l net.Listener if isLocalAddress(config.Debug.Address) { if l, err = sys.GetLocalListener(config.Debug.Address, config.Debug.UID, config.Debug.GID); err != nil { - return errors.Wrapf(err, "failed to get listener for debug endpoint") + return fmt.Errorf("failed to get listener for debug endpoint: %w", err) } } else { if l, err = net.Listen("tcp", config.Debug.Address); err != nil { - return errors.Wrapf(err, "failed to get listener for debug endpoint") + return fmt.Errorf("failed to get listener for debug endpoint: %w", err) } } serve(ctx, l, server.ServeDebug) @@ -204,37 +246,46 @@ can be used and modified as necessary as a custom configuration.` if config.Metrics.Address != "" { l, err := net.Listen("tcp", config.Metrics.Address) if err != nil { - return errors.Wrapf(err, "failed to get listener for metrics endpoint") + return fmt.Errorf("failed to get listener for metrics endpoint: %w", err) } serve(ctx, l, server.ServeMetrics) } // setup the ttrpc endpoint tl, err := sys.GetLocalListener(config.TTRPC.Address, config.TTRPC.UID, config.TTRPC.GID) if err != nil { - return errors.Wrapf(err, "failed to get listener for main ttrpc endpoint") + return 
fmt.Errorf("failed to get listener for main ttrpc endpoint: %w", err) } serve(ctx, tl, server.ServeTTRPC) if config.GRPC.TCPAddress != "" { l, err := net.Listen("tcp", config.GRPC.TCPAddress) if err != nil { - return errors.Wrapf(err, "failed to get listener for TCP grpc endpoint") + return fmt.Errorf("failed to get listener for TCP grpc endpoint: %w", err) } serve(ctx, l, server.ServeTCP) } // setup the main grpc endpoint l, err := sys.GetLocalListener(config.GRPC.Address, config.GRPC.UID, config.GRPC.GID) if err != nil { - return errors.Wrapf(err, "failed to get listener for main endpoint") + return fmt.Errorf("failed to get listener for main endpoint: %w", err) } serve(ctx, l, server.ServeGRPC) - if err := notifyReady(ctx); err != nil { - log.G(ctx).WithError(err).Warn("notify ready failed") - } + readyC := make(chan struct{}) + go func() { + server.Wait() + close(readyC) + }() - log.G(ctx).Infof("containerd successfully booted in %fs", time.Since(start).Seconds()) - <-done + select { + case <-readyC: + if err := notifyReady(ctx); err != nil { + log.G(ctx).WithError(err).Warn("notify ready failed") + } + log.G(ctx).Infof("containerd successfully booted in %fs", time.Since(start).Seconds()) + <-done + case <-done: + } return nil } return app @@ -260,6 +311,8 @@ func applyFlags(context *cli.Context, config *srvconfig.Config) error { if err := setLogFormat(config); err != nil { return err } + setLogHooks() + for _, v := range []struct { name string d *string @@ -293,36 +346,22 @@ func setLogLevel(context *cli.Context, config *srvconfig.Config) error { l = config.Debug.Level } if l != "" { - lvl, err := logrus.ParseLevel(l) - if err != nil { - return err - } - logrus.SetLevel(lvl) + return log.SetLevel(l) } return nil } func setLogFormat(config *srvconfig.Config) error { - f := config.Debug.Format + f := log.OutputFormat(config.Debug.Format) if f == "" { f = log.TextFormat } - switch f { - case log.TextFormat: - logrus.SetFormatter(&logrus.TextFormatter{ - 
TimestampFormat: log.RFC3339NanoFixed, - FullTimestamp: true, - }) - case log.JSONFormat: - logrus.SetFormatter(&logrus.JSONFormatter{ - TimestampFormat: log.RFC3339NanoFixed, - }) - default: - return errors.Errorf("unknown log format: %s", f) - } + return log.SetFormat(f) +} - return nil +func setLogHooks() { + logrus.StandardLogger().AddHook(tracing.NewLogrusHook()) } func dumpStacks(writeToFile bool) { @@ -337,7 +376,7 @@ func dumpStacks(writeToFile bool) { bufferLen *= 2 } buf = buf[:stackSize] - logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) + log.L.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) if writeToFile { // Also write to file to aid gathering diagnostics @@ -348,6 +387,6 @@ func dumpStacks(writeToFile bool) { } defer f.Close() f.WriteString(string(buf)) - logrus.Infof("goroutine stack dump written to %s", name) + log.L.Infof("goroutine stack dump written to %s", name) } } diff --git a/cmd/containerd/command/main_unix.go b/cmd/containerd/command/main_unix.go index 98fd698..e81de63 100644 --- a/cmd/containerd/command/main_unix.go +++ b/cmd/containerd/command/main_unix.go @@ -1,3 +1,4 @@ +//go:build linux || darwin || freebsd || solaris // +build linux darwin freebsd solaris /* @@ -35,7 +36,7 @@ var handledSignals = []os.Signal{ unix.SIGPIPE, } -func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *server.Server) chan struct{} { +func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *server.Server, cancel func()) chan struct{} { done := make(chan struct{}, 1) go func() { var server *server.Server @@ -60,11 +61,10 @@ func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *se log.G(ctx).WithError(err).Error("notify stopping failed") } - if server == nil { - close(done) - return + cancel() + if server != nil { + server.Stop() } - server.Stop() close(done) return } diff --git 
a/cmd/containerd/command/main_windows.go b/cmd/containerd/command/main_windows.go index 1803e18..6027035 100644 --- a/cmd/containerd/command/main_windows.go +++ b/cmd/containerd/command/main_windows.go @@ -39,7 +39,7 @@ var ( } ) -func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *server.Server) chan struct{} { +func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *server.Server, cancel func()) chan struct{} { done := make(chan struct{}) go func() { var server *server.Server @@ -54,12 +54,12 @@ func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *se log.G(ctx).WithError(err).Error("notify stopping failed") } - if server == nil { - close(done) - return + cancel() + if server != nil { + server.Stop() } - server.Stop() close(done) + return } } }() diff --git a/cmd/containerd/command/notify_linux.go b/cmd/containerd/command/notify_linux.go index be3d580..f97a1d3 100644 --- a/cmd/containerd/command/notify_linux.go +++ b/cmd/containerd/command/notify_linux.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
diff --git a/cmd/containerd/command/notify_unsupported.go b/cmd/containerd/command/notify_unsupported.go index 3a05137..76b3f85 100644 --- a/cmd/containerd/command/notify_unsupported.go +++ b/cmd/containerd/command/notify_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/cmd/containerd/command/publish.go b/cmd/containerd/command/publish.go index 82e75f3..b18f19f 100644 --- a/cmd/containerd/command/publish.go +++ b/cmd/containerd/command/publish.go @@ -18,8 +18,8 @@ package command import ( gocontext "context" + "fmt" "io" - "io/ioutil" "net" "os" "time" @@ -29,10 +29,10 @@ import ( "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/dialer" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "github.com/urfave/cli" "google.golang.org/grpc" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials/insecure" ) var publishCommand = cli.Command{ @@ -52,7 +52,7 @@ var publishCommand = cli.Command{ ctx := namespaces.WithNamespace(gocontext.Background(), context.String("namespace")) topic := context.String("topic") if topic == "" { - return errors.Wrap(errdefs.ErrInvalidArgument, "topic required to publish event") + return fmt.Errorf("topic required to publish event: %w", errdefs.ErrInvalidArgument) } payload, err := getEventPayload(os.Stdin) if err != nil { @@ -73,7 +73,7 @@ var publishCommand = cli.Command{ } func getEventPayload(r io.Reader) (*types.Any, error) { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return nil, err } @@ -87,7 +87,7 @@ func getEventPayload(r io.Reader) (*types.Any, error) { func connectEvents(address string) (eventsapi.EventsClient, error) { conn, err := connect(address, dialer.ContextDialer) if err != nil { - return nil, errors.Wrapf(err, "failed to dial %q", address) + return nil, fmt.Errorf("failed to dial %q: %w", address, err) } return eventsapi.NewEventsClient(conn), nil } @@ -100,7 +100,7 @@ func connect(address string, d 
func(gocontext.Context, string) (net.Conn, error) } gopts := []grpc.DialOption{ grpc.WithBlock(), - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(d), grpc.FailOnNonTempDialError(true), grpc.WithConnectParams(connParams), @@ -109,7 +109,7 @@ func connect(address string, d func(gocontext.Context, string) (net.Conn, error) defer cancel() conn, err := grpc.DialContext(ctx, dialer.DialAddress(address), gopts...) if err != nil { - return nil, errors.Wrapf(err, "failed to dial %q", address) + return nil, fmt.Errorf("failed to dial %q: %w", address, err) } return conn, nil } diff --git a/cmd/containerd/command/service_unsupported.go b/cmd/containerd/command/service_unsupported.go index b282052..2ea02af 100644 --- a/cmd/containerd/command/service_unsupported.go +++ b/cmd/containerd/command/service_unsupported.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/cmd/containerd/command/service_windows.go b/cmd/containerd/command/service_windows.go index d683c18..eb2062c 100644 --- a/cmd/containerd/command/service_windows.go +++ b/cmd/containerd/command/service_windows.go @@ -18,19 +18,17 @@ package command import ( "fmt" - "io/ioutil" "log" "os" - "os/exec" "path/filepath" "time" "unsafe" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/services/server" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/debug" @@ -215,10 +213,9 @@ func unregisterService() error { // to handle (un-)registering against Windows Service Control Manager (SCM). // It returns an indication to stop on successful SCM operation, and an error. 
func registerUnregisterService(root string) (bool, error) { - if unregisterServiceFlag { if registerServiceFlag { - return true, errors.Wrap(errdefs.ErrInvalidArgument, "--register-service and --unregister-service cannot be used together") + return true, fmt.Errorf("--register-service and --unregister-service cannot be used together: %w", errdefs.ErrInvalidArgument) } return true, unregisterService() } @@ -242,29 +239,64 @@ func registerUnregisterService(root string) (bool, error) { // and we want to make sure stderr goes to the panic file. r, _, err := allocConsole.Call() if r == 0 && err != nil { - return true, fmt.Errorf("error allocating conhost: %s", err) + return true, fmt.Errorf("error allocating conhost: %w", err) } if err := initPanicFile(filepath.Join(root, "panic.log")); err != nil { return true, err } - logOutput := ioutil.Discard + // The usual advice for Windows services is to either write to a log file or to the windows event + // log, the former of which we've exposed here via a --log-file flag. We additionally write panic + // stacks to a panic.log file to diagnose crashes. Below details the two different outcomes if + // --log-file is specified or not: + // + // --log-file is *not* specified. + // ------------------------------- + // -logrus, the stdlibs logging package and os.Stderr output will go to + // NUL (Windows' /dev/null equivalent). + // -Panics will write their stack trace to the panic.log file. + // -Writing to the handle returned from GetStdHandle(STD_ERROR_HANDLE) will write + // to the panic.log file as the underlying handle itself has been redirected. + // + // --log-file *is* specified + // ------------------------------- + // -Logging to logrus, the stdlibs logging package or directly to + // os.Stderr will all go to the log file specified. + // -Panics will write their stack trace to the panic.log file. 
+ // -Writing to the handle returned from GetStdHandle(STD_ERROR_HANDLE) will write + // to the panic.log file as the underlying handle itself has been redirected. + var f *os.File if logFileFlag != "" { - f, err := os.OpenFile(logFileFlag, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + f, err = os.OpenFile(logFileFlag, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { - return true, errors.Wrapf(err, "open log file %q", logFileFlag) + return true, fmt.Errorf("open log file %q: %w", logFileFlag, err) + } + } else { + // Windows services start with NULL stdio handles, and thus os.Stderr and friends will be + // backed by an os.File with a NULL handle. This means writes to os.Stderr will fail, which + // isn't a huge issue as we want output to be discarded if the user doesn't ask for the log + // file. However, writes succeeding but just going to the ether is a much better construct + // so use devnull instead of relying on writes failing. We use devnull instead of io.Discard + // as os.Stderr is an os.File and can't be assigned to io.Discard. + f, err = os.OpenFile(os.DevNull, os.O_WRONLY, 0) + if err != nil { + return true, err } - logOutput = f } - logrus.SetOutput(logOutput) + // Reassign os.Stderr to the log file or NUL. Shim logs are copied to os.Stderr + // directly so this ensures those will end up in the log file as well if specified. + os.Stderr = f + // Assign the stdlibs log package in case of any miscellaneous uses by + // dependencies. + log.SetOutput(f) + logrus.SetOutput(f) } return false, nil } // launchService is the entry point for running the daemon under SCM. 
func launchService(s *server.Server, done chan struct{}) error { - if !runServiceFlag { return nil } @@ -275,7 +307,7 @@ func launchService(s *server.Server, done chan struct{}) error { done: done, } - interactive, err := svc.IsAnInteractiveSession() // nolint:staticcheck + interactive, err := svc.IsAnInteractiveSession() //nolint:staticcheck if err != nil { return err } @@ -360,12 +392,6 @@ func initPanicFile(path string) error { return err } - // Reset os.Stderr to the panic file (so fmt.Fprintf(os.Stderr,...) actually gets redirected) - os.Stderr = os.NewFile(panicFile.Fd(), "/dev/stderr") - - // Force threads that panic to write to stderr (the panicFile handle now), otherwise it will go into the ether - log.SetOutput(os.Stderr) - return nil } diff --git a/cmd/containerd/main.go b/cmd/containerd/main.go index 10bde45..dcfa66b 100644 --- a/cmd/containerd/main.go +++ b/cmd/containerd/main.go @@ -21,10 +21,11 @@ import ( "os" "github.com/containerd/containerd/cmd/containerd/command" - "github.com/containerd/containerd/pkg/seed" + "github.com/containerd/containerd/pkg/seed" //nolint:staticcheck // Global math/rand seed is deprecated, but still used by external dependencies ) func init() { + //nolint:staticcheck // Global math/rand seed is deprecated, but still used by external dependencies seed.WithTimeAndRand() } diff --git a/cmd/ctr/app/main.go b/cmd/ctr/app/main.go index bf27438..4beb24b 100644 --- a/cmd/ctr/app/main.go +++ b/cmd/ctr/app/main.go @@ -18,7 +18,7 @@ package app import ( "fmt" - "io/ioutil" + "io" "github.com/containerd/containerd/cmd/ctr/commands/containers" "github.com/containerd/containerd/cmd/ctr/commands/content" @@ -46,7 +46,7 @@ var extraCmds = []cli.Command{} func init() { // Discard grpc logs so that they don't mess with our stdio - grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) + grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, io.Discard)) cli.VersionPrinter = func(c *cli.Context) { 
fmt.Println(c.App.Name, version.Package, c.App.Version) diff --git a/cmd/ctr/app/main_unix.go b/cmd/ctr/app/main_unix.go index c0eb1b6..f922e4e 100644 --- a/cmd/ctr/app/main_unix.go +++ b/cmd/ctr/app/main_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/cmd/ctr/commands/commands.go b/cmd/ctr/commands/commands.go index 5a58e9a..7823aa4 100644 --- a/cmd/ctr/commands/commands.go +++ b/cmd/ctr/commands/commands.go @@ -24,6 +24,8 @@ import ( "strings" "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/pkg/atomicfile" + "github.com/urfave/cli" ) @@ -37,6 +39,12 @@ var ( }, } + // SnapshotterLabels are cli flags specifying labels which will be add to the new snapshot for container. + SnapshotterLabels = cli.StringSliceFlag{ + Name: "snapshotter-label", + Usage: "labels added to the new snapshot for this container.", + } + // LabelFlag is a cli flag specifying labels LabelFlag = cli.StringSliceFlag{ Name: "label", @@ -110,6 +118,10 @@ var ( Name: "label", Usage: "specify additional labels (e.g. foo=bar)", }, + cli.StringSliceFlag{ + Name: "annotation", + Usage: "specify additional OCI annotations (e.g. foo=bar)", + }, cli.StringSliceFlag{ Name: "mount", Usage: "specify additional container mount (e.g. 
type=bind,src=/tmp,dst=/host,options=rbind:ro)", @@ -147,7 +159,7 @@ var ( Name: "pid-file", Usage: "file path to write the task's pid", }, - cli.IntFlag{ + cli.IntSliceFlag{ Name: "gpus", Usage: "add gpus to the container", }, @@ -163,6 +175,14 @@ var ( Name: "device", Usage: "file path to a device to add to the container; or a path to a directory tree of devices to add to the container", }, + cli.StringSliceFlag{ + Name: "cap-add", + Usage: "add Linux capabilities (Set capabilities with 'CAP_' prefix)", + }, + cli.StringSliceFlag{ + Name: "cap-drop", + Usage: "drop Linux capabilities (Set capabilities with 'CAP_' prefix)", + }, cli.BoolFlag{ Name: "seccomp", Usage: "enable the default seccomp profile", @@ -179,6 +199,10 @@ var ( Name: "apparmor-profile", Usage: "enable AppArmor with an existing custom profile", }, + cli.StringFlag{ + Name: "rdt-class", + Usage: "name of the RDT class to associate the container with. Specifies a Class of Service (CLOS) for cache and memory bandwidth management.", + }, } ) @@ -209,6 +233,19 @@ func LabelArgs(labelStrings []string) map[string]string { return labels } +// AnnotationArgs returns a map of annotation key,value pairs. 
+func AnnotationArgs(annoStrings []string) (map[string]string, error) { + annotations := make(map[string]string, len(annoStrings)) + for _, anno := range annoStrings { + parts := strings.SplitN(anno, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid key=value format annotation: %v", anno) + } + annotations[parts[0]] = parts[1] + } + return annotations, nil +} + // PrintAsJSON prints input in JSON format func PrintAsJSON(x interface{}) { b, err := json.MarshalIndent(x, "", " ") @@ -224,15 +261,14 @@ func WritePidFile(path string, pid int) error { if err != nil { return err } - tempPath := filepath.Join(filepath.Dir(path), fmt.Sprintf(".%s", filepath.Base(path))) - f, err := os.OpenFile(tempPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666) + f, err := atomicfile.New(path, 0o666) if err != nil { return err } _, err = fmt.Fprintf(f, "%d", pid) - f.Close() if err != nil { + f.Cancel() return err } - return os.Rename(tempPath, path) + return f.Close() } diff --git a/cmd/ctr/commands/commands_unix.go b/cmd/ctr/commands/commands_unix.go index 38ee594..44b81e1 100644 --- a/cmd/ctr/commands/commands_unix.go +++ b/cmd/ctr/commands/commands_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -36,5 +37,8 @@ func init() { }, cli.Uint64Flag{ Name: "cpu-period", Usage: "Limit CPU CFS period", + }, cli.StringFlag{ + Name: "rootfs-propagation", + Usage: "set the propagation of the container rootfs", }) } diff --git a/cmd/ctr/commands/commands_windows.go b/cmd/ctr/commands/commands_windows.go index 4bd3d25..5c1d98e 100644 --- a/cmd/ctr/commands/commands_windows.go +++ b/cmd/ctr/commands/commands_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
diff --git a/cmd/ctr/commands/containers/checkpoint.go b/cmd/ctr/commands/containers/checkpoint.go index 53bf70b..62804f4 100644 --- a/cmd/ctr/commands/containers/checkpoint.go +++ b/cmd/ctr/commands/containers/checkpoint.go @@ -17,12 +17,12 @@ package containers import ( + "errors" "fmt" "github.com/containerd/containerd" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -88,7 +88,7 @@ var checkpointCommand = cli.Command{ } defer func() { if err := task.Resume(ctx); err != nil { - fmt.Println(errors.Wrap(err, "error resuming task")) + fmt.Println(fmt.Errorf("error resuming task: %w", err)) } }() } diff --git a/cmd/ctr/commands/containers/containers.go b/cmd/ctr/commands/containers/containers.go index f87b0c6..d102534 100644 --- a/cmd/ctr/commands/containers/containers.go +++ b/cmd/ctr/commands/containers/containers.go @@ -31,7 +31,6 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" "github.com/containerd/typeurl" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -55,7 +54,7 @@ var createCommand = cli.Command{ Name: "create", Usage: "create container", ArgsUsage: "[flags] Image|RootFS CONTAINER [COMMAND] [ARG...]", - Flags: append(commands.SnapshotterFlags, commands.ContainerFlags...), + Flags: append(append(commands.SnapshotterFlags, []cli.Flag{commands.SnapshotterLabels}...), commands.ContainerFlags...), Action: func(context *cli.Context) error { var ( id string @@ -66,17 +65,17 @@ var createCommand = cli.Command{ if config { id = context.Args().First() if context.NArg() > 1 { - return errors.Wrap(errdefs.ErrInvalidArgument, "with spec config file, only container id should be provided") + return fmt.Errorf("with spec config file, only container id should be provided: %w", errdefs.ErrInvalidArgument) } } else { id = context.Args().Get(1) ref = context.Args().First() if ref == "" { - return 
errors.Wrap(errdefs.ErrInvalidArgument, "image ref must be provided") + return fmt.Errorf("image ref must be provided: %w", errdefs.ErrInvalidArgument) } } if id == "" { - return errors.Wrap(errdefs.ErrInvalidArgument, "container id must be provided") + return fmt.Errorf("container id must be provided: %w", errdefs.ErrInvalidArgument) } client, ctx, cancel, err := commands.NewClient(context) if err != nil { @@ -149,7 +148,7 @@ var deleteCommand = cli.Command{ Name: "delete", Usage: "delete one or more existing containers", ArgsUsage: "[flags] CONTAINER [CONTAINER, ...]", - Aliases: []string{"del", "rm"}, + Aliases: []string{"del", "remove", "rm"}, Flags: []cli.Flag{ cli.BoolFlag{ Name: "keep-snapshot", @@ -169,7 +168,7 @@ var deleteCommand = cli.Command{ } if context.NArg() == 0 { - return errors.Wrap(errdefs.ErrInvalidArgument, "must specify at least one container to delete") + return fmt.Errorf("must specify at least one container to delete: %w", errdefs.ErrInvalidArgument) } for _, arg := range context.Args() { if err := deleteContainer(ctx, client, arg, deleteOpts...); err != nil { @@ -215,7 +214,7 @@ var setLabelsCommand = cli.Command{ Action: func(context *cli.Context) error { containerID, labels := commands.ObjectWithLabelArgs(context) if containerID == "" { - return errors.Wrap(errdefs.ErrInvalidArgument, "container id must be provided") + return fmt.Errorf("container id must be provided: %w", errdefs.ErrInvalidArgument) } client, ctx, cancel, err := commands.NewClient(context) if err != nil { @@ -257,7 +256,7 @@ var infoCommand = cli.Command{ Action: func(context *cli.Context) error { id := context.Args().First() if id == "" { - return errors.Wrap(errdefs.ErrInvalidArgument, "container id must be provided") + return fmt.Errorf("container id must be provided: %w", errdefs.ErrInvalidArgument) } client, ctx, cancel, err := commands.NewClient(context) if err != nil { diff --git a/cmd/ctr/commands/containers/restore.go b/cmd/ctr/commands/containers/restore.go 
index 85337b3..2847340 100644 --- a/cmd/ctr/commands/containers/restore.go +++ b/cmd/ctr/commands/containers/restore.go @@ -17,11 +17,12 @@ package containers import ( + "errors" + "github.com/containerd/containerd" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" "github.com/urfave/cli" ) diff --git a/cmd/ctr/commands/content/content.go b/cmd/ctr/commands/content/content.go index 6f61fca..f0458dd 100644 --- a/cmd/ctr/commands/content/content.go +++ b/cmd/ctr/commands/content/content.go @@ -17,11 +17,10 @@ package content import ( + "errors" "fmt" "io" - "io/ioutil" "os" - "os/exec" "strings" "text/tabwriter" "time" @@ -33,8 +32,8 @@ import ( units "github.com/docker/go-units" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/urfave/cli" + exec "golang.org/x/sys/execabs" ) var ( @@ -519,7 +518,7 @@ func edit(context *cli.Context, rd io.Reader) (io.ReadCloser, error) { return nil, fmt.Errorf("editor is required") } - tmp, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), "edit-") + tmp, err := os.CreateTemp(os.Getenv("XDG_RUNTIME_DIR"), "edit-") if err != nil { return nil, err } diff --git a/cmd/ctr/commands/content/fetch.go b/cmd/ctr/commands/content/fetch.go index 4dc1087..aef9e86 100644 --- a/cmd/ctr/commands/content/fetch.go +++ b/cmd/ctr/commands/content/fetch.go @@ -280,7 +280,7 @@ outer: info, err := cs.Info(ctx, j.Digest) if err != nil { if !errdefs.IsNotFound(err) { - log.G(ctx).WithError(err).Errorf("failed to get content info") + log.G(ctx).WithError(err).Error("failed to get content info") continue outer } else { statuses[key] = StatusInfo{ diff --git a/cmd/ctr/commands/images/convert.go b/cmd/ctr/commands/images/convert.go index 3baec0b..e2bcd0d 100644 --- a/cmd/ctr/commands/images/convert.go +++ b/cmd/ctr/commands/images/convert.go @@ -17,6 +17,7 @@ 
package images import ( + "errors" "fmt" "github.com/containerd/containerd/cmd/ctr/commands" @@ -24,7 +25,6 @@ import ( "github.com/containerd/containerd/images/converter/uncompress" "github.com/containerd/containerd/platforms" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -74,7 +74,7 @@ When '--all-platforms' is given all images in a manifest list must be available. for _, ps := range pss { p, err := platforms.Parse(ps) if err != nil { - return errors.Wrapf(err, "invalid platform %q", ps) + return fmt.Errorf("invalid platform %q: %w", ps, err) } all = append(all, p) } diff --git a/cmd/ctr/commands/images/export.go b/cmd/ctr/commands/images/export.go index 41d8893..5050ff4 100644 --- a/cmd/ctr/commands/images/export.go +++ b/cmd/ctr/commands/images/export.go @@ -17,6 +17,8 @@ package images import ( + "errors" + "fmt" "io" "os" @@ -24,7 +26,6 @@ import ( "github.com/containerd/containerd/images/archive" "github.com/containerd/containerd/platforms" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -73,13 +74,13 @@ When '--all-platforms' is given all images in a manifest list must be available. 
for _, ps := range pss { p, err := platforms.Parse(ps) if err != nil { - return errors.Wrapf(err, "invalid platform %q", ps) + return fmt.Errorf("invalid platform %q: %w", ps, err) } all = append(all, p) } exportOpts = append(exportOpts, archive.WithPlatform(platforms.Ordered(all...))) } else { - exportOpts = append(exportOpts, archive.WithPlatform(platforms.Default())) + exportOpts = append(exportOpts, archive.WithPlatform(platforms.DefaultStrict())) } if context.Bool("all-platforms") { diff --git a/cmd/ctr/commands/images/images.go b/cmd/ctr/commands/images/images.go index 077afd8..a68d216 100644 --- a/cmd/ctr/commands/images/images.go +++ b/cmd/ctr/commands/images/images.go @@ -17,6 +17,7 @@ package images import ( + "errors" "fmt" "os" "sort" @@ -29,7 +30,6 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/progress" "github.com/containerd/containerd/platforms" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -82,7 +82,7 @@ var listCommand = cli.Command{ ) imageList, err := imageStore.List(ctx, filters...) if err != nil { - return errors.Wrap(err, "failed to list images") + return fmt.Errorf("failed to list images: %w", err) } if quiet { for _, image := range imageList { @@ -224,7 +224,7 @@ var checkCommand = cli.Command{ args := []string(context.Args()) imageList, err := client.ListImages(ctx, args...) 
if err != nil { - return errors.Wrap(err, "failed listing images") + return fmt.Errorf("failed listing images: %w", err) } if len(imageList) == 0 { log.G(ctx).Debugf("no images found") @@ -248,7 +248,7 @@ var checkCommand = cli.Command{ available, required, present, missing, err := images.Check(ctx, contentStore, image.Target(), platforms.Default()) if err != nil { if exitErr == nil { - exitErr = errors.Wrapf(err, "unable to check %v", image.Name()) + exitErr = fmt.Errorf("unable to check %v: %w", image.Name(), err) } log.G(ctx).WithError(err).Errorf("unable to check %v", image.Name()) status = "error" @@ -284,7 +284,7 @@ var checkCommand = cli.Command{ unpacked, err := image.IsUnpacked(ctx, context.String("snapshotter")) if err != nil { if exitErr == nil { - exitErr = errors.Wrapf(err, "unable to check unpack for %v", image.Name()) + exitErr = fmt.Errorf("unable to check unpack for %v: %w", image.Name(), err) } log.G(ctx).WithError(err).Errorf("unable to check unpack for %v", image.Name()) } @@ -311,8 +311,8 @@ var checkCommand = cli.Command{ } var removeCommand = cli.Command{ - Name: "remove", - Aliases: []string{"rm"}, + Name: "delete", + Aliases: []string{"del", "remove", "rm"}, Usage: "remove one or more images by reference", ArgsUsage: "[flags] [, ...]", Description: "remove one or more images by reference", @@ -340,7 +340,7 @@ var removeCommand = cli.Command{ if err := imageStore.Delete(ctx, target, opts...); err != nil { if !errdefs.IsNotFound(err) { if exitErr == nil { - exitErr = errors.Wrapf(err, "unable to delete %v", target) + exitErr = fmt.Errorf("unable to delete %v: %w", target, err) } log.G(ctx).WithError(err).Errorf("unable to delete %v", target) continue diff --git a/cmd/ctr/commands/images/import.go b/cmd/ctr/commands/images/import.go index e7a4945..850b8a2 100644 --- a/cmd/ctr/commands/images/import.go +++ b/cmd/ctr/commands/images/import.go @@ -26,6 +26,7 @@ import ( "github.com/containerd/containerd/cmd/ctr/commands" 
"github.com/containerd/containerd/images/archive" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/platforms" "github.com/urfave/cli" ) @@ -60,6 +61,10 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb Name: "digests", Usage: "whether to create digest images (default: false)", }, + cli.BoolFlag{ + Name: "skip-digest-for-named", + Usage: "skip applying --digests option to images named in the importing tar (use it in conjunction with --digests)", + }, cli.StringFlag{ Name: "index-name", Usage: "image name to keep index as, by default index is discarded", @@ -68,6 +73,10 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb Name: "all-platforms", Usage: "imports content for all platforms, false by default", }, + cli.StringFlag{ + Name: "platform", + Usage: "imports content for specific platform", + }, cli.BoolFlag{ Name: "no-unpack", Usage: "skip unpacking the images, false by default", @@ -80,8 +89,9 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb Action: func(context *cli.Context) error { var ( - in = context.Args().First() - opts []containerd.ImportOpt + in = context.Args().First() + opts []containerd.ImportOpt + platformMatcher platforms.MatchComparer ) prefix := context.String("base-name") @@ -96,6 +106,12 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb if context.Bool("digests") { opts = append(opts, containerd.WithDigestRef(archive.DigestTranslator(prefix))) } + if context.Bool("skip-digest-for-named") { + if !context.Bool("digests") { + return fmt.Errorf("--skip-digest-for-named must be specified with --digests option") + } + opts = append(opts, containerd.WithSkipDigestRef(func(name string) bool { return name != "" })) + } if idxName := context.String("index-name"); idxName != "" { opts = append(opts, containerd.WithIndexName(idxName)) @@ -105,6 +121,15 @@ If foobar.tar contains an OCI ref named 
"latest" and anonymous ref "sha256:deadb opts = append(opts, containerd.WithImportCompression()) } + if platform := context.String("platform"); platform != "" { + platSpec, err := platforms.Parse(platform) + if err != nil { + return err + } + platformMatcher = platforms.OnlyStrict(platSpec) + opts = append(opts, containerd.WithImportPlatform(platformMatcher)) + } + opts = append(opts, containerd.WithAllPlatforms(context.Bool("all-platforms"))) client, ctx, cancel, err := commands.NewClient(context) @@ -135,8 +160,10 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb log.G(ctx).Debugf("unpacking %d images", len(imgs)) for _, img := range imgs { - // TODO: Allow configuration of the platform - image := containerd.NewImage(client, img) + if platformMatcher == nil { // if platform not specified use default. + platformMatcher = platforms.Default() + } + image := containerd.NewImageWithPlatform(client, img, platformMatcher) // TODO: Show unpack status fmt.Printf("unpacking %s (%s)...", img.Name, img.Target.Digest) diff --git a/cmd/ctr/commands/images/mount.go b/cmd/ctr/commands/images/mount.go index c0e03dc..a907ad5 100644 --- a/cmd/ctr/commands/images/mount.go +++ b/cmd/ctr/commands/images/mount.go @@ -27,7 +27,6 @@ import ( "github.com/containerd/containerd/mount" "github.com/containerd/containerd/platforms" "github.com/opencontainers/image-spec/identity" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -93,7 +92,7 @@ When you are done, use the unmount command. ps := context.String("platform") p, err := platforms.Parse(ps) if err != nil { - return errors.Wrapf(err, "unable to parse platform %s", ps) + return fmt.Errorf("unable to parse platform %s: %w", ps, err) } img, err := client.ImageService().Get(ctx, ref) @@ -103,7 +102,7 @@ When you are done, use the unmount command. 
i := containerd.NewImageWithPlatform(client, img, platforms.Only(p)) if err := i.Unpack(ctx, snapshotter); err != nil { - return errors.Wrap(err, "error unpacking image") + return fmt.Errorf("error unpacking image: %w", err) } diffIDs, err := i.RootFS(ctx) diff --git a/cmd/ctr/commands/images/pull.go b/cmd/ctr/commands/images/pull.go index 7a5db21..c46dcd4 100644 --- a/cmd/ctr/commands/images/pull.go +++ b/cmd/ctr/commands/images/pull.go @@ -28,7 +28,6 @@ import ( "github.com/containerd/containerd/platforms" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -106,13 +105,13 @@ command. As part of this process, we do the following: if context.Bool("all-platforms") { p, err = images.Platforms(ctx, client.ContentStore(), img.Target) if err != nil { - return errors.Wrap(err, "unable to resolve image platforms") + return fmt.Errorf("unable to resolve image platforms: %w", err) } } else { for _, s := range context.StringSlice("platform") { ps, err := platforms.Parse(s) if err != nil { - return errors.Wrapf(err, "unable to parse platform %s", s) + return fmt.Errorf("unable to parse platform %s: %w", s, err) } p = append(p, ps) } diff --git a/cmd/ctr/commands/images/push.go b/cmd/ctr/commands/images/push.go index da9e896..7123878 100644 --- a/cmd/ctr/commands/images/push.go +++ b/cmd/ctr/commands/images/push.go @@ -18,6 +18,8 @@ package images import ( gocontext "context" + "errors" + "fmt" "net/http/httptrace" "os" "sync" @@ -35,7 +37,6 @@ import ( "github.com/containerd/containerd/remotes/docker" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/urfave/cli" "golang.org/x/sync/errgroup" ) @@ -67,6 +68,9 @@ var pushCommand = cli.Command{ }, cli.IntFlag{ Name: "max-concurrent-uploaded-layers", Usage: "Set the max concurrent uploaded layers for each push", + }, cli.BoolFlag{ 
+ Name: "allow-non-distributable-blobs", + Usage: "Allow pushing blobs that are marked as non-distributable", }), Action: func(context *cli.Context) error { var ( @@ -88,7 +92,7 @@ var pushCommand = cli.Command{ if manifest := context.String("manifest"); manifest != "" { desc.Digest, err = digest.Parse(manifest) if err != nil { - return errors.Wrap(err, "invalid manifest digest") + return fmt.Errorf("invalid manifest digest: %w", err) } desc.MediaType = context.String("manifest-type") } else { @@ -97,14 +101,14 @@ var pushCommand = cli.Command{ } img, err := client.ImageService().Get(ctx, local) if err != nil { - return errors.Wrap(err, "unable to resolve image to manifest") + return fmt.Errorf("unable to resolve image to manifest: %w", err) } desc = img.Target if pss := context.StringSlice("platform"); len(pss) == 1 { p, err := platforms.Parse(pss[0]) if err != nil { - return errors.Wrapf(err, "invalid platform %q", pss[0]) + return fmt.Errorf("invalid platform %q: %w", pss[0], err) } cs := client.ContentStore() @@ -113,7 +117,7 @@ var pushCommand = cli.Command{ for _, manifest := range manifests { if manifest.Platform != nil && matcher.Match(*manifest.Platform) { if _, err := images.Children(ctx, cs, manifest); err != nil { - return errors.Wrap(err, "no matching manifest") + return fmt.Errorf("no matching manifest: %w", err) } desc = manifest break @@ -143,13 +147,21 @@ var pushCommand = cli.Command{ log.G(ctx).WithField("image", ref).WithField("digest", desc.Digest).Debug("pushing") jobHandler := images.HandlerFunc(func(ctx gocontext.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if !context.Bool("allow-non-distributable-blobs") && images.IsNonDistributable(desc.MediaType) { + return nil, nil + } ongoing.add(remotes.MakeRefKey(ctx, desc)) return nil, nil }) + handler := jobHandler + if !context.Bool("allow-non-distributable-blobs") { + handler = remotes.SkipNonDistributableBlobs(handler) + } + ropts := []containerd.RemoteOpt{ 
containerd.WithResolver(resolver), - containerd.WithImageHandler(jobHandler), + containerd.WithImageHandler(handler), } if context.IsSet("max-concurrent-uploaded-layers") { diff --git a/cmd/ctr/commands/images/unmount.go b/cmd/ctr/commands/images/unmount.go index 3f54f63..f98570d 100644 --- a/cmd/ctr/commands/images/unmount.go +++ b/cmd/ctr/commands/images/unmount.go @@ -23,7 +23,6 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/leases" "github.com/containerd/containerd/mount" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -60,10 +59,10 @@ var unmountCommand = cli.Command{ snapshotter := context.String("snapshotter") s := client.SnapshotService(snapshotter) if err := client.LeasesService().Delete(ctx, leases.Lease{ID: target}); err != nil && !errdefs.IsNotFound(err) { - return errors.Wrap(err, "error deleting lease") + return fmt.Errorf("error deleting lease: %w", err) } if err := s.Remove(ctx, target); err != nil && !errdefs.IsNotFound(err) { - return errors.Wrap(err, "error removing snapshot") + return fmt.Errorf("error removing snapshot: %w", err) } } diff --git a/cmd/ctr/commands/leases/leases.go b/cmd/ctr/commands/leases/leases.go index d0b107d..d9f1025 100644 --- a/cmd/ctr/commands/leases/leases.go +++ b/cmd/ctr/commands/leases/leases.go @@ -26,7 +26,6 @@ import ( "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/containerd/leases" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -69,7 +68,7 @@ var listCommand = cli.Command{ leaseList, err := ls.List(ctx, filters...) 
if err != nil { - return errors.Wrap(err, "failed to list leases") + return fmt.Errorf("failed to list leases: %w", err) } if quiet { for _, l := range leaseList { @@ -159,7 +158,7 @@ var createCommand = cli.Command{ var deleteCommand = cli.Command{ Name: "delete", - Aliases: []string{"rm"}, + Aliases: []string{"del", "remove", "rm"}, Usage: "delete a lease", ArgsUsage: "[flags] ...", Description: "delete a lease", diff --git a/cmd/ctr/commands/namespaces/namespaces.go b/cmd/ctr/commands/namespaces/namespaces.go index 93455b1..ddab3f5 100644 --- a/cmd/ctr/commands/namespaces/namespaces.go +++ b/cmd/ctr/commands/namespaces/namespaces.go @@ -17,6 +17,7 @@ package namespaces import ( + "errors" "fmt" "os" "sort" @@ -26,7 +27,6 @@ import ( "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -47,7 +47,7 @@ var createCommand = cli.Command{ Name: "create", Aliases: []string{"c"}, Usage: "create a new namespace", - ArgsUsage: " [= [=]", Description: "create a new namespace. it must be unique", Action: func(context *cli.Context) error { namespace, labels := commands.ObjectWithLabelArgs(context) @@ -68,7 +68,7 @@ var setLabelsCommand = cli.Command{ Name: "label", Usage: "set and clear labels for a namespace", ArgsUsage: " [=, ...]", - Description: "set and clear labels for a namespace", + Description: "set and clear labels for a namespace. 
empty value clears the label", Action: func(context *cli.Context) error { namespace, labels := commands.ObjectWithLabelArgs(context) if namespace == "" { @@ -167,7 +167,7 @@ var removeCommand = cli.Command{ if err := namespaces.Delete(ctx, target, opts...); err != nil { if !errdefs.IsNotFound(err) { if exitErr == nil { - exitErr = errors.Wrapf(err, "unable to delete %v", target) + exitErr = fmt.Errorf("unable to delete %v: %w", target, err) } log.G(ctx).WithError(err).Errorf("unable to delete %v", target) continue diff --git a/cmd/ctr/commands/namespaces/namespaces_other.go b/cmd/ctr/commands/namespaces/namespaces_other.go index b0f12e5..4e99d60 100644 --- a/cmd/ctr/commands/namespaces/namespaces_other.go +++ b/cmd/ctr/commands/namespaces/namespaces_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/cmd/ctr/commands/oci/oci.go b/cmd/ctr/commands/oci/oci.go index d27d026..cbd64fe 100644 --- a/cmd/ctr/commands/oci/oci.go +++ b/cmd/ctr/commands/oci/oci.go @@ -17,7 +17,8 @@ package oci import ( - "github.com/pkg/errors" + "fmt" + "github.com/urfave/cli" "github.com/containerd/containerd/cmd/ctr/commands" @@ -43,7 +44,7 @@ var defaultSpecCommand = cli.Command{ spec, err := oci.GenerateSpec(ctx, nil, &containers.Container{}) if err != nil { - return errors.Wrap(err, "failed to generate spec") + return fmt.Errorf("failed to generate spec: %w", err) } commands.PrintAsJSON(spec) diff --git a/cmd/ctr/commands/pprof/pprof.go b/cmd/ctr/commands/pprof/pprof.go index acaf2f9..1eafcf2 100644 --- a/cmd/ctr/commands/pprof/pprof.go +++ b/cmd/ctr/commands/pprof/pprof.go @@ -24,7 +24,6 @@ import ( "time" "github.com/containerd/containerd/defaults" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -183,7 +182,7 @@ func httpGetRequest(client *http.Client, request string) (io.ReadCloser, error) return nil, err } if resp.StatusCode != 200 { - return nil, errors.Errorf("http get failed with status: %s", resp.Status) + return nil, fmt.Errorf("http get failed 
with status: %s", resp.Status) } return resp.Body, nil } diff --git a/cmd/ctr/commands/pprof/pprof_unix.go b/cmd/ctr/commands/pprof/pprof_unix.go index 9314ca5..de662a2 100644 --- a/cmd/ctr/commands/pprof/pprof_unix.go +++ b/cmd/ctr/commands/pprof/pprof_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/cmd/ctr/commands/resolver.go b/cmd/ctr/commands/resolver.go index 3571ecf..729d514 100644 --- a/cmd/ctr/commands/resolver.go +++ b/cmd/ctr/commands/resolver.go @@ -21,12 +21,13 @@ import ( gocontext "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptrace" "net/http/httputil" + "os" "strings" "github.com/containerd/console" @@ -34,7 +35,6 @@ import ( "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker/config" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -46,12 +46,12 @@ func passwordPrompt() (string, error) { defer c.Reset() if err := c.DisableEcho(); err != nil { - return "", errors.Wrap(err, "failed to disable echo") + return "", fmt.Errorf("failed to disable echo: %w", err) } line, _, err := bufio.NewReader(c).ReadLine() if err != nil { - return "", errors.Wrap(err, "failed to read line") + return "", fmt.Errorf("failed to read line: %w", err) } return string(line), nil } @@ -124,9 +124,9 @@ func resolverDefaultTLS(clicontext *cli.Context) (*tls.Config, error) { } if tlsRootPath := clicontext.String("tlscacert"); tlsRootPath != "" { - tlsRootData, err := ioutil.ReadFile(tlsRootPath) + tlsRootData, err := os.ReadFile(tlsRootPath) if err != nil { - return nil, errors.Wrapf(err, "failed to read %q", tlsRootPath) + return nil, fmt.Errorf("failed to read %q: %w", tlsRootPath, err) } config.RootCAs = x509.NewCertPool() @@ -143,7 +143,7 @@ func resolverDefaultTLS(clicontext *cli.Context) (*tls.Config, error) { } keyPair, err := tls.LoadX509KeyPair(tlsCertPath, tlsKeyPath) if err != nil { - return 
nil, errors.Wrapf(err, "failed to load TLS client credentials (cert=%q, key=%q)", tlsCertPath, tlsKeyPath) + return nil, fmt.Errorf("failed to load TLS client credentials (cert=%q, key=%q): %w", tlsCertPath, tlsKeyPath, err) } config.Certificates = []tls.Certificate{keyPair} } @@ -161,7 +161,7 @@ type DebugTransport struct { func (t DebugTransport) RoundTrip(req *http.Request) (*http.Response, error) { in, err := httputil.DumpRequest(req, true) if err != nil { - return nil, errors.Wrap(err, "failed to dump request") + return nil, fmt.Errorf("failed to dump request: %w", err) } if _, err := t.writer.Write(in); err != nil { @@ -175,7 +175,7 @@ func (t DebugTransport) RoundTrip(req *http.Request) (*http.Response, error) { out, err := httputil.DumpResponse(resp, true) if err != nil { - return nil, errors.Wrap(err, "failed to dump response") + return nil, fmt.Errorf("failed to dump response: %w", err) } if _, err := t.writer.Write(out); err != nil { diff --git a/cmd/ctr/commands/run/run.go b/cmd/ctr/commands/run/run.go index 697e579..55cff7a 100644 --- a/cmd/ctr/commands/run/run.go +++ b/cmd/ctr/commands/run/run.go @@ -20,6 +20,7 @@ import ( "context" gocontext "context" "encoding/csv" + "errors" "fmt" "strings" @@ -34,7 +35,6 @@ import ( "github.com/containerd/containerd/oci" gocni "github.com/containerd/go-cni" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) @@ -64,8 +64,8 @@ func parseMountFlag(m string) (specs.Mount, error) { } for _, field := range fields { - v := strings.Split(field, "=") - if len(v) != 2 { + v := strings.SplitN(field, "=", 2) + if len(v) < 2 { return mount, fmt.Errorf("invalid mount specification: expected key=val") } @@ -97,7 +97,7 @@ var Command = cli.Command{ Flags: append([]cli.Flag{ cli.BoolFlag{ Name: "rm", - Usage: "remove the container after running", + Usage: "remove the container after running, cannot be used with --detach", }, cli.BoolFlag{ Name: 
"null-io", @@ -109,7 +109,7 @@ var Command = cli.Command{ }, cli.BoolFlag{ Name: "detach,d", - Usage: "detach from the task after it has started execution", + Usage: "detach from the task after it has started execution, cannot be used with --rm", }, cli.StringFlag{ Name: "fifo-dir", @@ -123,13 +123,20 @@ var Command = cli.Command{ Name: "platform", Usage: "run image for specific platform", }, - }, append(platformRunFlags, append(commands.SnapshotterFlags, commands.ContainerFlags...)...)...), + cli.BoolFlag{ + Name: "cni", + Usage: "enable cni networking for the container", + }, + }, append(platformRunFlags, + append(append(commands.SnapshotterFlags, []cli.Flag{commands.SnapshotterLabels}...), + commands.ContainerFlags...)...)...), Action: func(context *cli.Context) error { var ( err error id string ref string + rm = context.Bool("rm") tty = context.Bool("tty") detach = context.Bool("detach") config = context.IsSet("config") @@ -152,6 +159,10 @@ var Command = cli.Command{ if id == "" { return errors.New("container id must be provided") } + if rm && detach { + return errors.New("flags --detach and --rm cannot be specified together") + } + client, ctx, cancel, err := commands.NewClient(context) if err != nil { return err @@ -161,7 +172,7 @@ var Command = cli.Command{ if err != nil { return err } - if context.Bool("rm") && !detach { + if rm && !detach { defer container.Delete(ctx, containerd.WithSnapshotCleanup) } var con console.Console @@ -207,7 +218,12 @@ var Command = cli.Command{ } } if enableCNI { - if _, err := network.Setup(ctx, fullID(ctx, container), fmt.Sprintf("/proc/%d/ns/net", task.Pid())); err != nil { + netNsPath, err := getNetNSPath(ctx, task) + if err != nil { + return err + } + + if _, err := network.Setup(ctx, fullID(ctx, container), netNsPath); err != nil { return err } } diff --git a/cmd/ctr/commands/run/run_unix.go b/cmd/ctr/commands/run/run_unix.go index a277e22..6ee3940 100644 --- a/cmd/ctr/commands/run/run_unix.go +++ 
b/cmd/ctr/commands/run/run_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,13 +21,16 @@ package run import ( gocontext "context" + "errors" "fmt" + "os" "path/filepath" "strconv" "strings" "github.com/containerd/containerd" "github.com/containerd/containerd/cmd/ctr/commands" + "github.com/containerd/containerd/containers" "github.com/containerd/containerd/contrib/apparmor" "github.com/containerd/containerd/contrib/nvidia" "github.com/containerd/containerd/contrib/seccomp" @@ -34,8 +38,8 @@ import ( runtimeoptions "github.com/containerd/containerd/pkg/runtimeoptions/v1" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/runtime/v2/runc/options" + "github.com/containerd/containerd/snapshots" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) @@ -70,9 +74,10 @@ var platformRunFlags = []cli.Flag{ Usage: "set the CFS cpu quota", Value: 0.0, }, - cli.BoolFlag{ - Name: "cni", - Usage: "enable cni networking for the container", + cli.IntFlag{ + Name: "cpu-shares", + Usage: "set the cpu shares", + Value: 1024, }, } @@ -95,7 +100,7 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli ) if config { - cOpts = append(cOpts, containerd.WithContainerLabels(commands.LabelArgs(context.StringSlice("labels")))) + cOpts = append(cOpts, containerd.WithContainerLabels(commands.LabelArgs(context.StringSlice("label")))) opts = append(opts, oci.WithSpecFromFile(context.String("config"))) } else { var ( @@ -116,7 +121,7 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli return nil, err } opts = append(opts, oci.WithRootFSPath(rootfs)) - cOpts = append(cOpts, containerd.WithContainerLabels(commands.LabelArgs(context.StringSlice("labels")))) + cOpts = append(cOpts, containerd.WithContainerLabels(commands.LabelArgs(context.StringSlice("label")))) } else { snapshotter := context.String("snapshotter") 
var image containerd.Image @@ -174,7 +179,10 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli // Even when "read-only" is set, we don't use KindView snapshot here. (#1495) // We pass writable snapshot to the OCI runtime, and the runtime remounts it as read-only, // after creating some mount points on demand. - cOpts = append(cOpts, containerd.WithNewSnapshot(id, image)) + // For some snapshotter, such as overlaybd, it can provide 2 kind of writable snapshot(overlayfs dir or block-device) + // by command label values. + cOpts = append(cOpts, containerd.WithNewSnapshot(id, image, + snapshots.WithLabels(commands.LabelArgs(context.StringSlice("snapshotter-label"))))) } cOpts = append(cOpts, containerd.WithImageStopSignal(image, "SIGTERM")) } @@ -194,7 +202,41 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli opts = append(opts, oci.WithPrivileged, oci.WithAllDevicesAllowed, oci.WithHostDevices) } if context.Bool("net-host") { - opts = append(opts, oci.WithHostNamespace(specs.NetworkNamespace), oci.WithHostHostsFile, oci.WithHostResolvconf) + hostname, err := os.Hostname() + if err != nil { + return nil, fmt.Errorf("get hostname: %w", err) + } + opts = append(opts, + oci.WithHostNamespace(specs.NetworkNamespace), + oci.WithHostHostsFile, + oci.WithHostResolvconf, + oci.WithEnv([]string{fmt.Sprintf("HOSTNAME=%s", hostname)}), + ) + } + if annoStrings := context.StringSlice("annotation"); len(annoStrings) > 0 { + annos, err := commands.AnnotationArgs(annoStrings) + if err != nil { + return nil, err + } + opts = append(opts, oci.WithAnnotations(annos)) + } + + if caps := context.StringSlice("cap-add"); len(caps) > 0 { + for _, cap := range caps { + if !strings.HasPrefix(cap, "CAP_") { + return nil, fmt.Errorf("capabilities must be specified with 'CAP_' prefix") + } + } + opts = append(opts, oci.WithAddedCapabilities(caps)) + } + + if caps := context.StringSlice("cap-drop"); len(caps) > 0 { + for _, cap := 
range caps { + if !strings.HasPrefix(cap, "CAP_") { + return nil, fmt.Errorf("capabilities must be specified with 'CAP_' prefix") + } + } + opts = append(opts, oci.WithDroppedCapabilities(caps)) } seccompProfile := context.String("seccomp-profile") @@ -230,6 +272,10 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli opts = append(opts, oci.WithCPUCFS(quota, period)) } + if shares := context.Int("cpu-shares"); shares > 0 { + opts = append(opts, oci.WithCPUShares(uint64(shares))) + } + quota := context.Int64("cpu-quota") period := context.Uint64("cpu-period") if quota != -1 || period != 0 { @@ -254,7 +300,7 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli })) } if context.IsSet("gpus") { - opts = append(opts, nvidia.WithGPUs(nvidia.WithDevices(context.Int("gpus")), nvidia.WithAllCapabilities)) + opts = append(opts, nvidia.WithGPUs(nvidia.WithDevices(context.IntSlice("gpus")...), nvidia.WithAllCapabilities)) } if context.IsSet("allow-new-privs") { opts = append(opts, oci.WithNewPrivileges) @@ -270,6 +316,25 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli for _, dev := range context.StringSlice("device") { opts = append(opts, oci.WithDevices(dev, "", "rwm")) } + + rootfsPropagation := context.String("rootfs-propagation") + if rootfsPropagation != "" { + opts = append(opts, func(_ gocontext.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error { + if s.Linux != nil { + s.Linux.RootfsPropagation = rootfsPropagation + } else { + s.Linux = &specs.Linux{ + RootfsPropagation: rootfsPropagation, + } + } + + return nil + }) + } + + if c := context.String("rdt-class"); c != "" { + opts = append(opts, oci.WithRdt(c, "", "")) + } } runtimeOpts, err := getRuntimeOptions(context) @@ -359,15 +424,15 @@ func parseIDMapping(mapping string) (specs.LinuxIDMapping, error) { } cID, err := strconv.ParseUint(parts[0], 0, 32) if err != nil { - return specs.LinuxIDMapping{}, 
errors.Wrapf(err, "invalid container id for user namespace remapping") + return specs.LinuxIDMapping{}, fmt.Errorf("invalid container id for user namespace remapping: %w", err) } hID, err := strconv.ParseUint(parts[1], 0, 32) if err != nil { - return specs.LinuxIDMapping{}, errors.Wrapf(err, "invalid host id for user namespace remapping") + return specs.LinuxIDMapping{}, fmt.Errorf("invalid host id for user namespace remapping: %w", err) } size, err := strconv.ParseUint(parts[2], 0, 32) if err != nil { - return specs.LinuxIDMapping{}, errors.Wrapf(err, "invalid size for user namespace remapping") + return specs.LinuxIDMapping{}, fmt.Errorf("invalid size for user namespace remapping: %w", err) } return specs.LinuxIDMapping{ ContainerID: uint32(cID), @@ -391,3 +456,7 @@ func validNamespace(ns string) bool { return false } } + +func getNetNSPath(_ gocontext.Context, task containerd.Task) (string, error) { + return fmt.Sprintf("/proc/%d/ns/net", task.Pid()), nil +} diff --git a/cmd/ctr/commands/run/run_windows.go b/cmd/ctr/commands/run/run_windows.go index 292d60e..a2d85d7 100644 --- a/cmd/ctr/commands/run/run_windows.go +++ b/cmd/ctr/commands/run/run_windows.go @@ -18,14 +18,15 @@ package run import ( gocontext "context" + "errors" "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" "github.com/containerd/console" "github.com/containerd/containerd" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/containerd/oci" + "github.com/containerd/containerd/pkg/netns" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) @@ -116,6 +117,13 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli if context.Bool("net-host") { return nil, errors.New("Cannot use host mode networking with Windows containers") } + if context.Bool("cni") { + ns, err := netns.NewNetNS("") + if err != nil { + return nil, err + } + opts = append(opts, 
oci.WithWindowsNetworkNamespace(ns.GetPath())) + } if context.Bool("isolated") { opts = append(opts, oci.WithWindowsHyperV) } @@ -149,3 +157,14 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli func getNewTaskOpts(_ *cli.Context) []containerd.NewTaskOpts { return nil } + +func getNetNSPath(ctx gocontext.Context, t containerd.Task) (string, error) { + s, err := t.Spec(ctx) + if err != nil { + return "", err + } + if s.Windows == nil || s.Windows.Network == nil { + return "", nil + } + return s.Windows.Network.NetworkNamespace, nil +} diff --git a/cmd/ctr/commands/shim/io_unix.go b/cmd/ctr/commands/shim/io_unix.go index 1200e19..7806ad8 100644 --- a/cmd/ctr/commands/shim/io_unix.go +++ b/cmd/ctr/commands/shim/io_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/cmd/ctr/commands/shim/shim.go b/cmd/ctr/commands/shim/shim.go index c210dbc..36d75e3 100644 --- a/cmd/ctr/commands/shim/shim.go +++ b/cmd/ctr/commands/shim/shim.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,9 +21,10 @@ package shim import ( gocontext "context" + "errors" "fmt" - "io/ioutil" "net" + "os" "path/filepath" "strings" @@ -35,7 +37,6 @@ import ( "github.com/containerd/typeurl" ptypes "github.com/gogo/protobuf/types" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) @@ -173,7 +174,7 @@ var execCommand = cli.Command{ } // read spec file and extract Any object - spec, err := ioutil.ReadFile(context.String("spec")) + spec, err := os.ReadFile(context.String("spec")) if err != nil { return err } diff --git a/cmd/ctr/commands/signals_notlinux.go b/cmd/ctr/commands/signals_notlinux.go index 6a9dccb..1e0fbe6 100644 --- a/cmd/ctr/commands/signals_notlinux.go +++ b/cmd/ctr/commands/signals_notlinux.go @@ -1,4 +1,5 @@ -//+build !linux +//go:build !linux +// +build !linux /* Copyright The containerd Authors. 
diff --git a/cmd/ctr/commands/snapshots/snapshots.go b/cmd/ctr/commands/snapshots/snapshots.go index cb8f059..1d69b0a 100644 --- a/cmd/ctr/commands/snapshots/snapshots.go +++ b/cmd/ctr/commands/snapshots/snapshots.go @@ -18,6 +18,7 @@ package snapshots import ( gocontext "context" + "errors" "fmt" "io" "os" @@ -35,7 +36,6 @@ import ( "github.com/containerd/containerd/snapshots" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -133,8 +133,6 @@ var diffCommand = cli.Command{ labels := commands.LabelArgs(context.StringSlice("label")) snapshotter := client.SnapshotService(context.GlobalString("snapshotter")) - fmt.Println(context.String("media-type")) - if context.Bool("keep") { labels["containerd.io/gc.root"] = time.Now().UTC().Format(time.RFC3339) } @@ -164,6 +162,7 @@ var diffCommand = cli.Command{ if err != nil { return err } + defer ra.Close() _, err = io.Copy(os.Stdout, content.NewReader(ra)) return err @@ -249,8 +248,8 @@ var usageCommand = cli.Command{ } var removeCommand = cli.Command{ - Name: "remove", - Aliases: []string{"rm"}, + Name: "delete", + Aliases: []string{"del", "remove", "rm"}, ArgsUsage: " [, ...]", Usage: "remove snapshots", Action: func(context *cli.Context) error { @@ -263,7 +262,7 @@ var removeCommand = cli.Command{ for _, key := range context.Args() { err = snapshotter.Remove(ctx, key) if err != nil { - return errors.Wrapf(err, "failed to remove %q", key) + return fmt.Errorf("failed to remove %q: %w", key, err) } } diff --git a/cmd/ctr/commands/tasks/checkpoint.go b/cmd/ctr/commands/tasks/checkpoint.go index e6d1b73..3271aa1 100644 --- a/cmd/ctr/commands/tasks/checkpoint.go +++ b/cmd/ctr/commands/tasks/checkpoint.go @@ -17,6 +17,7 @@ package tasks import ( + "errors" "fmt" "github.com/containerd/containerd" @@ -24,7 +25,6 @@ import ( "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime/linux/runctypes" 
"github.com/containerd/containerd/runtime/v2/runc/options" - "github.com/pkg/errors" "github.com/urfave/cli" ) diff --git a/cmd/ctr/commands/tasks/delete.go b/cmd/ctr/commands/tasks/delete.go index 9f43655..c0edbe7 100644 --- a/cmd/ctr/commands/tasks/delete.go +++ b/cmd/ctr/commands/tasks/delete.go @@ -30,7 +30,7 @@ var deleteCommand = cli.Command{ Name: "delete", Usage: "delete one or more tasks", ArgsUsage: "CONTAINER [CONTAINER, ...]", - Aliases: []string{"rm"}, + Aliases: []string{"del", "remove", "rm"}, Flags: []cli.Flag{ cli.BoolFlag{ Name: "force, f", diff --git a/cmd/ctr/commands/tasks/exec.go b/cmd/ctr/commands/tasks/exec.go index 3f31e27..dd29525 100644 --- a/cmd/ctr/commands/tasks/exec.go +++ b/cmd/ctr/commands/tasks/exec.go @@ -31,7 +31,6 @@ import ( "github.com/urfave/cli" ) -//TODO:(jessvalarezo) exec-id is optional here, update to required arg var execCommand = cli.Command{ Name: "exec", Usage: "execute additional processes in an existing container", @@ -51,8 +50,9 @@ var execCommand = cli.Command{ Usage: "detach from the task after it has started execution", }, cli.StringFlag{ - Name: "exec-id", - Usage: "exec specific id for the process", + Name: "exec-id", + Required: true, + Usage: "exec specific id for the process", }, cli.StringFlag{ Name: "fifo-dir", @@ -104,6 +104,10 @@ var execCommand = cli.Command{ pspec.Terminal = tty pspec.Args = args + if cwd := context.String("cwd"); cwd != "" { + pspec.Cwd = cwd + } + task, err := container.Task(ctx, nil) if err != nil { return err @@ -114,29 +118,35 @@ var execCommand = cli.Command{ stdinC = &stdinCloser{ stdin: os.Stdin, } + con console.Console ) - if logURI := context.String("log-uri"); logURI != "" { + fifoDir := context.String("fifo-dir") + logURI := context.String("log-uri") + ioOpts := []cio.Opt{cio.WithFIFODir(fifoDir)} + switch { + case tty && logURI != "": + return errors.New("can't use log-uri with tty") + case logURI != "" && fifoDir != "": + return errors.New("can't use log-uri with 
fifo-dir") + + case tty: + con = console.Current() + defer con.Reset() + if err := con.SetRaw(); err != nil { + return err + } + ioCreator = cio.NewCreator(append([]cio.Opt{cio.WithStreams(con, con, nil), cio.WithTerminal}, ioOpts...)...) + + case logURI != "": uri, err := url.Parse(logURI) if err != nil { return err } - - if dir := context.String("fifo-dir"); dir != "" { - return errors.New("can't use log-uri with fifo-dir") - } - - if tty { - return errors.New("can't use log-uri with tty") - } - ioCreator = cio.LogURI(uri) - } else { - cioOpts := []cio.Opt{cio.WithStreams(stdinC, os.Stdout, os.Stderr), cio.WithFIFODir(context.String("fifo-dir"))} - if tty { - cioOpts = append(cioOpts, cio.WithTerminal) - } - ioCreator = cio.NewCreator(cioOpts...) + + default: + ioCreator = cio.NewCreator(append([]cio.Opt{cio.WithStreams(stdinC, os.Stdout, os.Stderr)}, ioOpts...)...) } process, err := task.Exec(ctx, context.String("exec-id"), pspec, ioCreator) @@ -156,31 +166,20 @@ var execCommand = cli.Command{ return err } - var con console.Console - if tty { - con = console.Current() - defer con.Reset() - if err := con.SetRaw(); err != nil { - return err - } - } - if !detach { - if tty { - if err := HandleConsoleResize(ctx, process, con); err != nil { - logrus.WithError(err).Error("console resize") - } - } else { - sigc := commands.ForwardAllSignals(ctx, process) - defer commands.StopCatch(sigc) - } - } - if err := process.Start(ctx); err != nil { return err } if detach { return nil } + if tty { + if err := HandleConsoleResize(ctx, process, con); err != nil { + logrus.WithError(err).Error("console resize") + } + } else { + sigc := commands.ForwardAllSignals(ctx, process) + defer commands.StopCatch(sigc) + } status := <-statusC code, _, err := status.Result() if err != nil { diff --git a/cmd/ctr/commands/tasks/kill.go b/cmd/ctr/commands/tasks/kill.go index 080ffa0..3aef2c9 100644 --- a/cmd/ctr/commands/tasks/kill.go +++ b/cmd/ctr/commands/tasks/kill.go @@ -17,9 +17,11 @@ package 
tasks import ( + "errors" + "github.com/containerd/containerd" "github.com/containerd/containerd/cmd/ctr/commands" - "github.com/pkg/errors" + "github.com/moby/sys/signal" "github.com/urfave/cli" ) @@ -49,7 +51,7 @@ var killCommand = cli.Command{ if id == "" { return errors.New("container id must be provided") } - signal, err := containerd.ParseSignal(defaultSignal) + sig, err := signal.ParseSignal(defaultSignal) if err != nil { return err } @@ -77,12 +79,12 @@ var killCommand = cli.Command{ return err } if context.String("signal") != "" { - signal, err = containerd.ParseSignal(context.String("signal")) + sig, err = signal.ParseSignal(context.String("signal")) if err != nil { return err } } else { - signal, err = containerd.GetStopSignal(ctx, container, signal) + sig, err = containerd.GetStopSignal(ctx, container, sig) if err != nil { return err } @@ -91,6 +93,6 @@ var killCommand = cli.Command{ if err != nil { return err } - return task.Kill(ctx, signal, opts...) + return task.Kill(ctx, sig, opts...) }, } diff --git a/cmd/ctr/commands/tasks/metrics.go b/cmd/ctr/commands/tasks/metrics.go index a83e45e..b2c18f2 100644 --- a/cmd/ctr/commands/tasks/metrics.go +++ b/cmd/ctr/commands/tasks/metrics.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
diff --git a/cmd/ctr/commands/tasks/ps.go b/cmd/ctr/commands/tasks/ps.go index 0442a1e..cb444b3 100644 --- a/cmd/ctr/commands/tasks/ps.go +++ b/cmd/ctr/commands/tasks/ps.go @@ -17,13 +17,13 @@ package tasks import ( + "errors" "fmt" "os" "text/tabwriter" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/typeurl" - "github.com/pkg/errors" "github.com/urfave/cli" ) diff --git a/cmd/ctr/commands/tasks/start.go b/cmd/ctr/commands/tasks/start.go index de55767..4639c28 100644 --- a/cmd/ctr/commands/tasks/start.go +++ b/cmd/ctr/commands/tasks/start.go @@ -17,11 +17,12 @@ package tasks import ( + "errors" + "github.com/containerd/console" "github.com/containerd/containerd" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/cmd/ctr/commands" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) diff --git a/cmd/ctr/commands/tasks/tasks_unix.go b/cmd/ctr/commands/tasks/tasks_unix.go index 0ec6dc0..0e5311a 100644 --- a/cmd/ctr/commands/tasks/tasks_unix.go +++ b/cmd/ctr/commands/tasks/tasks_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,6 +21,7 @@ package tasks import ( gocontext "context" + "errors" "net/url" "os" "os/signal" @@ -28,7 +30,6 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/log" - "github.com/pkg/errors" "github.com/urfave/cli" "golang.org/x/sys/unix" ) diff --git a/cmd/ctr/commands/tasks/tasks_windows.go b/cmd/ctr/commands/tasks/tasks_windows.go index 8905c5b..0d5e19b 100644 --- a/cmd/ctr/commands/tasks/tasks_windows.go +++ b/cmd/ctr/commands/tasks/tasks_windows.go @@ -18,6 +18,7 @@ package tasks import ( gocontext "context" + "errors" "net/url" "time" @@ -25,7 +26,6 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/log" - "github.com/pkg/errors" "github.com/urfave/cli" ) diff --git a/cmd/ctr/main.go b/cmd/ctr/main.go 
index cf72de2..d675536 100644 --- a/cmd/ctr/main.go +++ b/cmd/ctr/main.go @@ -21,13 +21,14 @@ import ( "os" "github.com/containerd/containerd/cmd/ctr/app" - "github.com/containerd/containerd/pkg/seed" + "github.com/containerd/containerd/pkg/seed" //nolint:staticcheck // Global math/rand seed is deprecated, but still used by external dependencies "github.com/urfave/cli" ) var pluginCmds = []cli.Command{} func init() { + //nolint:staticcheck // Global math/rand seed is deprecated, but still used by external dependencies seed.WithTimeAndRand() } diff --git a/cmd/gen-manpages/main.go b/cmd/gen-manpages/main.go index 6f3d2f1..61ec6a3 100644 --- a/cmd/gen-manpages/main.go +++ b/cmd/gen-manpages/main.go @@ -19,7 +19,6 @@ package main import ( "flag" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -57,13 +56,12 @@ func run() error { // clear out the usage as we use banners that do not display in man pages appName.Usage = "" - appName.ToMan() data, err := appName.ToMan() if err != nil { return err } _ = os.MkdirAll(dir, os.ModePerm) - if err := ioutil.WriteFile(filepath.Join(dir, fmt.Sprintf("%s.%s", name, section)), []byte(data), 0644); err != nil { + if err := os.WriteFile(filepath.Join(dir, fmt.Sprintf("%s.%s", name, section)), []byte(data), 0644); err != nil { return err } return nil diff --git a/container.go b/container.go index d5da55e..2cf1566 100644 --- a/container.go +++ b/container.go @@ -19,6 +19,7 @@ package containerd import ( "context" "encoding/json" + "fmt" "os" "path/filepath" "strings" @@ -38,7 +39,6 @@ import ( ver "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" ) const ( @@ -173,7 +173,7 @@ func (c *container) Spec(ctx context.Context) (*oci.Spec, error) { // an error is returned if the container has running tasks func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) error { if _, err := 
c.loadTask(ctx, nil); err == nil { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot delete running task %v", c.id) + return fmt.Errorf("cannot delete running task %v: %w", c.id, errdefs.ErrFailedPrecondition) } r, err := c.get(ctx) if err != nil { @@ -198,11 +198,11 @@ func (c *container) Image(ctx context.Context) (Image, error) { return nil, err } if r.Image == "" { - return nil, errors.Wrap(errdefs.ErrNotFound, "container not created from an image") + return nil, fmt.Errorf("container not created from an image: %w", errdefs.ErrNotFound) } i, err := c.client.ImageService().Get(ctx, r.Image) if err != nil { - return nil, errors.Wrapf(err, "failed to get image %s for container", r.Image) + return nil, fmt.Errorf("failed to get image %s for container: %w", r.Image, err) } return NewImage(c.client, i), nil } @@ -232,7 +232,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N } if r.SnapshotKey != "" { if r.Snapshotter == "" { - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "unable to resolve rootfs mounts without snapshotter on container") + return nil, fmt.Errorf("unable to resolve rootfs mounts without snapshotter on container: %w", errdefs.ErrInvalidArgument) } // get the rootfs from the snapshotter and add it to the request @@ -279,6 +279,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N }) } } + request.RuntimePath = info.RuntimePath if info.Options != nil { any, err := typeurl.MarshalAny(info.Options) if err != nil { @@ -391,7 +392,7 @@ func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, er if err != nil { err = errdefs.FromGRPC(err) if errdefs.IsNotFound(err) { - return nil, errors.Wrapf(err, "no running task found") + return nil, fmt.Errorf("no running task found: %w", err) } return nil, err } diff --git a/container_checkpoint_opts.go b/container_checkpoint_opts.go index 5108636..a64ef61 100644 --- a/container_checkpoint_opts.go +++ 
b/container_checkpoint_opts.go @@ -19,6 +19,7 @@ package containerd import ( "bytes" "context" + "errors" "fmt" "runtime" @@ -31,7 +32,6 @@ import ( "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/typeurl" imagespec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) var ( diff --git a/container_opts.go b/container_opts.go index 024d6e1..4d630ea 100644 --- a/container_opts.go +++ b/container_opts.go @@ -19,6 +19,7 @@ package containerd import ( "context" "encoding/json" + "errors" "fmt" "github.com/containerd/containerd/containers" @@ -31,7 +32,6 @@ import ( "github.com/gogo/protobuf/types" "github.com/opencontainers/image-spec/identity" v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // DeleteOpts allows the caller to set options for the deletion of a container @@ -227,7 +227,7 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error { if c.SnapshotKey != "" { if c.Snapshotter == "" { - return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot") + return fmt.Errorf("container.Snapshotter must be set to cleanup rootfs snapshot: %w", errdefs.ErrInvalidArgument) } s, err := client.getSnapshotter(ctx, c.Snapshotter) if err != nil { @@ -276,15 +276,15 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer func WithContainerExtension(name string, extension interface{}) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { if name == "" { - return errors.Wrapf(errdefs.ErrInvalidArgument, "extension key must not be zero-length") + return fmt.Errorf("extension key must not be zero-length: %w", errdefs.ErrInvalidArgument) } any, err := typeurl.MarshalAny(extension) if err != nil { if errors.Is(err, typeurl.ErrNotFound) { - return 
errors.Wrapf(err, "extension %q is not registered with the typeurl package, see `typeurl.Register`", name) + return fmt.Errorf("extension %q is not registered with the typeurl package, see `typeurl.Register`: %w", name, err) } - return errors.Wrap(err, "error marshalling extension") + return fmt.Errorf("error marshalling extension: %w", err) } if c.Extensions == nil { diff --git a/container_opts_unix.go b/container_opts_unix.go index b109a10..b6fc37d 100644 --- a/container_opts_unix.go +++ b/container_opts_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/container_restore_opts.go b/container_restore_opts.go index fb60e8d..bdc8650 100644 --- a/container_restore_opts.go +++ b/container_restore_opts.go @@ -18,6 +18,8 @@ package containerd import ( "context" + "errors" + "fmt" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" @@ -26,7 +28,6 @@ import ( ptypes "github.com/gogo/protobuf/types" "github.com/opencontainers/image-spec/identity" imagespec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) var ( @@ -46,7 +47,7 @@ func WithRestoreImage(ctx context.Context, id string, client *Client, checkpoint return func(ctx context.Context, client *Client, c *containers.Container) error { name, ok := index.Annotations[checkpointImageNameLabel] if !ok || name == "" { - return ErrRuntimeNameNotFoundInIndex + return ErrImageNameNotFoundInIndex } snapshotter, ok := index.Annotations[checkpointSnapshotterNameLabel] if !ok || name == "" { @@ -92,7 +93,7 @@ func WithRestoreRuntime(ctx context.Context, id string, client *Client, checkpoi store := client.ContentStore() data, err := content.ReadBlob(ctx, store, *m) if err != nil { - return errors.Wrap(err, "unable to read checkpoint runtime") + return fmt.Errorf("unable to read checkpoint runtime: %w", err) } if err := proto.Unmarshal(data, &options); err != nil { return err @@ -117,7 +118,7 @@ func WithRestoreSpec(ctx context.Context, id 
string, client *Client, checkpoint store := client.ContentStore() data, err := content.ReadBlob(ctx, store, *m) if err != nil { - return errors.Wrap(err, "unable to read checkpoint config") + return fmt.Errorf("unable to read checkpoint config: %w", err) } var any ptypes.Any if err := proto.Unmarshal(data, &any); err != nil { diff --git a/containerstore.go b/containerstore.go index 2756e2a..bdd1c60 100644 --- a/containerstore.go +++ b/containerstore.go @@ -189,6 +189,7 @@ func containersFromProto(containerspb []containersapi.Container) []containers.Co var containers []containers.Container for _, container := range containerspb { + container := container containers = append(containers, containerFromProto(&container)) } diff --git a/content/adaptor_test.go b/content/adaptor_test.go new file mode 100644 index 0000000..0479f8a --- /dev/null +++ b/content/adaptor_test.go @@ -0,0 +1,89 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package content + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAdaptInfo(t *testing.T) { + tests := []struct { + name string + info Info + fieldpath []string + wantValue string + wantPresent bool + }{ + { + "empty fieldpath", + Info{}, + []string{}, + "", + false, + }, + { + "digest fieldpath", + Info{ + Digest: "foo", + }, + []string{"digest"}, + "foo", + true, + }, + { + "size fieldpath", + Info{}, + []string{"size"}, + "", + false, + }, + { + "labels fieldpath", + Info{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + []string{"labels", "foo"}, + "bar", + true, + }, + { + "labels join fieldpath", + Info{ + Labels: map[string]string{ + "foo.bar.qux": "quux", + }, + }, + []string{"labels", "foo", "bar", "qux"}, + "quux", + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + adaptInfo := AdaptInfo(tt.info) + + value, present := adaptInfo.Field(tt.fieldpath) + + assert.Equal(t, tt.wantValue, value) + assert.Equal(t, tt.wantPresent, present) + }) + } +} diff --git a/content/helpers.go b/content/helpers.go index 00fae1f..23ddc45 100644 --- a/content/helpers.go +++ b/content/helpers.go @@ -18,18 +18,24 @@ package content import ( "context" + "errors" + "fmt" "io" - "io/ioutil" - "math/rand" "sync" "time" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/randutil" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) +// maxResets is the no.of times the Copy() method can tolerate a reset of the body +const maxResets = 5 + +var ErrReset = errors.New("writer has been reset") + var bufPool = sync.Pool{ New: func() interface{} { buffer := make([]byte, 1<<20) @@ -77,10 +83,10 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc o cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc)) if err != nil { if 
!errdefs.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to open writer") + return fmt.Errorf("failed to open writer: %w", err) } - return nil // all ready present + return nil // already present } defer cw.Close() @@ -107,7 +113,7 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er // error or abort. Requires asserting for an ingest manager select { - case <-time.After(time.Millisecond * time.Duration(rand.Intn(retry))): + case <-time.After(time.Millisecond * time.Duration(randutil.Intn(retry))): if retry < 2048 { retry = retry << 1 } @@ -131,35 +137,63 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er // the size or digest is unknown, these values may be empty. // // Copy is buffered, so no need to wrap reader in buffered io. -func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error { +func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected digest.Digest, opts ...Opt) error { ws, err := cw.Status() if err != nil { - return errors.Wrap(err, "failed to get status") + return fmt.Errorf("failed to get status: %w", err) } - + r := or if ws.Offset > 0 { - r, err = seekReader(r, ws.Offset, size) + r, err = seekReader(or, ws.Offset, size) if err != nil { - return errors.Wrapf(err, "unable to resume write to %v", ws.Ref) + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) } } - copied, err := copyWithBuffer(cw, r) - if err != nil { - return errors.Wrap(err, "failed to copy") - } - if size != 0 && copied < size-ws.Offset { - // Short writes would return its own error, this indicates a read failure - return errors.Wrapf(io.ErrUnexpectedEOF, "failed to read expected number of bytes") - } - - if err := cw.Commit(ctx, size, expected, opts...); err != nil { - if !errdefs.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed commit on ref %q", ws.Ref) + for i := 0; i < maxResets; i++ { + if i >= 1 { + 
log.G(ctx).WithField("digest", expected).Debugf("retrying copy due to reset") } + copied, err := copyWithBuffer(cw, r) + if errors.Is(err, ErrReset) { + ws, err := cw.Status() + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + r, err = seekReader(or, ws.Offset, size) + if err != nil { + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + continue + } + if err != nil { + return fmt.Errorf("failed to copy: %w", err) + } + if size != 0 && copied < size-ws.Offset { + // Short writes would return its own error, this indicates a read failure + return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) + } + if err := cw.Commit(ctx, size, expected, opts...); err != nil { + if errors.Is(err, ErrReset) { + ws, err := cw.Status() + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + r, err = seekReader(or, ws.Offset, size) + if err != nil { + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + continue + } + if !errdefs.IsAlreadyExists(err) { + return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) + } + } + return nil } - return nil + log.G(ctx).WithField("digest", expected).Errorf("failed to copy after %d retries", maxResets) + return fmt.Errorf("failed to copy after %d retries", maxResets) } // CopyReaderAt copies to a writer from a given reader at for the given @@ -172,11 +206,11 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n)) if err != nil { - return errors.Wrap(err, "failed to copy") + return fmt.Errorf("failed to copy: %w", err) } if copied < n { // Short writes would return its own error, this indicates a read failure - return errors.Wrap(io.ErrUnexpectedEOF, "failed to read expected number of bytes") + return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) } return nil } @@ -190,13 +224,13 @@ func CopyReaderAt(cw Writer, ra 
ReaderAt, n int64) error { func CopyReader(cw Writer, r io.Reader) (int64, error) { ws, err := cw.Status() if err != nil { - return 0, errors.Wrap(err, "failed to get status") + return 0, fmt.Errorf("failed to get status: %w", err) } if ws.Offset > 0 { r, err = seekReader(r, ws.Offset, 0) if err != nil { - return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref) + return 0, fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) } } @@ -212,7 +246,10 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { if ok { nn, err := seeker.Seek(offset, io.SeekStart) if nn != offset { - return nil, errors.Wrapf(err, "failed to seek to offset %v", offset) + if err == nil { + err = fmt.Errorf("unexpected seek location without seek error") + } + return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err) } if err != nil { @@ -230,12 +267,12 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { } // well then, let's just discard up to the offset - n, err := copyWithBuffer(ioutil.Discard, io.LimitReader(r, offset)) + n, err := copyWithBuffer(io.Discard, io.LimitReader(r, offset)) if err != nil { - return nil, errors.Wrap(err, "failed to discard to offset") + return nil, fmt.Errorf("failed to discard to offset: %w", err) } if n != offset { - return nil, errors.Errorf("unable to discard to offset") + return nil, errors.New("unable to discard to offset") } return r, nil diff --git a/content/helpers_test.go b/content/helpers_test.go index be52f04..4a57cf3 100644 --- a/content/helpers_test.go +++ b/content/helpers_test.go @@ -20,6 +20,7 @@ import ( "bytes" "context" _ "crypto/sha256" // required by go-digest + "fmt" "io" "strings" "testing" @@ -39,38 +40,107 @@ type copySource struct { func TestCopy(t *testing.T) { defaultSource := newCopySource("this is the source to copy") + cf1 := func(buf *bytes.Buffer, st Status) commitFunction { + i := 0 + return func() error { + // function resets the first time + if i == 0 { + // 
this is the case where, the pipewriter to which the data was being written has + // changed. which means we need to clear the buffer + i++ + buf.Reset() + st.Offset = 0 + return ErrReset + } + return nil + } + } + + cf2 := func(buf *bytes.Buffer, st Status) commitFunction { + i := 0 + return func() error { + // function resets for more than the maxReset value + if i < maxResets+1 { + // this is the case where, the pipewriter to which the data was being written has + // changed. which means we need to clear the buffer + i++ + buf.Reset() + st.Offset = 0 + return ErrReset + } + return nil + } + } + + s1 := Status{} + s2 := Status{} + b1 := bytes.Buffer{} + b2 := bytes.Buffer{} + var testcases = []struct { - name string - source copySource - writer fakeWriter - expected string + name string + source copySource + writer fakeWriter + expected string + expectedErr error }{ { - name: "copy no offset", - source: defaultSource, - writer: fakeWriter{}, + name: "copy no offset", + source: defaultSource, + writer: fakeWriter{ + Buffer: &bytes.Buffer{}, + }, expected: "this is the source to copy", }, { - name: "copy with offset from seeker", - source: defaultSource, - writer: fakeWriter{status: Status{Offset: 8}}, + name: "copy with offset from seeker", + source: defaultSource, + writer: fakeWriter{ + Buffer: &bytes.Buffer{}, + status: Status{Offset: 8}, + }, expected: "the source to copy", }, { - name: "copy with offset from unseekable source", - source: copySource{reader: bytes.NewBufferString("foobar"), size: 6}, - writer: fakeWriter{status: Status{Offset: 3}}, + name: "copy with offset from unseekable source", + source: copySource{reader: bytes.NewBufferString("foobar"), size: 6}, + writer: fakeWriter{ + Buffer: &bytes.Buffer{}, + status: Status{Offset: 3}, + }, expected: "bar", }, { name: "commit already exists", source: newCopySource("this already exists"), - writer: fakeWriter{commitFunc: func() error { - return errdefs.ErrAlreadyExists - }}, + writer: fakeWriter{ + 
Buffer: &bytes.Buffer{}, + commitFunc: func() error { + return errdefs.ErrAlreadyExists + }}, expected: "this already exists", }, + { + name: "commit fails first time with ErrReset", + source: newCopySource("content to copy"), + writer: fakeWriter{ + Buffer: &b1, + status: s1, + commitFunc: cf1(&b1, s1), + }, + expected: "content to copy", + }, + { + name: "write fails more than maxReset times due to reset", + source: newCopySource("content to copy"), + writer: fakeWriter{ + Buffer: &b2, + status: s2, + commitFunc: cf2(&b2, s2), + }, + expected: "", + expectedErr: fmt.Errorf("failed to copy after %d retries", maxResets), + }, } for _, testcase := range testcases { @@ -81,6 +151,12 @@ func TestCopy(t *testing.T) { testcase.source.size, testcase.source.digest) + // if an error is expected then further comparisons are not required + if testcase.expectedErr != nil { + assert.Check(t, is.Equal(testcase.expectedErr.Error(), err.Error())) + return + } + assert.NilError(t, err) assert.Check(t, is.Equal(testcase.source.digest, testcase.writer.committedDigest)) assert.Check(t, is.Equal(testcase.expected, testcase.writer.String())) @@ -96,11 +172,13 @@ func newCopySource(raw string) copySource { } } +type commitFunction func() error + type fakeWriter struct { - bytes.Buffer + *bytes.Buffer committedDigest digest.Digest status Status - commitFunc func() error + commitFunc commitFunction } func (f *fakeWriter) Close() error { diff --git a/content/local/locks.go b/content/local/locks.go index d1d2d56..1e59f39 100644 --- a/content/local/locks.go +++ b/content/local/locks.go @@ -17,11 +17,11 @@ package local import ( + "fmt" "sync" "time" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" ) // Handles locking references @@ -41,7 +41,13 @@ func tryLock(ref string) error { defer locksMu.Unlock() if v, ok := locks[ref]; ok { - return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked since %s", ref, v.since) + // Returning the duration may help developers 
distinguish dead locks (long duration) from + // lock contentions (short duration). + now := time.Now() + return fmt.Errorf( + "ref %s locked for %s (since %s): %w", ref, now.Sub(v.since), v.since, + errdefs.ErrUnavailable, + ) } locks[ref] = &lock{time.Now()} diff --git a/content/local/locks_test.go b/content/local/locks_test.go index c9d0034..3d1912d 100644 --- a/content/local/locks_test.go +++ b/content/local/locks_test.go @@ -28,5 +28,5 @@ func TestTryLock(t *testing.T) { defer unlock("testref") err = tryLock("testref") - assert.ErrorContains(t, err, "ref testref locked since ") + assert.ErrorContains(t, err, "ref testref locked for ") } diff --git a/content/local/readerat.go b/content/local/readerat.go index 5d3ae03..a83c171 100644 --- a/content/local/readerat.go +++ b/content/local/readerat.go @@ -17,10 +17,9 @@ package local import ( + "fmt" "os" - "github.com/pkg/errors" - "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" ) @@ -40,7 +39,7 @@ func OpenReader(p string) (content.ReaderAt, error) { return nil, err } - return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found") + return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound) } fp, err := os.Open(p) @@ -49,7 +48,7 @@ func OpenReader(p string) (content.ReaderAt, error) { return nil, err } - return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found") + return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound) } return sizeReaderAt{size: fi.Size(), fp: fp}, nil diff --git a/content/local/store.go b/content/local/store.go index 314d913..1b01790 100644 --- a/content/local/store.go +++ b/content/local/store.go @@ -20,8 +20,6 @@ import ( "context" "fmt" "io" - "io/ioutil" - "math/rand" "os" "path/filepath" "strconv" @@ -33,11 +31,11 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/filters" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/randutil" "github.com/sirupsen/logrus" - 
digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) var bufPool = sync.Pool{ @@ -94,13 +92,13 @@ func NewLabeledStore(root string, ls LabelStore) (content.Store, error) { func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { p, err := s.blobPath(dgst) if err != nil { - return content.Info{}, errors.Wrapf(err, "calculating blob info path") + return content.Info{}, fmt.Errorf("calculating blob info path: %w", err) } fi, err := os.Stat(p) if err != nil { if os.IsNotExist(err) { - err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) + err = fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound) } return content.Info{}, err @@ -129,12 +127,12 @@ func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]strin func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { p, err := s.blobPath(desc.Digest) if err != nil { - return nil, errors.Wrapf(err, "calculating blob path for ReaderAt") + return nil, fmt.Errorf("calculating blob path for ReaderAt: %w", err) } reader, err := OpenReader(p) if err != nil { - return nil, errors.Wrapf(err, "blob %s expected at %s", desc.Digest, p) + return nil, fmt.Errorf("blob %s expected at %s: %w", desc.Digest, p, err) } return reader, nil @@ -147,7 +145,7 @@ func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content. 
func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { bp, err := s.blobPath(dgst) if err != nil { - return errors.Wrapf(err, "calculating blob path for delete") + return fmt.Errorf("calculating blob path for delete: %w", err) } if err := os.RemoveAll(bp); err != nil { @@ -155,7 +153,7 @@ func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { return err } - return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) + return fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound) } return nil @@ -163,18 +161,18 @@ func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { if s.ls == nil { - return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store") + return content.Info{}, fmt.Errorf("update not supported on immutable content store: %w", errdefs.ErrFailedPrecondition) } p, err := s.blobPath(info.Digest) if err != nil { - return content.Info{}, errors.Wrapf(err, "calculating blob path for update") + return content.Info{}, fmt.Errorf("calculating blob path for update: %w", err) } fi, err := os.Stat(p) if err != nil { if os.IsNotExist(err) { - err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest) + err = fmt.Errorf("content %v: %w", info.Digest, errdefs.ErrNotFound) } return content.Info{}, err @@ -201,7 +199,7 @@ func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...str all = true labels = info.Labels default: - return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest) + return content.Info{}, fmt.Errorf("cannot update %q field on content info %q: %w", path, info.Digest, errdefs.ErrInvalidArgument) } } } else { @@ -378,7 +376,7 @@ func (s *store) status(ingestPath string) (content.Status, error) { fi, err := os.Stat(dp) if err != nil { if 
os.IsNotExist(err) { - err = errors.Wrap(errdefs.ErrNotFound, err.Error()) + err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) } return content.Status{}, err } @@ -386,19 +384,19 @@ func (s *store) status(ingestPath string) (content.Status, error) { ref, err := readFileString(filepath.Join(ingestPath, "ref")) if err != nil { if os.IsNotExist(err) { - err = errors.Wrap(errdefs.ErrNotFound, err.Error()) + err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) } return content.Status{}, err } startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat")) if err != nil { - return content.Status{}, errors.Wrapf(err, "could not read startedat") + return content.Status{}, fmt.Errorf("could not read startedat: %w", err) } updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat")) if err != nil { - return content.Status{}, errors.Wrapf(err, "could not read updatedat") + return content.Status{}, fmt.Errorf("could not read updatedat: %w", err) } // because we don't write updatedat on every write, the mod time may @@ -461,7 +459,7 @@ func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content. // TODO(AkihiroSuda): we could create a random string or one calculated based on the context // https://github.com/containerd/containerd/issues/2129#issuecomment-380255019 if wOpts.Ref == "" { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") + return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument) } var lockErr error for count := uint64(0); count < 10; count++ { @@ -475,7 +473,7 @@ func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content. 
lockErr = nil break } - time.Sleep(time.Millisecond * time.Duration(rand.Intn(1< 0 && status.Total > 0 && total != status.Total { - return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total) + return status, fmt.Errorf("provided total differs from status: %v != %v", total, status.Total) } + //nolint:dupword // TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes fp, err := os.Open(data) if err != nil { @@ -528,10 +527,10 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di if expected != "" { p, err := s.blobPath(expected) if err != nil { - return nil, errors.Wrap(err, "calculating expected blob path for writer") + return nil, fmt.Errorf("calculating expected blob path for writer: %w", err) } if _, err := os.Stat(p); err == nil { - return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected) + return nil, fmt.Errorf("content %v: %w", expected, errdefs.ErrAlreadyExists) } } @@ -568,7 +567,7 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di // the ingest is new, we need to setup the target location. 
// write the ref to a file for later use - if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil { + if err := os.WriteFile(refp, []byte(ref), 0666); err != nil { return nil, err } @@ -581,7 +580,7 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di } if total > 0 { - if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil { + if err := os.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil { return nil, err } } @@ -589,11 +588,12 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666) if err != nil { - return nil, errors.Wrap(err, "failed to open data file") + return nil, fmt.Errorf("failed to open data file: %w", err) } if _, err := fp.Seek(offset, io.SeekStart); err != nil { - return nil, errors.Wrap(err, "could not seek to current write offset") + fp.Close() + return nil, fmt.Errorf("could not seek to current write offset: %w", err) } return &writer{ @@ -615,7 +615,7 @@ func (s *store) Abort(ctx context.Context, ref string) error { root := s.ingestRoot(ref) if err := os.RemoveAll(root); err != nil { if os.IsNotExist(err) { - return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref) + return fmt.Errorf("ingest ref %q: %w", ref, errdefs.ErrNotFound) } return err @@ -626,7 +626,7 @@ func (s *store) Abort(ctx context.Context, ref string) error { func (s *store) blobPath(dgst digest.Digest) (string, error) { if err := dgst.Validate(); err != nil { - return "", errors.Wrapf(errdefs.ErrInvalidArgument, "cannot calculate blob path from invalid digest: %v", err) + return "", fmt.Errorf("cannot calculate blob path from invalid digest: %v: %w", err, errdefs.ErrInvalidArgument) } return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil @@ -644,7 +644,6 @@ func (s *store) ingestRoot(ref string) string { // - root: entire ingest directory 
// - ref: name of the starting ref, must be unique // - data: file where data is written -// func (s *store) ingestPaths(ref string) (string, string, string) { var ( fp = s.ingestRoot(ref) @@ -656,23 +655,23 @@ func (s *store) ingestPaths(ref string) (string, string, string) { } func readFileString(path string) (string, error) { - p, err := ioutil.ReadFile(path) + p, err := os.ReadFile(path) return string(p), err } // readFileTimestamp reads a file with just a timestamp present. func readFileTimestamp(p string) (time.Time, error) { - b, err := ioutil.ReadFile(p) + b, err := os.ReadFile(p) if err != nil { if os.IsNotExist(err) { - err = errors.Wrap(errdefs.ErrNotFound, err.Error()) + err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) } return time.Time{}, err } var t time.Time if err := t.UnmarshalText(b); err != nil { - return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p) + return time.Time{}, fmt.Errorf("could not parse timestamp file %v: %w", p, err) } return t, nil @@ -683,19 +682,23 @@ func writeTimestampFile(p string, t time.Time) error { if err != nil { return err } - return atomicWrite(p, b, 0666) + return writeToCompletion(p, b, 0666) } -func atomicWrite(path string, data []byte, mode os.FileMode) error { +func writeToCompletion(path string, data []byte, mode os.FileMode) error { tmp := fmt.Sprintf("%s.tmp", path) f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode) if err != nil { - return errors.Wrap(err, "create tmp file") + return fmt.Errorf("create tmp file: %w", err) } _, err = f.Write(data) f.Close() if err != nil { - return errors.Wrap(err, "write atomic data") + return fmt.Errorf("write tmp file: %w", err) } - return os.Rename(tmp, path) + err = os.Rename(tmp, path) + if err != nil { + return fmt.Errorf("rename tmp file: %w", err) + } + return nil } diff --git a/content/local/store_bsd.go b/content/local/store_bsd.go index da149a2..42fddd3 100644 --- a/content/local/store_bsd.go +++ 
b/content/local/store_bsd.go @@ -1,3 +1,4 @@ +//go:build darwin || freebsd || netbsd // +build darwin freebsd netbsd /* @@ -26,7 +27,7 @@ import ( func getATime(fi os.FileInfo) time.Time { if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. + return time.Unix(st.Atimespec.Unix()) } return fi.ModTime() diff --git a/content/local/store_openbsd.go b/content/local/store_openbsd.go index f34f0da..2b58b61 100644 --- a/content/local/store_openbsd.go +++ b/content/local/store_openbsd.go @@ -1,3 +1,4 @@ +//go:build openbsd // +build openbsd /* @@ -26,7 +27,7 @@ import ( func getATime(fi os.FileInfo) time.Time { if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. + return time.Unix(st.Atim.Unix()) } return fi.ModTime() diff --git a/content/local/store_test.go b/content/local/store_test.go index 156fcd1..59c0151 100644 --- a/content/local/store_test.go +++ b/content/local/store_test.go @@ -20,11 +20,10 @@ import ( "bufio" "bytes" "context" + "crypto/rand" _ "crypto/sha256" // required for digest package "fmt" "io" - "io/ioutil" - "math/rand" "os" "path/filepath" "reflect" @@ -36,6 +35,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/content/testsuite" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/pkg/randutil" "github.com/containerd/containerd/pkg/testutil" "github.com/opencontainers/go-digest" @@ -158,7 +158,7 @@ func TestContentWriter(t *testing.T) { } expected := digest.FromBytes(p) - checkCopy(t, int64(len(p)), cw, bufio.NewReader(ioutil.NopCloser(bytes.NewReader(p)))) + checkCopy(t, int64(len(p)), cw, bufio.NewReader(io.NopCloser(bytes.NewReader(p)))) if err := cw.Commit(ctx, int64(len(p)), 
expected); err != nil { t.Fatal(err) @@ -174,7 +174,7 @@ func TestContentWriter(t *testing.T) { } // now, attempt to write the same data again - checkCopy(t, int64(len(p)), cw, bufio.NewReader(ioutil.NopCloser(bytes.NewReader(p)))) + checkCopy(t, int64(len(p)), cw, bufio.NewReader(io.NopCloser(bytes.NewReader(p)))) if err := cw.Commit(ctx, int64(len(p)), expected); err == nil { t.Fatal("expected already exists error") } else if !errdefs.IsAlreadyExists(err) { @@ -184,7 +184,7 @@ func TestContentWriter(t *testing.T) { path := checkBlobPath(t, cs, expected) // read the data back, make sure its the same - pp, err := ioutil.ReadFile(path) + pp, err := os.ReadFile(path) if err != nil { t.Fatal(err) } @@ -269,7 +269,7 @@ func generateBlobs(t checker, nblobs, maxsize int64) map[digest.Digest][]byte { blobs := map[digest.Digest][]byte{} for i := int64(0); i < nblobs; i++ { - p := make([]byte, rand.Int63n(maxsize)) + p := make([]byte, randutil.Int63n(maxsize)) if _, err := rand.Read(p); err != nil { t.Fatal(err) @@ -292,28 +292,17 @@ func populateBlobStore(ctx context.Context, t checker, cs content.Store, nblobs, return blobs } -func contentStoreEnv(t checker) (context.Context, string, content.Store, func()) { - pc, _, _, ok := runtime.Caller(1) - if !ok { - t.Fatal("failed to resolve caller") - } - fn := runtime.FuncForPC(pc) - - tmpdir, err := ioutil.TempDir("", filepath.Base(fn.Name())+"-") - if err != nil { - t.Fatal(err) - } +func contentStoreEnv(t testing.TB) (context.Context, string, content.Store, func()) { + tmpdir := t.TempDir() cs, err := NewStore(tmpdir) if err != nil { - os.RemoveAll(tmpdir) t.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) return ctx, tmpdir, cs, func() { cancel() - os.RemoveAll(tmpdir) } } @@ -362,11 +351,7 @@ func checkWrite(ctx context.Context, t checker, cs content.Store, dgst digest.Di } func TestWriterTruncateRecoversFromIncompleteWrite(t *testing.T) { - tmpdir, err := ioutil.TempDir("", 
"test-local-content-store-recover") - assert.NilError(t, err) - defer os.RemoveAll(tmpdir) - - cs, err := NewStore(tmpdir) + cs, err := NewStore(t.TempDir()) assert.NilError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -401,11 +386,7 @@ func setupIncompleteWrite(ctx context.Context, t *testing.T, cs content.Store, r } func TestWriteReadEmptyFileTimestamp(t *testing.T) { - root, err := ioutil.TempDir("", "test-write-read-file-timestamp") - if err != nil { - t.Errorf("failed to create a tmp dir: %v", err) - } - defer os.RemoveAll(root) + root := t.TempDir() emptyFile := filepath.Join(root, "updatedat") if err := writeTimestampFile(emptyFile, time.Time{}); err != nil { diff --git a/content/local/store_unix.go b/content/local/store_unix.go index 69a74ba..efa2eb9 100644 --- a/content/local/store_unix.go +++ b/content/local/store_unix.go @@ -1,3 +1,4 @@ +//go:build linux || solaris // +build linux solaris /* @@ -26,7 +27,7 @@ import ( func getATime(fi os.FileInfo) time.Time { if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. + return time.Unix(st.Atim.Unix()) } return fi.ModTime() diff --git a/content/local/writer.go b/content/local/writer.go index 0a11f4d..b187e52 100644 --- a/content/local/writer.go +++ b/content/local/writer.go @@ -18,6 +18,8 @@ package local import ( "context" + "errors" + "fmt" "io" "os" "path/filepath" @@ -28,7 +30,6 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // writer represents a write transaction against the blob store. 
@@ -88,30 +89,30 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, w.fp = nil if fp == nil { - return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") + return fmt.Errorf("cannot commit on closed writer: %w", errdefs.ErrFailedPrecondition) } if err := fp.Sync(); err != nil { fp.Close() - return errors.Wrap(err, "sync failed") + return fmt.Errorf("sync failed: %w", err) } fi, err := fp.Stat() closeErr := fp.Close() if err != nil { - return errors.Wrap(err, "stat on ingest file failed") + return fmt.Errorf("stat on ingest file failed: %w", err) } if closeErr != nil { - return errors.Wrap(err, "failed to close ingest file") + return fmt.Errorf("failed to close ingest file: %w", closeErr) } if size > 0 && size != fi.Size() { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size) + return fmt.Errorf("unexpected commit size %d, expected %d: %w", fi.Size(), size, errdefs.ErrFailedPrecondition) } dgst := w.digester.Digest() if expected != "" && expected != dgst { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected) + return fmt.Errorf("unexpected commit digest %s, expected %s: %w", dgst, expected, errdefs.ErrFailedPrecondition) } var ( @@ -127,9 +128,9 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, if _, err := os.Stat(target); err == nil { // collision with the target file! 
if err := os.RemoveAll(w.path); err != nil { - log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") + log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory") } - return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst) + return fmt.Errorf("content %v: %w", dgst, errdefs.ErrAlreadyExists) } if err := os.Rename(ingest, target); err != nil { @@ -142,17 +143,17 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, commitTime := time.Now() if err := os.Chtimes(target, commitTime, commitTime); err != nil { - log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time") + log.G(ctx).WithField("digest", dgst).Error("failed to change file time to commit time") } // clean up!! if err := os.RemoveAll(w.path); err != nil { - log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") + log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory") } if w.s.ls != nil && base.Labels != nil { if err := w.s.ls.Set(dgst, base.Labels); err != nil { - log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels") + log.G(ctx).WithField("digest", dgst).Error("failed to set labels") } } @@ -165,7 +166,7 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, // NOTE: Windows does not support this operation if runtime.GOOS != "windows" { if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil { - log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly") + log.G(ctx).WithField("ref", w.ref).Error("failed to make readonly") } } diff --git a/content/proxy/content_writer.go b/content/proxy/content_writer.go index 8423335..ffc0f50 100644 --- a/content/proxy/content_writer.go +++ b/content/proxy/content_writer.go @@ -18,13 +18,13 @@ package proxy import ( "context" + "fmt" "io" contentapi 
"github.com/containerd/containerd/api/services/content/v1" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) type remoteWriter struct { @@ -57,7 +57,7 @@ func (rw *remoteWriter) Status() (content.Status, error) { Action: contentapi.WriteActionStat, }) if err != nil { - return content.Status{}, errors.Wrap(errdefs.FromGRPC(err), "error getting writer status") + return content.Status{}, fmt.Errorf("error getting writer status: %w", errdefs.FromGRPC(err)) } return content.Status{ @@ -82,7 +82,7 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) { Data: p, }) if err != nil { - return 0, errors.Wrap(errdefs.FromGRPC(err), "failed to send write") + return 0, fmt.Errorf("failed to send write: %w", errdefs.FromGRPC(err)) } n = int(resp.Offset - offset) @@ -119,15 +119,15 @@ func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest. Labels: base.Labels, }) if err != nil { - return errors.Wrap(errdefs.FromGRPC(err), "commit failed") + return fmt.Errorf("commit failed: %w", errdefs.FromGRPC(err)) } if size != 0 && resp.Offset != size { - return errors.Errorf("unexpected size: %v != %v", resp.Offset, size) + return fmt.Errorf("unexpected size: %v != %v", resp.Offset, size) } if expected != "" && resp.Digest != expected { - return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected) + return fmt.Errorf("unexpected digest: %v != %v", resp.Digest, expected) } rw.digest = resp.Digest diff --git a/content/testsuite/testsuite.go b/content/testsuite/testsuite.go index 8264f62..c02498f 100644 --- a/content/testsuite/testsuite.go +++ b/content/testsuite/testsuite.go @@ -21,7 +21,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "math/rand" "os" "runtime" @@ -35,7 +34,6 @@ import ( "github.com/containerd/containerd/pkg/testutil" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - 
"github.com/pkg/errors" "gotest.tools/v3/assert" ) @@ -104,7 +102,7 @@ func makeTest(t *testing.T, name string, storeFn func(ctx context.Context, root ctx := context.WithValue(context.Background(), nameKey{}, name) ctx = logtest.WithT(ctx, t) - tmpDir, err := ioutil.TempDir("", "content-suite-"+name+"-") + tmpDir, err := os.MkdirTemp("", "content-suite-"+name+"-") if err != nil { t.Fatal(err) } @@ -713,35 +711,47 @@ func checkResume(rf func(context.Context, content.Writer, []byte, int64, int64, func resumeTruncate(ctx context.Context, w content.Writer, b []byte, written, size int64, dgst digest.Digest) error { if err := w.Truncate(0); err != nil { - return errors.Wrap(err, "truncate failed") + return fmt.Errorf("truncate failed: %w", err) } if _, err := io.CopyBuffer(w, bytes.NewReader(b), make([]byte, 1024)); err != nil { - return errors.Wrap(err, "write failed") + return fmt.Errorf("write failed: %w", err) } - - return errors.Wrap(w.Commit(ctx, size, dgst), "commit failed") + if err := w.Commit(ctx, size, dgst); err != nil { + return fmt.Errorf("commit failed: %w", err) + } + return nil } func resumeDiscard(ctx context.Context, w content.Writer, b []byte, written, size int64, dgst digest.Digest) error { if _, err := io.CopyBuffer(w, bytes.NewReader(b[written:]), make([]byte, 1024)); err != nil { - return errors.Wrap(err, "write failed") + return fmt.Errorf("write failed: %w", err) } - return errors.Wrap(w.Commit(ctx, size, dgst), "commit failed") + if err := w.Commit(ctx, size, dgst); err != nil { + return fmt.Errorf("commit failed: %w", err) + + } + return nil } func resumeCopy(ctx context.Context, w content.Writer, b []byte, _, size int64, dgst digest.Digest) error { r := struct { io.Reader }{bytes.NewReader(b)} - return errors.Wrap(content.Copy(ctx, w, r, size, dgst), "copy failed") + if err := content.Copy(ctx, w, r, size, dgst); err != nil { + return fmt.Errorf("copy failed: %w", err) + } + return nil } func resumeCopySeeker(ctx context.Context, w 
content.Writer, b []byte, _, size int64, dgst digest.Digest) error { r := struct { io.ReadSeeker }{bytes.NewReader(b)} - return errors.Wrap(content.Copy(ctx, w, r, size, dgst), "copy failed") + if err := content.Copy(ctx, w, r, size, dgst); err != nil { + return fmt.Errorf("copy failed: %w", err) + } + return nil } func resumeCopyReaderAt(ctx context.Context, w content.Writer, b []byte, _, size int64, dgst digest.Digest) error { @@ -752,7 +762,10 @@ func resumeCopyReaderAt(ctx context.Context, w content.Writer, b []byte, _, size r := struct { readerAt }{bytes.NewReader(b)} - return errors.Wrap(content.Copy(ctx, w, r, size, dgst), "copy failed") + if err := content.Copy(ctx, w, r, size, dgst); err != nil { + return fmt.Errorf("copy failed: %w", err) + } + return nil } // checkSmallBlob tests reading a blob which is smaller than the read size. @@ -780,8 +793,9 @@ func checkSmallBlob(ctx context.Context, t *testing.T, store content.Store) { if err != nil { t.Fatal(err) } + defer ra.Close() r := io.NewSectionReader(ra, 0, readSize) - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { t.Fatal(err) } @@ -820,6 +834,7 @@ func checkCrossNSShare(ctx context.Context, t *testing.T, cs content.Store) { if err != nil { t.Fatal(err) } + defer w.Close() t2 := time.Now() checkStatus(t, w, content.Status{ @@ -878,6 +893,7 @@ func checkCrossNSAppend(ctx context.Context, t *testing.T, cs content.Store) { if err != nil { t.Fatal(err) } + defer w.Close() t2 := time.Now() checkStatus(t, w, content.Status{ @@ -940,6 +956,7 @@ func checkCrossNSIsolate(ctx context.Context, t *testing.T, cs content.Store) { if err != nil { t.Fatal(err) } + defer w.Close() t4 := time.Now() checkNewlyCreated(t, w, t1, t2, t3, t4) @@ -1015,35 +1032,35 @@ func checkNewlyCreated(t *testing.T, w content.Writer, preStart, postStart, preU func checkInfo(ctx context.Context, cs content.Store, d digest.Digest, expected content.Info, c1, c2, u1, u2 time.Time) error { info, err := cs.Info(ctx, d) if 
err != nil { - return errors.Wrap(err, "failed to get info") + return fmt.Errorf("failed to get info: %w", err) } if info.Digest != d { - return errors.Errorf("unexpected info digest %s, expected %s", info.Digest, d) + return fmt.Errorf("unexpected info digest %s, expected %s", info.Digest, d) } if info.Size != expected.Size { - return errors.Errorf("unexpected info size %d, expected %d", info.Size, expected.Size) + return fmt.Errorf("unexpected info size %d, expected %d", info.Size, expected.Size) } if info.CreatedAt.After(c2) || info.CreatedAt.Before(c1) { - return errors.Errorf("unexpected created at time %s, expected between %s and %s", info.CreatedAt, c1, c2) + return fmt.Errorf("unexpected created at time %s, expected between %s and %s", info.CreatedAt, c1, c2) } // FIXME: broken on windows: unexpected updated at time 2017-11-14 13:43:22.178013 -0800 PST, // expected between 2017-11-14 13:43:22.1790195 -0800 PST m=+1.022137300 and // 2017-11-14 13:43:22.1790195 -0800 PST m=+1.022137300 if runtime.GOOS != "windows" && (info.UpdatedAt.After(u2) || info.UpdatedAt.Before(u1)) { - return errors.Errorf("unexpected updated at time %s, expected between %s and %s", info.UpdatedAt, u1, u2) + return fmt.Errorf("unexpected updated at time %s, expected between %s and %s", info.UpdatedAt, u1, u2) } if len(info.Labels) != len(expected.Labels) { - return errors.Errorf("mismatched number of labels\ngot:\n%#v\nexpected:\n%#v", info.Labels, expected.Labels) + return fmt.Errorf("mismatched number of labels\ngot:\n%#v\nexpected:\n%#v", info.Labels, expected.Labels) } for k, v := range expected.Labels { actual := info.Labels[k] if v != actual { - return errors.Errorf("unexpected value for label %q: %q, expected %q", k, actual, v) + return fmt.Errorf("unexpected value for label %q: %q, expected %q", k, actual, v) } } @@ -1056,16 +1073,16 @@ func checkContent(ctx context.Context, cs content.Store, d digest.Digest, expect b, err := content.ReadBlob(ctx, cs, ocispec.Descriptor{Digest: 
d}) if err != nil { - return errors.Wrap(err, "failed to read blob") + return fmt.Errorf("failed to read blob: %w", err) } if int64(len(b)) != expected.Size { - return errors.Errorf("wrong blob size %d, expected %d", len(b), expected.Size) + return fmt.Errorf("wrong blob size %d, expected %d", len(b), expected.Size) } actual := digest.FromBytes(b) if actual != d { - return errors.Errorf("wrong digest %s, expected %s", actual, d) + return fmt.Errorf("wrong digest %s, expected %s", actual, d) } return nil @@ -1079,7 +1096,7 @@ func createContent(size int64) ([]byte, digest.Digest) { // test runs. An atomic integer works just good enough for this. seed := atomic.AddInt64(&contentSeed, 1) - b, err := ioutil.ReadAll(io.LimitReader(rand.New(rand.NewSource(seed)), size)) + b, err := io.ReadAll(io.LimitReader(rand.New(rand.NewSource(seed)), size)) if err != nil { panic(err) } diff --git a/contrib/Dockerfile.test b/contrib/Dockerfile.test index 8637c4b..a9ea3ee 100644 --- a/contrib/Dockerfile.test +++ b/contrib/Dockerfile.test @@ -6,55 +6,94 @@ # 3.) 
$ make binaries install test # # Use the RUNC_VERSION build-arg to build with a custom version of runc, for example, -# to build runc v1.0.0-rc93, use: +# to build runc v1.0.0-rc94, use: # -# docker build -t containerd-test --build-arg RUNC_VERSION=v1.0.0-rc93 -f Dockerfile.test ../ +# docker build -t containerd-test --build-arg RUNC_VERSION=v1.0.0-rc94 -f Dockerfile.test ../ -ARG GOLANG_VERSION=1.16.12 +ARG GOLANG_VERSION=1.20.8 +ARG GOLANG_IMAGE=golang -FROM golang:${GOLANG_VERSION} AS golang-base -RUN mkdir -p /go/src/github.com/containerd/containerd -WORKDIR /go/src/github.com/containerd/containerd - -# Install proto3 -FROM golang-base AS proto3 -RUN apt-get update && apt-get install -y \ - autoconf \ - automake \ - g++ \ - libtool \ - unzip \ - --no-install-recommends - -COPY script/setup/install-protobuf install-protobuf -RUN ./install-protobuf +FROM ${GOLANG_IMAGE}:${GOLANG_VERSION} AS golang # Install runc -FROM golang-base AS runc -RUN apt-get update && apt-get install -y \ - curl \ +FROM golang AS runc +RUN apt-get update && apt-get install -y --no-install-recommends \ libseccomp-dev \ - --no-install-recommends + && rm -rf /var/lib/apt/lists/* COPY script/setup/runc-version script/setup/install-runc ./ # Allow overriding the version of runc to install through build-args ARG RUNC_VERSION ARG GOPROXY=direct +ARG DESTDIR=/build RUN ./install-runc -FROM golang-base AS dev -RUN apt-get update && apt-get install -y \ +FROM golang AS build-env +RUN apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev \ btrfs-progs \ - gcc \ - git \ libseccomp-dev \ - make \ xfsprogs \ - --no-install-recommends + && rm -rf /var/lib/apt/lists/* +RUN mkdir -p /go/src/github.com/containerd/containerd +WORKDIR /go/src/github.com/containerd/containerd -COPY --from=proto3 /usr/local/bin/protoc /usr/local/bin/protoc -COPY --from=proto3 /usr/local/include/google /usr/local/include/google -COPY --from=runc /usr/local/sbin/runc /usr/local/go/bin/runc +FROM golang AS 
cni +ENV DESTDIR=/build +COPY script/setup/install-cni go.mod / +RUN DESTDIR=/build /install-cni +FROM golang AS critools +ARG DESTDIR=/build +COPY script/setup/install-critools script/setup/critools-version ./ +RUN GOBIN=$DESTDIR/usr/local/bin ./install-critools + +FROM build-env AS containerd +ARG DESTDIR=/build +COPY . . +RUN make BUILDTAGS="no_btrfs no_devmapper" binaries install + +# cri-in-userns stage is for testing "CRI-in-UserNS", which should be used in conjunction with +# "Kubelet-in-UserNS": https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2033-kubelet-in-userns-aka-rootless +# This feature is mostly expected to be used for `kind` and `minikube`. +# +# Requires Rootless Docker/Podman/nerdctl with cgroup v2 delegation: https://rootlesscontaine.rs/getting-started/common/cgroup2/ +# (Rootless Docker/Podman/nerdctl prepares the UserNS, so we do not need to create UserNS by ourselves) +FROM build-env AS cri-in-userns +RUN apt-get update && apt-get install -y --no-install-recommends \ + iptables \ + && rm -rf /var/lib/apt/lists/* +COPY contrib/Dockerfile.test.d/cri-in-userns/etc_containerd_config.toml /etc/containerd/config.toml +COPY contrib/Dockerfile.test.d/cri-in-userns/docker-entrypoint.sh /docker-entrypoint.sh +COPY --from=runc /build/ / +COPY --from=cni /build/ / +COPY --from=critools /build/ / +COPY --from=containerd /build/ / +VOLUME /var/lib/containerd +ENTRYPOINT ["/docker-entrypoint.sh"] +# Skip "runtime should support unsafe sysctls": `container init caused: write sysctl key fs.mqueue.msg_max: open /proc/sys/fs/mqueue/msg_max: permission denied` +# Skip "runtime should support safe sysctls": `container init caused: write sysctl key kernel.shm_rmid_forced: open /proc/sys/kernel/shm_rmid_forced: permission denied` +# Skip "should allow privilege escalation when (NoNewPrivis is) false": expected log "Effective uid: 0\n" (stream="stdout") not found in logs [{timestamp:{wall:974487519 ext:63761339984 loc:} stream:stdout 
log:Effective uid: 1000) }] +CMD ["critest", "--ginkgo.skip=should support unsafe sysctls|should support safe sysctls|should allow privilege escalation when false"] + +# Install proto3 +FROM golang AS proto3 +ARG DESTDIR=/build +RUN apt-get update && apt-get install -y --no-install-recommends \ + autoconf \ + automake \ + g++ \ + libtool \ + unzip \ + && rm -rf /var/lib/apt/lists/* + +COPY script/setup/install-protobuf install-protobuf +RUN ./install-protobuf \ + && mkdir -p $DESTDIR/usr/local/bin $DESTDIR/usr/local/include \ + && mv /usr/local/bin/protoc $DESTDIR/usr/local/bin/protoc \ + && mv /usr/local/include/google $DESTDIR/usr/local/include/google + +FROM build-env AS dev +COPY --from=proto3 /build/ / +COPY --from=runc /build/ / COPY . . diff --git a/contrib/Dockerfile.test.d/cri-in-userns/docker-entrypoint.sh b/contrib/Dockerfile.test.d/cri-in-userns/docker-entrypoint.sh new file mode 100755 index 0000000..e7b5882 --- /dev/null +++ b/contrib/Dockerfile.test.d/cri-in-userns/docker-entrypoint.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu -o pipefail + +# Check 4294967295 to detect UserNS (https://github.com/opencontainers/runc/blob/v1.0.0/libcontainer/userns/userns_linux.go#L29-L32) +if grep -Eq "0[[:space:]]+0[[:space:]]+4294967295" /proc/self/uid_map; then + echo >&2 "ERROR: Needs to be executed in UserNS (i.e., rootless Docker/Podman/nerdctl)" + exit 1 +fi + +if [ ! 
-f "/sys/fs/cgroup/cgroup.controllers" ]; then + echo >&2 "ERROR: Needs cgroup v2" + exit 1 +fi + +for f in cpu memory pids; do + if ! grep -qw "$f" "/sys/fs/cgroup/cgroup.controllers"; then + echo >&2 "ERROR: Needs cgroup v2 controller ${f} to be delegated" + exit 1 + fi +done + +echo >&2 "Enabling cgroup v2 nesting" +# https://github.com/moby/moby/blob/v20.10.7/hack/dind#L28-L38 +mkdir -p /sys/fs/cgroup/init +xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || : +sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \ + > /sys/fs/cgroup/cgroup.subtree_control + +set -x +echo >&2 "Running containerd in background" +containerd & + +echo >&2 "Waiting for containerd" +until ctr plugins list; do sleep 3; done + +exec "$@" diff --git a/contrib/Dockerfile.test.d/cri-in-userns/etc_containerd_config.toml b/contrib/Dockerfile.test.d/cri-in-userns/etc_containerd_config.toml new file mode 100644 index 0000000..c69b0d8 --- /dev/null +++ b/contrib/Dockerfile.test.d/cri-in-userns/etc_containerd_config.toml @@ -0,0 +1,10 @@ +version = 2 + +[plugins] + [plugins."io.containerd.grpc.v1.cri"] + disable_apparmor = true + restrict_oom_score_adj = true + disable_hugetlb_controller = true + [plugins."io.containerd.grpc.v1.cri".containerd] + # Rootless overlayfs requires kernel >= 5.11 && !selinux + snapshotter = "overlayfs" diff --git a/contrib/ansible/README.md b/contrib/ansible/README.md index bbe05d3..69f73e2 100644 --- a/contrib/ansible/README.md +++ b/contrib/ansible/README.md @@ -26,8 +26,8 @@ $ cat hosts ## Step 1: At this point, the ansible playbook should be able to ssh into the machines in the hosts file. 
```console -git clone https://github.com/containerd/cri -cd ./cri/contrib/ansible +git clone https://github.com/containerd/containerd +cd ./contrib/ansible ansible-playbook -i hosts cri-containerd.yaml ``` A typical cloud login might have a username and private key file, in which case the following can be used: diff --git a/contrib/ansible/cri-containerd.yaml b/contrib/ansible/cri-containerd.yaml index feec362..3a2b4d2 100644 --- a/contrib/ansible/cri-containerd.yaml +++ b/contrib/ansible/cri-containerd.yaml @@ -61,6 +61,6 @@ # TODO This needs to be removed once we have consistent concurrent pull results - name: "Pre-pull pause container image" shell: | - /usr/local/bin/ctr pull k8s.gcr.io/pause:3.5 + /usr/local/bin/ctr pull registry.k8s.io/pause:3.6 /usr/local/bin/crictl --runtime-endpoint unix:///run/containerd/containerd.sock \ - pull k8s.gcr.io/pause:3.5 + pull registry.k8s.io/pause:3.6 diff --git a/contrib/ansible/tasks/k8s.yaml b/contrib/ansible/tasks/k8s.yaml index e2e017c..76bda51 100644 --- a/contrib/ansible/tasks/k8s.yaml +++ b/contrib/ansible/tasks/k8s.yaml @@ -13,7 +13,7 @@ when: ansible_distribution == "Ubuntu" - name: "Update the repository cache (Ubuntu)" - apt: + apt: update_cache: yes when: ansible_distribution == "Ubuntu" @@ -25,8 +25,8 @@ gpgcheck: yes enabled: yes repo_gpgcheck: yes - gpgkey: - - https://packages.cloud.google.com/yum/doc/yum-key.gpg + gpgkey: + - https://packages.cloud.google.com/yum/doc/yum-key.gpg - https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg when: ansible_distribution == "CentOS" diff --git a/contrib/ansible/vars/vars.yaml b/contrib/ansible/vars/vars.yaml index 9ae0e06..1ad521b 100644 --- a/contrib/ansible/vars/vars.yaml +++ b/contrib/ansible/vars/vars.yaml @@ -1,4 +1,4 @@ --- -containerd_release_version: 1.3.0 +containerd_release_version: 1.5.5 cni_bin_dir: /opt/cni/bin/ cni_conf_dir: /etc/cni/net.d/ diff --git a/contrib/apparmor/apparmor.go b/contrib/apparmor/apparmor.go index ec255fc..be6a49a 100644 --- 
a/contrib/apparmor/apparmor.go +++ b/contrib/apparmor/apparmor.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -21,13 +22,12 @@ package apparmor import ( "bytes" "context" - "io/ioutil" + "fmt" "os" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) // WithProfile sets the provided apparmor profile to the spec @@ -64,7 +64,7 @@ func LoadDefaultProfile(name string) error { if err != nil { return err } - f, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), p.Name) + f, err := os.CreateTemp(os.Getenv("XDG_RUNTIME_DIR"), p.Name) if err != nil { return err } @@ -76,7 +76,7 @@ func LoadDefaultProfile(name string) error { return err } if err := load(path); err != nil { - return errors.Wrapf(err, "load apparmor profile %s", path) + return fmt.Errorf("load apparmor profile %s: %w", path, err) } return nil } diff --git a/contrib/apparmor/apparmor_test.go b/contrib/apparmor/apparmor_test.go index 478a609..8c907ef 100644 --- a/contrib/apparmor/apparmor_test.go +++ b/contrib/apparmor/apparmor_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/contrib/apparmor/apparmor_unsupported.go b/contrib/apparmor/apparmor_unsupported.go index 0429954..ac00fd1 100644 --- a/contrib/apparmor/apparmor_unsupported.go +++ b/contrib/apparmor/apparmor_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* @@ -20,11 +21,11 @@ package apparmor import ( "context" + "errors" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) // WithProfile sets the provided apparmor profile to the spec diff --git a/contrib/apparmor/template.go b/contrib/apparmor/template.go index 08f4268..ba613c3 100644 --- a/contrib/apparmor/template.go +++ b/contrib/apparmor/template.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -24,15 
+25,13 @@ import ( "bufio" "fmt" "io" - "io/ioutil" "os" - "os/exec" "path" "strconv" "strings" "text/template" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" ) // NOTE: This code is copied from . @@ -125,12 +124,12 @@ func loadData(name string) (*data, error) { } ver, err := getVersion() if err != nil { - return nil, errors.Wrap(err, "get apparmor_parser version") + return nil, fmt.Errorf("get apparmor_parser version: %w", err) } p.Version = ver // Figure out the daemon profile. - currentProfile, err := ioutil.ReadFile("/proc/self/attr/current") + currentProfile, err := os.ReadFile("/proc/self/attr/current") if err != nil { // If we couldn't get the daemon profile, assume we are running // unconfined which is generally the default. @@ -152,7 +151,7 @@ func generate(p *data, o io.Writer) error { func load(path string) error { out, err := aaParser("-Kr", path) if err != nil { - return errors.Errorf("%s: %s", err, out) + return fmt.Errorf("parser error(%q): %w", strings.TrimSpace(out), err) } return nil } @@ -165,10 +164,7 @@ func macroExists(m string) bool { func aaParser(args ...string) (string, error) { out, err := exec.Command("apparmor_parser", args...).CombinedOutput() - if err != nil { - return "", err - } - return string(out), nil + return string(out), err } func getVersion() (int, error) { diff --git a/contrib/apparmor/template_test.go b/contrib/apparmor/template_test.go index c49306a..9fd1a4f 100644 --- a/contrib/apparmor/template_test.go +++ b/contrib/apparmor/template_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package apparmor diff --git a/contrib/fuzz/archive_fuzzer.go b/contrib/fuzz/archive_fuzzer.go new file mode 100644 index 0000000..1fd87a1 --- /dev/null +++ b/contrib/fuzz/archive_fuzzer.go @@ -0,0 +1,76 @@ +//go:build gofuzz +// +build gofuzz + +/* + Copyright The containerd Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fuzz + +import ( + "bytes" + "context" + "os" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/content/local" + imageArchive "github.com/containerd/containerd/images/archive" +) + +// FuzzApply implements a fuzzer that applies +// a fuzzed tar archive on a directory +func FuzzApply(data []byte) int { + f := fuzz.NewConsumer(data) + iters, err := f.GetInt() + if err != nil { + return 0 + } + maxIters := 20 + tmpDir, err := os.MkdirTemp("", "prefix-test") + if err != nil { + return 0 + } + defer os.RemoveAll(tmpDir) + for i := 0; i < iters%maxIters; i++ { + rBytes, err := f.TarBytes() + if err != nil { + return 0 + } + r := bytes.NewReader(rBytes) + _, _ = archive.Apply(context.Background(), tmpDir, r) + } + return 1 +} + +// FuzzImportIndex implements a fuzzer +// that targets archive.ImportIndex() +func FuzzImportIndex(data []byte) int { + f := fuzz.NewConsumer(data) + tarBytes, err := f.TarBytes() + if err != nil { + return 0 + } + ctx := context.Background() + r := bytes.NewReader(tarBytes) + tmpdir, err := os.MkdirTemp("", "fuzzing-") + if err != nil { + return 0 + } + cs, err := local.NewStore(tmpdir) + if err != nil { + return 0 + } + _, _ = imageArchive.ImportIndex(ctx, cs, r) + return 1 +} diff --git a/vendor/github.com/containerd/continuity/devices/devices_windows.go b/contrib/fuzz/cap_fuzzer.go similarity index 74% rename from vendor/github.com/containerd/continuity/devices/devices_windows.go rename to contrib/fuzz/cap_fuzzer.go index 04627c8..f03a563 100644 --- 
a/vendor/github.com/containerd/continuity/devices/devices_windows.go +++ b/contrib/fuzz/cap_fuzzer.go @@ -1,12 +1,12 @@ +//go:build gofuzz +// +build gofuzz + /* Copyright The containerd Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,14 +14,15 @@ limitations under the License. */ -package devices +package fuzz import ( - "os" + "bytes" - "github.com/pkg/errors" + "github.com/containerd/containerd/pkg/cap" ) -func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) { - return 0, 0, errors.Wrap(ErrNotSupported, "cannot get device info on windows") +func FuzzParseProcPIDStatus(data []byte) int { + _, _ = cap.ParseProcPIDStatus(bytes.NewReader(data)) + return 1 } diff --git a/contrib/fuzz/container_fuzzer.go b/contrib/fuzz/container_fuzzer.go new file mode 100644 index 0000000..1738dbe --- /dev/null +++ b/contrib/fuzz/container_fuzzer.go @@ -0,0 +1,453 @@ +//go:build gofuzz +// +build gofuzz + +/* + Copyright The containerd Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + To run this fuzzer, it must first be moved to + integration/client. 
OSS-fuzz does this automatically + everytime it builds the fuzzers. +*/ + +package client + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + "github.com/containerd/containerd" + "github.com/containerd/containerd/oci" + "github.com/containerd/containerd/sys" + exec "golang.org/x/sys/execabs" +) + +var ( + haveDownloadedbinaries = false + haveExtractedBinaries = false + haveChangedPATH = false + haveInitialized = false + + downloadLink = "https://github.com/containerd/containerd/releases/download/v1.5.4/containerd-1.5.4-linux-amd64.tar.gz" + downloadPath = "/tmp/containerd-1.5.4-linux-amd64.tar.gz" + binariesDir = "/tmp/containerd-binaries" +) + +// downloadFile downloads a file from a url +func downloadFile(filepath string, url string) (err error) { + + out, err := os.Create(filepath) + if err != nil { + return err + } + defer out.Close() + + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = io.Copy(out, resp.Body) + if err != nil { + return err + } + + return nil +} + +// initInSteps() performs initialization in several steps +// The reason for spreading the initialization out in +// multiple steps is that each fuzz iteration can maximum +// take 25 seconds when running through OSS-fuzz. +// Should an iteration exceed that, then the fuzzer stops. 
+func initInSteps() bool { + // Download binaries + if !haveDownloadedbinaries { + err := downloadFile(downloadPath, downloadLink) + if err != nil { + panic(err) + } + haveDownloadedbinaries = true + } + // Extract binaries + if !haveExtractedBinaries { + err := os.MkdirAll(binariesDir, 0777) + if err != nil { + return true + } + cmd := exec.Command("tar", "xvf", downloadPath, "-C", binariesDir) + err = cmd.Run() + if err != nil { + return true + } + haveExtractedBinaries = true + return true + } + // Add binaries to $PATH: + if !haveChangedPATH { + oldPathEnv := os.Getenv("PATH") + newPathEnv := fmt.Sprintf("%s/bin:%s", binariesDir, oldPathEnv) + err := os.Setenv("PATH", newPathEnv) + if err != nil { + return true + } + haveChangedPATH = true + return true + } + haveInitialized = true + return false +} + +func tearDown() error { + if err := ctrd.Stop(); err != nil { + if err := ctrd.Kill(); err != nil { + return err + } + } + if err := ctrd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { + return err + } + } + if err := sys.ForceRemoveAll(defaultRoot); err != nil { + return err + } + + return nil +} + +// checkIfShouldRestart() checks if an error indicates that +// the daemon is not running. If the daemon is not running, +// it deletes it to allow the fuzzer to create a new and +// working socket. +func checkIfShouldRestart(err error) { + if strings.Contains(err.Error(), "daemon is not running") { + deleteSocket() + } +} + +// startDaemon() starts the daemon. 
+func startDaemon(ctx context.Context, shouldTearDown bool) { + buf := bytes.NewBuffer(nil) + stdioFile, err := os.CreateTemp("", "") + if err != nil { + // We panic here as it is a fuzz-blocker that + // may need fixing + panic(err) + } + defer func() { + stdioFile.Close() + os.Remove(stdioFile.Name()) + }() + ctrdStdioFilePath = stdioFile.Name() + stdioWriter := io.MultiWriter(stdioFile, buf) + err = ctrd.start("containerd", address, []string{ + "--root", defaultRoot, + "--state", defaultState, + "--log-level", "debug", + "--config", createShimDebugConfig(), + }, stdioWriter, stdioWriter) + if err != nil { + // We are fine if the error is that the daemon is already running, + // but if the error is another, then it will be a fuzz blocker, + // so we panic + if !strings.Contains(err.Error(), "daemon is already running") { + fmt.Fprintf(os.Stderr, "%s: %s\n", err, buf.String()) + } + } + if shouldTearDown { + defer func() { + err = tearDown() + if err != nil { + checkIfShouldRestart(err) + } + }() + } + seconds := 4 * time.Second + waitCtx, waitCancel := context.WithTimeout(ctx, seconds) + + _, err = ctrd.waitForStart(waitCtx) + waitCancel() + if err != nil { + ctrd.Stop() + ctrd.Kill() + ctrd.Wait() + fmt.Fprintf(os.Stderr, "%s: %s\n", err, buf.String()) + return + } +} + +// deleteSocket() deletes the socket in the file system. +// This is needed because the socket occasionally will +// refuse a connection to it, and deleting it allows us +// to create a new socket when invoking containerd.New() +func deleteSocket() error { + err := os.Remove(defaultAddress) + if err != nil { + return err + } + return nil +} + +// updatePathEnv() creates an empty directory in which +// the fuzzer will create the containerd socket. +// updatePathEnv() furthermore adds "/out/containerd-binaries" +// to $PATH, since the binaries are available there. 
+func updatePathEnv() error { + // Create test dir for socket + err := os.MkdirAll(defaultState, 0777) + if err != nil { + return err + } + + oldPathEnv := os.Getenv("PATH") + newPathEnv := oldPathEnv + ":/out/containerd-binaries" + err = os.Setenv("PATH", newPathEnv) + if err != nil { + return err + } + haveInitialized = true + return nil +} + +// checkAndDoUnpack checks if an image is unpacked. +// If it is not unpacked, then we may or may not +// unpack it. The fuzzer decides. +func checkAndDoUnpack(image containerd.Image, ctx context.Context, f *fuzz.ConsumeFuzzer) { + unpacked, err := image.IsUnpacked(ctx, testSnapshotter) + if err == nil && unpacked { + shouldUnpack, err := f.GetBool() + if err == nil && shouldUnpack { + _ = image.Unpack(ctx, testSnapshotter) + } + } +} + +// getImage() returns an image from the client. +// The fuzzer decides which image is returned. +func getImage(client *containerd.Client, f *fuzz.ConsumeFuzzer) (containerd.Image, error) { + images, err := client.ListImages(nil) + if err != nil { + return nil, err + } + imageIndex, err := f.GetInt() + if err != nil { + return nil, err + } + image := images[imageIndex%len(images)] + return image, nil + +} + +// newContainer creates and returns a container +// The fuzzer decides how the container is created +func newContainer(client *containerd.Client, f *fuzz.ConsumeFuzzer, ctx context.Context) (containerd.Container, error) { + + // determiner determines how we should create the container + determiner, err := f.GetInt() + if err != nil { + return nil, err + } + id, err := f.GetString() + if err != nil { + return nil, err + } + + if determiner%1 == 0 { + // Create a container with oci specs + spec := &oci.Spec{} + err = f.GenerateStruct(spec) + if err != nil { + return nil, err + } + container, err := client.NewContainer(ctx, id, + containerd.WithSpec(spec)) + if err != nil { + return nil, err + } + return container, nil + } else if determiner%2 == 0 { + // Create a container with fuzzed oci 
specs + // and an image + image, err := getImage(client, f) + if err != nil { + return nil, err + } + // Fuzz a few image APIs + _, _ = image.Size(ctx) + checkAndDoUnpack(image, ctx, f) + + spec := &oci.Spec{} + err = f.GenerateStruct(spec) + if err != nil { + return nil, err + } + container, err := client.NewContainer(ctx, + id, + containerd.WithImage(image), + containerd.WithSpec(spec)) + if err != nil { + return nil, err + } + return container, nil + } else { + // Create a container with an image + image, err := getImage(client, f) + if err != nil { + return nil, err + } + // Fuzz a few image APIs + _, _ = image.Size(ctx) + checkAndDoUnpack(image, ctx, f) + + container, err := client.NewContainer(ctx, + id, + containerd.WithImage(image)) + if err != nil { + return nil, err + } + return container, nil + } + return nil, errors.New("Could not create container") +} + +// doFuzz() implements the logic of FuzzCreateContainerNoTearDown() +// and FuzzCreateContainerWithTearDown() and allows for +// the option to turn on/off teardown after each iteration. +// From a high level it: +// - Creates a client +// - Imports a bunch of fuzzed tar archives +// - Creates a bunch of containers +func doFuzz(data []byte, shouldTearDown bool) int { + ctx, cancel := testContext(nil) + defer cancel() + + // Check if daemon is running and start it if it isn't + if ctrd.cmd == nil { + startDaemon(ctx, shouldTearDown) + } + client, err := containerd.New(defaultAddress) + if err != nil { + // The error here is most likely with the socket. + // Deleting it will allow the creation of a new + // socket during next fuzz iteration. + deleteSocket() + return -1 + } + defer client.Close() + + f := fuzz.NewConsumer(data) + + // Begin import tars: + noOfImports, err := f.GetInt() + if err != nil { + return 0 + } + // maxImports is currently completely arbitrarily defined + maxImports := 30 + for i := 0; i < noOfImports%maxImports; i++ { + + // f.TarBytes() returns valid tar bytes. 
+ tarBytes, err := f.TarBytes() + if err != nil { + return 0 + } + _, _ = client.Import(ctx, bytes.NewReader(tarBytes)) + } + // End import tars + + // Begin create containers: + existingImages, err := client.ListImages(ctx) + if err != nil { + return 0 + } + if len(existingImages) > 0 { + noOfContainers, err := f.GetInt() + if err != nil { + return 0 + } + // maxNoOfContainers is currently + // completely arbitrarily defined + maxNoOfContainers := 50 + for i := 0; i < noOfContainers%maxNoOfContainers; i++ { + container, err := newContainer(client, f, ctx) + if err == nil { + defer container.Delete(ctx, containerd.WithSnapshotCleanup) + } + } + } + // End create containers + + return 1 +} + +// FuzzCreateContainerNoTearDown() implements a fuzzer +// similar to FuzzCreateContainerWithTearDown() and +// FuzzCreateContainerWithTearDown(), but it takes a +// different approach to the initialization. Where +// the other 2 fuzzers depend on the containerd binaries +// that were built manually, this fuzzer downloads them +// when starting a fuzz run. +// This fuzzer is experimental for now and is being run +// continuously by OSS-fuzz to collect feedback on +// its sustainability. +func FuzzNoTearDownWithDownload(data []byte) int { + if !haveInitialized { + shouldRestart := initInSteps() + if shouldRestart { + return 0 + } + } + ret := doFuzz(data, false) + return ret +} + +// FuzzCreateContainerNoTearDown() implements a fuzzer +// similar to FuzzCreateContainerWithTearDown() with +// with one minor distinction: One tears down the +// daemon after each iteration whereas the other doesn't. +// The two fuzzers' performance will be compared over time. 
+func FuzzCreateContainerNoTearDown(data []byte) int { + if !haveInitialized { + err := updatePathEnv() + if err != nil { + return 0 + } + } + ret := doFuzz(data, false) + return ret +} + +// FuzzCreateContainerWithTearDown() is similar to +// FuzzCreateContainerNoTearDown() except that +// FuzzCreateContainerWithTearDown tears down the daemon +// after each iteration. +func FuzzCreateContainerWithTearDown(data []byte) int { + if !haveInitialized { + err := updatePathEnv() + if err != nil { + return 0 + } + } + ret := doFuzz(data, true) + return ret +} diff --git a/contrib/fuzz/containerd_import_fuzzer.go b/contrib/fuzz/containerd_import_fuzzer.go new file mode 100644 index 0000000..6874e8c --- /dev/null +++ b/contrib/fuzz/containerd_import_fuzzer.go @@ -0,0 +1,87 @@ +//go:build gofuzz +// +build gofuzz + +/* + Copyright The containerd Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package fuzz + +import ( + "bytes" + "context" + "sync" + "time" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + + "github.com/containerd/containerd" + _ "github.com/containerd/containerd/cmd/containerd" + "github.com/containerd/containerd/cmd/containerd/command" + "github.com/containerd/containerd/namespaces" +) + +const ( + defaultRoot = "/var/lib/containerd" + defaultState = "/tmp/containerd" + defaultAddress = "/tmp/containerd/containerd.sock" +) + +var ( + initDaemon sync.Once +) + +func startDaemon() { + args := []string{"--log-level", "debug"} + go func() { + // This is similar to invoking the + // containerd binary. + // See contrib/fuzz/oss_fuzz_build.sh + // for more info. + command.StartDaemonForFuzzing(args) + }() + time.Sleep(time.Second * 4) +} + +func fuzzContext() (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + ctx = namespaces.WithNamespace(ctx, "fuzzing-namespace") + return ctx, cancel +} + +func FuzzContainerdImport(data []byte) int { + initDaemon.Do(startDaemon) + + client, err := containerd.New(defaultAddress) + if err != nil { + return 0 + } + defer client.Close() + + f := fuzz.NewConsumer(data) + + noOfImports, err := f.GetInt() + if err != nil { + return 0 + } + maxImports := 20 + ctx, cancel := fuzzContext() + defer cancel() + for i := 0; i < noOfImports%maxImports; i++ { + tarBytes, err := f.GetBytes() + if err != nil { + return 0 + } + _, _ = client.Import(ctx, bytes.NewReader(tarBytes)) + } + return 1 +} diff --git a/contrib/fuzz/content_fuzzer.go b/contrib/fuzz/content_fuzzer.go new file mode 100644 index 0000000..da25650 --- /dev/null +++ b/contrib/fuzz/content_fuzzer.go @@ -0,0 +1,169 @@ +//go:build gofuzz +// +build gofuzz + +/* + Copyright The containerd Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +//nolint:golint +package fuzz + +import ( + "bytes" + "context" + _ "crypto/sha256" // required by go-digest + "fmt" + "os" + "path/filepath" + "reflect" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/content/local" + "github.com/containerd/containerd/images/archive" +) + +// checkBlobPath performs some basic validation +func checkBlobPath(dgst digest.Digest, root string) error { + if err := dgst.Validate(); err != nil { + return err + } + path := filepath.Join(root, "blobs", dgst.Algorithm().String(), dgst.Hex()) + _, err := os.Stat(path) + if err != nil { + return err + } + return nil +} + +// generateBlobs is a helper function to create random blobs +func generateBlobs(f *fuzz.ConsumeFuzzer) (map[digest.Digest][]byte, error) { + blobs := map[digest.Digest][]byte{} + blobQty, err := f.GetInt() + if err != nil { + return blobs, err + } + maxsize := 4096 + nblobs := blobQty % maxsize + + for i := 0; i < nblobs; i++ { + digestBytes, err := f.GetBytes() + if err != nil { + return blobs, err + } + + dgst := digest.FromBytes(digestBytes) + blobs[dgst] = digestBytes + } + + return blobs, nil +} + +// checkwrite is a wrapper around content.WriteBlob() +func checkWrite(ctx context.Context, cs content.Store, dgst digest.Digest, p []byte) (digest.Digest, error) { + if err := content.WriteBlob(ctx, cs, dgst.String(), bytes.NewReader(p), + ocispec.Descriptor{Size: int64(len(p)), 
Digest: dgst}); err != nil { + return dgst, err + } + return dgst, nil +} + +// populateBlobStore creates a bunch of blobs +func populateBlobStore(ctx context.Context, cs content.Store, f *fuzz.ConsumeFuzzer) (map[digest.Digest][]byte, error) { + blobs, err := generateBlobs(f) + if err != nil { + return nil, err + } + + for dgst, p := range blobs { + _, err := checkWrite(ctx, cs, dgst, p) + if err != nil { + return blobs, err + } + } + return blobs, nil +} + +// FuzzCSWalk implements a fuzzer that targets contentStore.Walk() +func FuzzCSWalk(data []byte) int { + ctx := context.Background() + expected := map[digest.Digest]struct{}{} + found := map[digest.Digest]struct{}{} + tmpdir, err := os.MkdirTemp("", "fuzzing-") + if err != nil { + return 0 + } + defer os.RemoveAll(tmpdir) + cs, err := local.NewStore(tmpdir) + if err != nil { + return 0 + } + + f := fuzz.NewConsumer(data) + blobs, err := populateBlobStore(ctx, cs, f) + if err != nil { + return 0 + } + + for dgst := range blobs { + expected[dgst] = struct{}{} + } + + if err := cs.Walk(ctx, func(bi content.Info) error { + found[bi.Digest] = struct{}{} + err = checkBlobPath(bi.Digest, tmpdir) + if err != nil { + return err + } + return nil + }); err != nil { + return 0 + } + if !reflect.DeepEqual(expected, found) { + panic(fmt.Sprintf("%v != %v but should be equal", found, expected)) + } + return 1 +} + +func FuzzArchiveExport(data []byte) int { + f := fuzz.NewConsumer(data) + manifest := ocispec.Descriptor{} + err := f.GenerateStruct(&manifest) + if err != nil { + return 0 + } + ctx := context.Background() + tmpdir, err := os.MkdirTemp("", "fuzzing-") + if err != nil { + return 0 + } + defer os.RemoveAll(tmpdir) + cs, err := local.NewStore(tmpdir) + if err != nil { + return 0 + } + _, err = populateBlobStore(ctx, cs, f) + if err != nil { + return 0 + } + w, err := os.Create("fuzz-output-file") + if err != nil { + return 0 + } + defer w.Close() + defer os.Remove("fuzz-output-file") + _ = archive.Export(ctx, cs, w, 
archive.WithManifest(manifest, "name")) + return 1 +} diff --git a/vendor/github.com/containerd/fifo/utils.go b/contrib/fuzz/cri_fuzzer.go similarity index 58% rename from vendor/github.com/containerd/fifo/utils.go rename to contrib/fuzz/cri_fuzzer.go index bbdf790..03d1749 100644 --- a/vendor/github.com/containerd/fifo/utils.go +++ b/contrib/fuzz/cri_fuzzer.go @@ -1,12 +1,12 @@ +//go:build gofuzz +// +build gofuzz + /* Copyright The containerd Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,22 +14,26 @@ limitations under the License. */ -package fifo +package fuzz -import "os" +import ( + fuzz "github.com/AdaLogics/go-fuzz-headers" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" -// IsFifo checks if a file is a (named pipe) fifo -// if the file does not exist then it returns false -func IsFifo(path string) (bool, error) { - stat, err := os.Stat(path) + "github.com/containerd/containerd/pkg/cri/server" +) + +func FuzzParseAuth(data []byte) int { + f := fuzz.NewConsumer(data) + auth := &runtime.AuthConfig{} + err := f.GenerateStruct(auth) if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err + return 0 } - if stat.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return true, nil + host, err := f.GetString() + if err != nil { + return 0 } - return false, nil + _, _, _ = server.ParseAuth(auth, host) + return 1 } diff --git a/contrib/fuzz/docker_fuzzer.go b/contrib/fuzz/docker_fuzzer.go new file mode 100644 index 0000000..7f4213b --- /dev/null +++ b/contrib/fuzz/docker_fuzzer.go @@ -0,0 +1,86 @@ +//go:build gofuzz +// +build gofuzz + +/* + Copyright The 
containerd Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + This fuzzer is run continuously by OSS-fuzz. + It is stored in contrib/fuzz for organization, + but in order to execute it, it must be moved to + remotes/docker first. +*/ + +package docker + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + + refDocker "github.com/containerd/containerd/reference/docker" +) + +func FuzzFetcher(data []byte) int { + dataLen := len(data) + if dataLen == 0 { + return -1 + } + + s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("content-range", fmt.Sprintf("bytes %d-%d/%d", 0, dataLen-1, dataLen)) + rw.Header().Set("content-length", fmt.Sprintf("%d", dataLen)) + rw.Write(data) + })) + defer s.Close() + + u, err := url.Parse(s.URL) + if err != nil { + return 0 + } + + f := dockerFetcher{&dockerBase{ + repository: "nonempty", + }} + host := RegistryHost{ + Client: s.Client(), + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + } + + ctx := context.Background() + req := f.request(host, http.MethodGet) + rc, err := f.open(ctx, req, "", 0) + if err != nil { + return 0 + } + b, err := io.ReadAll(rc) + if err != nil { + return 0 + } + + expected := data + if len(b) != len(expected) { + panic("len of request is not equal to len of expected but should be") + } + return 1 +} + +func FuzzParseDockerRef(data []byte) int { + _, _ = refDocker.ParseDockerRef(string(data)) + return 1 +} diff --git 
a/contrib/fuzz/filters_fuzzers.go b/contrib/fuzz/filters_fuzzers.go index 9440c46..d78a0af 100644 --- a/contrib/fuzz/filters_fuzzers.go +++ b/contrib/fuzz/filters_fuzzers.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz /* diff --git a/contrib/fuzz/metadata_fuzzer.go b/contrib/fuzz/metadata_fuzzer.go new file mode 100644 index 0000000..ba1f3db --- /dev/null +++ b/contrib/fuzz/metadata_fuzzer.go @@ -0,0 +1,405 @@ +//go:build gofuzz +// +build gofuzz + +/* + Copyright The containerd Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package fuzz + +import ( + "context" + "fmt" + "os" + "path/filepath" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + bolt "go.etcd.io/bbolt" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/content/local" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/snapshots" + "github.com/containerd/containerd/snapshots/native" +) + +func testEnv() (context.Context, *bolt.DB, func(), error) { + ctx, cancel := context.WithCancel(context.Background()) + ctx = namespaces.WithNamespace(ctx, "testing") + + dirname, err := os.MkdirTemp("", "fuzz-") + if err != nil { + return ctx, nil, nil, err + } + + db, err := bolt.Open(filepath.Join(dirname, "meta.db"), 0644, nil) + if err != nil { + return ctx, nil, nil, err + } + + return ctx, db, func() { + db.Close() + _ = os.RemoveAll(dirname) + cancel() + }, nil +} + +func FuzzImageStore(data []byte) int { + ctx, db, cancel, err := testEnv() + if err != nil { + return 0 + } + defer cancel() + store := metadata.NewImageStore(metadata.NewDB(db, nil, nil)) + f := fuzz.NewConsumer(data) + noOfOperations, err := f.GetInt() + if err != nil { + return 0 + } + maxOperations := 50 + for i := 0; i < noOfOperations%maxOperations; i++ { + opType, err := f.GetInt() + if err != nil { + return 0 + } + if opType%1 == 0 { + i := images.Image{} + err := f.GenerateStruct(&i) + if err != nil { + return 0 + } + _, _ = store.Create(ctx, i) + } else if opType%2 == 0 { + newFs, err := f.GetString() + if err != nil { + return 0 + } + _, _ = store.List(ctx, newFs) + } else if opType%3 == 0 { + i := images.Image{} + err := f.GenerateStruct(&i) + if err != nil { + return 0 + } + _, _ = store.Update(ctx, 
i) + } else if opType%4 == 0 { + name, err := f.GetString() + if err != nil { + return 0 + } + _ = store.Delete(ctx, name) + } + } + return 1 +} + +func FuzzLeaseManager(data []byte) int { + ctx, db, cancel, err := testEnv() + if err != nil { + return 0 + } + defer cancel() + lm := metadata.NewLeaseManager(metadata.NewDB(db, nil, nil)) + + f := fuzz.NewConsumer(data) + noOfOperations, err := f.GetInt() + if err != nil { + return 0 + } + maxOperations := 50 + for i := 0; i < noOfOperations%maxOperations; i++ { + opType, err := f.GetInt() + if err != nil { + return 0 + } + if opType%1 == 0 { + err := db.Update(func(tx *bolt.Tx) error { + sm := make(map[string]string) + err2 := f.FuzzMap(&sm) + if err2 != nil { + return err2 + } + _, _ = lm.Create(ctx, leases.WithLabels(sm)) + return nil + }) + if err != nil { + return 0 + } + } else if opType%2 == 0 { + _, _ = lm.List(ctx) + } else if opType%3 == 0 { + l := leases.Lease{} + err := f.GenerateStruct(&l) + if err != nil { + return 0 + } + r := leases.Resource{} + err = f.GenerateStruct(&r) + if err != nil { + return 0 + } + db.Update(func(tx *bolt.Tx) error { + _ = lm.AddResource(metadata.WithTransactionContext(ctx, tx), l, r) + return nil + }) + } else if opType%4 == 0 { + l := leases.Lease{} + err = f.GenerateStruct(&l) + if err != nil { + return 0 + } + _ = lm.Delete(ctx, l) + } else if opType%5 == 0 { + l := leases.Lease{} + err := f.GenerateStruct(&l) + if err != nil { + return 0 + } + r := leases.Resource{} + err = f.GenerateStruct(&r) + if err != nil { + return 0 + } + _ = lm.DeleteResource(ctx, l, r) + } else if opType%6 == 0 { + l := leases.Lease{} + err := f.GenerateStruct(&l) + if err != nil { + return 0 + } + _, _ = lm.ListResources(ctx, l) + } + } + return 1 +} + +func FuzzContainerStore(data []byte) int { + ctx, db, cancel, err := testEnv() + if err != nil { + return 0 + } + defer cancel() + + store := metadata.NewContainerStore(metadata.NewDB(db, nil, nil)) + c := containers.Container{} + f := 
fuzz.NewConsumer(data) + noOfOperations, err := f.GetInt() + if err != nil { + return 0 + } + maxOperations := 50 + for i := 0; i < noOfOperations%maxOperations; i++ { + opType, err := f.GetInt() + if err != nil { + return 0 + } + if opType%1 == 0 { + err := f.GenerateStruct(&c) + if err != nil { + return 0 + } + db.Update(func(tx *bolt.Tx) error { + _, _ = store.Create(metadata.WithTransactionContext(ctx, tx), c) + return nil + }) + } else if opType%2 == 0 { + filt, err := f.GetString() + if err != nil { + return 0 + } + _, _ = store.List(ctx, filt) + } else if opType%3 == 0 { + id, err := f.GetString() + if err != nil { + return 0 + } + _ = store.Delete(ctx, id) + } else if opType%4 == 0 { + fieldpaths, err := f.GetString() + if err != nil { + return 0 + } + _, _ = store.Update(ctx, c, fieldpaths) + } else if opType%5 == 0 { + id, err := f.GetString() + if err != nil { + return 0 + } + _, _ = store.Get(ctx, id) + } + } + return 1 +} + +type testOptions struct { + extraSnapshots map[string]func(string) (snapshots.Snapshotter, error) +} + +type testOpt func(*testOptions) + +func testDB(opt ...testOpt) (context.Context, *metadata.DB, func(), error) { + ctx, cancel := context.WithCancel(context.Background()) + ctx = namespaces.WithNamespace(ctx, "testing") + + var topts testOptions + + for _, o := range opt { + o(&topts) + } + + dirname, err := os.MkdirTemp("", "fuzzing-") + if err != nil { + return ctx, nil, func() { cancel() }, err + } + defer os.RemoveAll(dirname) + + snapshotter, err := native.NewSnapshotter(filepath.Join(dirname, "native")) + if err != nil { + return ctx, nil, func() { cancel() }, err + } + + snapshotters := map[string]snapshots.Snapshotter{ + "native": snapshotter, + } + + for name, fn := range topts.extraSnapshots { + snapshotter, err := fn(filepath.Join(dirname, name)) + if err != nil { + return ctx, nil, func() { cancel() }, err + } + snapshotters[name] = snapshotter + } + + cs, err := local.NewStore(filepath.Join(dirname, "content")) + if 
err != nil { + return ctx, nil, func() { cancel() }, err + } + + bdb, err := bolt.Open(filepath.Join(dirname, "metadata.db"), 0644, nil) + if err != nil { + return ctx, nil, func() { cancel() }, err + } + + db := metadata.NewDB(bdb, cs, snapshotters) + if err := db.Init(ctx); err != nil { + return ctx, nil, func() { cancel() }, err + } + + return ctx, db, func() { + bdb.Close() + if err := os.RemoveAll(dirname); err != nil { + fmt.Println("Failed removing temp dir") + } + cancel() + }, nil +} + +func FuzzContentStore(data []byte) int { + ctx, db, cancel, err := testDB() + defer cancel() + if err != nil { + return 0 + } + + cs := db.ContentStore() + f := fuzz.NewConsumer(data) + noOfOperations, err := f.GetInt() + if err != nil { + return 0 + } + maxOperations := 50 + for i := 0; i < noOfOperations%maxOperations; i++ { + opType, err := f.GetInt() + if err != nil { + return 0 + } + if opType%1 == 0 { + blob, err := f.GetBytes() + if err != nil { + return 0 + } + dgst := digest.FromBytes(blob) + err = dgst.Validate() + if err != nil { + return 0 + } + _, _ = cs.Info(ctx, dgst) + } else if opType%2 == 0 { + info := content.Info{} + err = f.GenerateStruct(&info) + if err != nil { + return 0 + } + _, _ = cs.Update(ctx, info) + } else if opType%3 == 0 { + walkFn := func(info content.Info) error { + return nil + } + _ = cs.Walk(ctx, walkFn) + } else if opType%4 == 0 { + blob, err := f.GetBytes() + if err != nil { + return 0 + } + dgst := digest.FromBytes(blob) + err = dgst.Validate() + if err != nil { + return 0 + } + _ = cs.Delete(ctx, dgst) + } else if opType%5 == 0 { + _, _ = cs.ListStatuses(ctx) + } else if opType%6 == 0 { + ref, err := f.GetString() + if err != nil { + return 0 + } + _, _ = cs.Status(ctx, ref) + } else if opType%7 == 0 { + ref, err := f.GetString() + if err != nil { + return 0 + } + _ = cs.Abort(ctx, ref) + } else if opType%8 == 0 { + desc := ocispec.Descriptor{} + err = f.GenerateStruct(&desc) + if err != nil { + return 0 + } + ref, err := 
f.GetString() + if err != nil { + return 0 + } + csWriter, err := cs.Writer(ctx, + content.WithDescriptor(desc), + content.WithRef(ref)) + if err != nil { + return 0 + } + defer csWriter.Close() + p, err := f.GetBytes() + if err != nil { + return 0 + } + _, _ = csWriter.Write(p) + _ = csWriter.Commit(ctx, 0, csWriter.Digest()) + } + } + return 1 +} diff --git a/contrib/fuzz/oss_fuzz_build.sh b/contrib/fuzz/oss_fuzz_build.sh new file mode 100755 index 0000000..f3814ce --- /dev/null +++ b/contrib/fuzz/oss_fuzz_build.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash + +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o nounset +set -o pipefail +set -o errexit +set -x + +cd "$(dirname "${BASH_SOURCE[0]}")" +cd ../../ + +# Move all fuzzers that don't have the "fuzz" package out of this dir +mv contrib/fuzz/docker_fuzzer.go remotes/docker/ +mv contrib/fuzz/container_fuzzer.go integration/client/ + + +# Change path of socket since OSS-fuzz does not grant access to /run +sed -i 's/\/run\/containerd/\/tmp\/containerd/g' $SRC/containerd/defaults/defaults_unix.go + +# To build FuzzContainer2 we need to prepare a few things: +# We change the name of the cmd/containerd package +# so that we can import it. +# We furthermore add an exported function that is similar +# to cmd/containerd.main and call that instead of calling +# the containerd binary. +# +# In the fuzzer we import cmd/containerd as a low-maintenance +# way of initializing all the plugins. 
+# Make backup of cmd/containerd: +cp -r $SRC/containerd/cmd/containerd $SRC/cmd-containerd-backup +# Rename package: +find $SRC/containerd/cmd/containerd -type f -exec sed -i 's/package main/package mainfuzz/g' {} \; +# Add an exported function +sed -i -e '$afunc StartDaemonForFuzzing(arguments []string) {\n\tapp := App()\n\t_ = app.Run(arguments)\n}' $SRC/containerd/cmd/containerd/command/main.go +# Build fuzzer: +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzContainerdImport fuzz_containerd_import +# Reinstante backup of cmd/containerd: +mv $SRC/cmd-containerd-backup $SRC/containerd/cmd/containerd + +# Compile more fuzzers +compile_go_fuzzer github.com/containerd/containerd/remotes/docker FuzzFetcher fuzz_fetcher +compile_go_fuzzer github.com/containerd/containerd/remotes/docker FuzzParseDockerRef fuzz_parse_docker_ref +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzFiltersParse fuzz_filters_parse +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzPlatformsParse fuzz_platforms_parse +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzApply fuzz_apply +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzImportIndex fuzz_import_index +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzCSWalk fuzz_cs_walk +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzArchiveExport fuzz_archive_export +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzParseAuth fuzz_parse_auth +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzParseProcPIDStatus fuzz_parse_proc_pid_status +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzImageStore fuzz_image_store +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzLeaseManager fuzz_lease_manager +compile_go_fuzzer github.com/containerd/containerd/contrib/fuzz FuzzContainerStore fuzz_container_store +compile_go_fuzzer 
github.com/containerd/containerd/contrib/fuzz FuzzContentStore fuzz_content_store + + +# The below fuzzers require more setup than the fuzzers above. +# We need the binaries from "make". +wget -c https://github.com/protocolbuffers/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip +unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local + +export CGO_ENABLED=1 +export GOARCH=amd64 + +# Build runc +cd $SRC/ +git clone https://github.com/opencontainers/runc --branch release-1.0 +cd runc +make +make install + +# Build static containerd +cd $SRC/containerd +make EXTRA_FLAGS="-buildmode pie" \ + EXTRA_LDFLAGS='-linkmode external -extldflags "-fno-PIC -static"' \ + BUILDTAGS="netgo osusergo static_build" + + +mkdir $OUT/containerd-binaries || true +cd $SRC/containerd/bin && cp * $OUT/containerd-binaries/ && cd - + +# Change defaultState and defaultAddress fron /run/containerd-test to /tmp/containerd-test: +sed -i 's/\/run\/containerd-test/\/tmp\/containerd-test/g' $SRC/containerd/integration/client/client_unix_test.go + +cd integration/client + +# Rename all *_test.go to *_test_fuzz.go to use their declarations: +for i in $( ls *_test.go ); do mv $i ./${i%.*}_fuzz.go; done + +# Remove windows test to avoid double declarations: +rm ./client_windows_test_fuzz.go +rm ./helpers_windows_test_fuzz.go +compile_go_fuzzer github.com/containerd/containerd/integration/client FuzzCreateContainerNoTearDown fuzz_create_container_no_teardown +compile_go_fuzzer github.com/containerd/containerd/integration/client FuzzCreateContainerWithTearDown fuzz_create_container_with_teardown +compile_go_fuzzer github.com/containerd/containerd/integration/client FuzzNoTearDownWithDownload fuzz_no_teardown_with_download diff --git a/contrib/fuzz/platforms_fuzzers.go b/contrib/fuzz/platforms_fuzzers.go index 8b02c83..cf418a9 100644 --- a/contrib/fuzz/platforms_fuzzers.go +++ b/contrib/fuzz/platforms_fuzzers.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz /* diff --git 
a/contrib/gce/cloud-init/master.yaml b/contrib/gce/cloud-init/master.yaml index 35c869c..1b2aea4 100644 --- a/contrib/gce/cloud-init/master.yaml +++ b/contrib/gce/cloud-init/master.yaml @@ -75,7 +75,8 @@ write_files: content: | [Unit] Description=Download and install k8s binaries and configurations - After=network-online.target + After=network-online.target containerd.target + Wants=network-online.target containerd.target [Service] Type=oneshot diff --git a/contrib/gce/cloud-init/node.yaml b/contrib/gce/cloud-init/node.yaml index 97d7ad4..56dae24 100644 --- a/contrib/gce/cloud-init/node.yaml +++ b/contrib/gce/cloud-init/node.yaml @@ -69,7 +69,8 @@ write_files: content: | [Unit] Description=Download and install k8s binaries and configurations - After=network-online.target + After=network-online.target containerd.target + Wants=network-online.target containerd.target [Service] Type=oneshot diff --git a/contrib/gce/cni.template b/contrib/gce/cni.template index c8c0d7f..adf5439 100644 --- a/contrib/gce/cni.template +++ b/contrib/gce/cni.template @@ -1,6 +1,6 @@ { "name": "k8s-pod-network", - "cniVersion": "0.3.1", + "cniVersion": "1.0.0", "plugins": [ { "type": "ptp", diff --git a/contrib/gce/configure.sh b/contrib/gce/configure.sh index 741684e..0499f0b 100755 --- a/contrib/gce/configure.sh +++ b/contrib/gce/configure.sh @@ -114,7 +114,7 @@ if [ "${CONTAINERD_TEST:-"false"}" != "true" ]; then # CONTAINERD_VERSION is the cri-containerd version to use. version=${CONTAINERD_VERSION:-""} else - deploy_path=${CONTAINERD_DEPLOY_PATH:-"cri-containerd-staging"} + deploy_path=${CONTAINERD_DEPLOY_PATH:-"k8s-staging-cri-tools"} # PULL_REFS_METADATA is the metadata key of PULL_REFS from prow. 
PULL_REFS_METADATA="PULL_REFS" @@ -176,6 +176,8 @@ if [ "${KUBERNETES_MASTER:-}" != "true" ]; then cni_template_path="" fi fi +# Use systemd cgroup if specified in env +systemdCgroup="${CONTAINERD_SYSTEMD_CGROUP:-"false"}" log_level="${CONTAINERD_LOG_LEVEL:-"info"}" max_container_log_line="${CONTAINERD_MAX_CONTAINER_LOG_LINE:-16384}" cat > ${config_path} < 0 || k.Major > 0 { + return fmt.Sprintf("%d.%d", k.Kernel, k.Major) + } + return "" +} + +var ( + currentKernelVersion *KernelVersion + kernelVersionError error + once sync.Once +) + +// getKernelVersion gets the current kernel version. +func getKernelVersion() (*KernelVersion, error) { + once.Do(func() { + var uts unix.Utsname + if err := unix.Uname(&uts); err != nil { + return + } + // Remove the \x00 from the release for Atoi to parse correctly + currentKernelVersion, kernelVersionError = parseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)])) + }) + return currentKernelVersion, kernelVersionError +} + +// parseRelease parses a string and creates a KernelVersion based on it. +func parseRelease(release string) (*KernelVersion, error) { + var version = KernelVersion{} + + // We're only make sure we get the "kernel" and "major revision". Sometimes we have + // 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64. + _, err := fmt.Sscanf(release, "%d.%d", &version.Kernel, &version.Major) + if err != nil { + return nil, fmt.Errorf("failed to parse kernel version %q: %w", release, err) + } + return &version, nil +} + +// GreaterEqualThan checks if the host's kernel version is greater than, or +// equal to the given kernel version v. Only "kernel version" and "major revision" +// can be specified (e.g., "3.12") and will be taken into account, which means +// that 3.12.25-gentoo and 3.12-1-amd64 are considered equal (kernel: 3, major: 12). 
+func GreaterEqualThan(minVersion KernelVersion) (bool, error) { + kv, err := getKernelVersion() + if err != nil { + return false, err + } + if kv.Kernel > minVersion.Kernel { + return true, nil + } + if kv.Kernel == minVersion.Kernel && kv.Major >= minVersion.Major { + return true, nil + } + return false, nil +} diff --git a/contrib/seccomp/kernelversion/kernel_linux_test.go b/contrib/seccomp/kernelversion/kernel_linux_test.go new file mode 100644 index 0000000..a45e191 --- /dev/null +++ b/contrib/seccomp/kernelversion/kernel_linux_test.go @@ -0,0 +1,141 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + File copied and customized based on + https://github.com/moby/moby/tree/v20.10.14/profiles/seccomp/kernel_linux_test.go +*/ + +package kernelversion + +import ( + "fmt" + "testing" +) + +func TestGetKernelVersion(t *testing.T) { + version, err := getKernelVersion() + if err != nil { + t.Fatal(err) + } + if version == nil { + t.Fatal("version is nil") + } + if version.Kernel == 0 { + t.Fatal("no kernel version") + } +} + +// TestParseRelease tests the ParseRelease() function +func TestParseRelease(t *testing.T) { + tests := []struct { + in string + out KernelVersion + expectedErr error + }{ + {in: "3.8", out: KernelVersion{Kernel: 3, Major: 8}}, + {in: "3.8.0", out: KernelVersion{Kernel: 3, Major: 8}}, + {in: "3.8.0-19-generic", out: KernelVersion{Kernel: 3, Major: 8}}, + {in: "3.4.54.longterm-1", out: KernelVersion{Kernel: 3, Major: 4}}, + {in: "3.10.0-862.2.3.el7.x86_64", out: KernelVersion{Kernel: 3, Major: 10}}, + {in: "3.12.8tag", out: KernelVersion{Kernel: 3, Major: 12}}, + {in: "3.12-1-amd64", out: KernelVersion{Kernel: 3, Major: 12}}, + {in: "3.12foobar", out: KernelVersion{Kernel: 3, Major: 12}}, + {in: "99.999.999-19-generic", out: KernelVersion{Kernel: 99, Major: 999}}, + {in: "", expectedErr: fmt.Errorf(`failed to parse kernel version "": EOF`)}, + {in: "3", expectedErr: fmt.Errorf(`failed to parse kernel version "3": unexpected EOF`)}, + {in: "3.", expectedErr: fmt.Errorf(`failed to parse kernel version "3.": EOF`)}, + {in: "3a", expectedErr: fmt.Errorf(`failed to parse kernel version "3a": input does not match format`)}, + {in: "3.a", expectedErr: fmt.Errorf(`failed to parse kernel version "3.a": expected integer`)}, + {in: "a", expectedErr: fmt.Errorf(`failed to parse kernel version "a": expected integer`)}, + {in: "a.a", expectedErr: fmt.Errorf(`failed to parse kernel version "a.a": expected integer`)}, + {in: "a.a.a-a", expectedErr: fmt.Errorf(`failed to parse kernel version "a.a.a-a": expected integer`)}, + {in: "-3", expectedErr: 
fmt.Errorf(`failed to parse kernel version "-3": expected integer`)}, + {in: "-3.", expectedErr: fmt.Errorf(`failed to parse kernel version "-3.": expected integer`)}, + {in: "-3.8", expectedErr: fmt.Errorf(`failed to parse kernel version "-3.8": expected integer`)}, + {in: "-3.-8", expectedErr: fmt.Errorf(`failed to parse kernel version "-3.-8": expected integer`)}, + {in: "3.-8", expectedErr: fmt.Errorf(`failed to parse kernel version "3.-8": expected integer`)}, + } + for _, tc := range tests { + tc := tc + t.Run(tc.in, func(t *testing.T) { + version, err := parseRelease(tc.in) + if tc.expectedErr != nil { + if err == nil { + t.Fatal("expected an error") + } + if err.Error() != tc.expectedErr.Error() { + t.Fatalf("expected: %s, got: %s", tc.expectedErr, err) + } + return + } + if err != nil { + t.Fatal("unexpected error:", err) + } + if version == nil { + t.Fatal("version is nil") + } + if version.Kernel != tc.out.Kernel || version.Major != tc.out.Major { + t.Fatalf("expected: %d.%d, got: %d.%d", tc.out.Kernel, tc.out.Major, version.Kernel, version.Major) + } + }) + } +} + +func TestGreaterEqualThan(t *testing.T) { + // Get the current kernel version, so that we can make test relative to that + v, err := getKernelVersion() + if err != nil { + t.Fatal(err) + } + + tests := []struct { + doc string + in KernelVersion + expected bool + }{ + { + doc: "same version", + in: KernelVersion{v.Kernel, v.Major}, + expected: true, + }, + { + doc: "kernel minus one", + in: KernelVersion{v.Kernel - 1, v.Major}, + expected: true, + }, + { + doc: "kernel plus one", + in: KernelVersion{v.Kernel + 1, v.Major}, + expected: false, + }, + { + doc: "major plus one", + in: KernelVersion{v.Kernel, v.Major + 1}, + expected: false, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.doc+": "+tc.in.String(), func(t *testing.T) { + ok, err := GreaterEqualThan(tc.in) + if err != nil { + t.Fatal("unexpected error:", err) + } + if ok != tc.expected { + t.Fatalf("expected: %v, got: %v", 
tc.expected, ok) + } + }) + } +} diff --git a/contrib/seccomp/seccomp.go b/contrib/seccomp/seccomp.go index b7cf176..5292cbc 100644 --- a/contrib/seccomp/seccomp.go +++ b/contrib/seccomp/seccomp.go @@ -20,7 +20,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "os" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" @@ -33,7 +33,7 @@ import ( func WithProfile(profile string) oci.SpecOpts { return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { s.Linux.Seccomp = &specs.LinuxSeccomp{} - f, err := ioutil.ReadFile(profile) + f, err := os.ReadFile(profile) if err != nil { return fmt.Errorf("cannot load seccomp profile %q: %v", profile, err) } diff --git a/contrib/seccomp/seccomp_default.go b/contrib/seccomp/seccomp_default.go index 2876612..363bebd 100644 --- a/contrib/seccomp/seccomp_default.go +++ b/contrib/seccomp/seccomp_default.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -23,6 +24,7 @@ import ( "golang.org/x/sys/unix" + "github.com/containerd/containerd/contrib/seccomp/kernelversion" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -42,6 +44,9 @@ func arches() []specs.Arch { return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32} case "s390x": return []specs.Arch{specs.ArchS390, specs.ArchS390X} + case "riscv64": + // ArchRISCV32 (SCMP_ARCH_RISCV32) does not exist + return []specs.Arch{specs.ArchRISCV64} default: return []specs.Arch{} } @@ -127,6 +132,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { "ftruncate64", "futex", "futex_time64", + "futex_waitv", "futimesat", "getcpu", "getcwd", @@ -183,6 +189,9 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { "io_uring_setup", "ipc", "kill", + "landlock_add_rule", + "landlock_create_ruleset", + "landlock_restrict_self", "lchown", "lchown32", "lgetxattr", @@ -200,6 +209,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { "madvise", "membarrier", 
"memfd_create", + "memfd_secret", "mincore", "mkdir", "mkdirat", @@ -228,6 +238,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { "munlock", "munlockall", "munmap", + "name_to_handle_at", "nanosleep", "newfstatat", "_newselect", @@ -239,6 +250,9 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { "pidfd_send_signal", "pipe", "pipe2", + "pkey_alloc", + "pkey_free", + "pkey_mprotect", "poll", "ppoll", "ppoll_time64", @@ -247,6 +261,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { "preadv", "preadv2", "prlimit64", + "process_mrelease", "pselect6", "pselect6_time64", "pwrite64", @@ -460,12 +475,25 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { Syscalls: syscalls, } + // include by kernel version + if ok, err := kernelversion.GreaterEqualThan( + kernelversion.KernelVersion{Kernel: 4, Major: 8}); err == nil { + if ok { + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"ptrace"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + } + } + // include by arch switch runtime.GOARCH { case "ppc64le": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{ "sync_file_range2", + "swapcontext", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, @@ -510,6 +538,14 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) + case "riscv64": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "riscv_flush_icache", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) } admin := false @@ -535,11 +571,12 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { "fspick", "lookup_dcookie", "mount", + "mount_setattr", "move_mount", - "name_to_handle_at", "open_tree", "perf_event_open", "quotactl", + "quotactl_fd", "setdomainname", "sethostname", "setns", @@ -607,6 +644,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { "settimeofday", "stime", "clock_settime", + 
"clock_settime64", }, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, @@ -617,12 +655,34 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) + case "CAP_SYS_NICE": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "get_mempolicy", + "mbind", + "set_mempolicy", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) case "CAP_SYSLOG": s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ Names: []string{"syslog"}, Action: specs.ActAllow, Args: []specs.LinuxSeccompArg{}, }) + case "CAP_BPF": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"bpf"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_PERFMON": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"perf_event_open"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) } } diff --git a/contrib/seccomp/seccomp_default_unsupported.go b/contrib/seccomp/seccomp_default_unsupported.go index 6d70510..d06d648 100644 --- a/contrib/seccomp/seccomp_default_unsupported.go +++ b/contrib/seccomp/seccomp_default_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/debian/README.Debian b/debian/README.Debian deleted file mode 100644 index 3251b85..0000000 --- a/debian/README.Debian +++ /dev/null @@ -1,8 +0,0 @@ -containerd for Debian - -Please edit this to provide information specific to -this containerd Debian package. - - (Automatically generated by debmake Version 4.3.1) - - -- Luoyaoming Fri, 30 Dec 2022 13:48:35 +0800 diff --git a/debian/changelog b/debian/changelog deleted file mode 100644 index 87aa0e4..0000000 --- a/debian/changelog +++ /dev/null @@ -1,35 +0,0 @@ -containerd (1.5.9-ok6) yangtze; urgency=high - - * fat_time CVE-2022-31030 安全更新:Apache containerd 存在资源管理错误漏洞,该漏洞源于未正确控制 ExecSync API 中内部资源的消耗. 
- - -- wangdong Thu, 09 Mar 2023 18:23:38 +0800 - -containerd (1.5.9-ok5) yangtze; urgency=high - - * eric-teng CVE-2022-24778 安全更新:应用CheckAuthorization函数处理带有 ManifestList 的图像并且本地主机的体系结构不是 ManifestList的情况会发生故障 - - -- lch Wed, 08 Mar 2023 00:31:13 +0800 - -containerd (1.5.9-ok4) yangtze; urgency=medium - - * eric-teng CVE-2022-24769 安全更新:在20.10.14版之前的Moby(Docker Engine)中发现了一个错误 - - -- dht Fri, 03 Mar 2023 12:51:31 +0800 - -containerd (1.5.9-ok3) yangtze; urgency=medium - - * xie_shang CVE-2022-23471 安全更新:containerd 1.6.12之前版本、1.5.16之前版本中存在资源管理错误漏洞. - - -- hjf Mon, 27 Feb 2023 17:06:57 +0800 - -containerd (1.5.9-ok2) yangtze; urgency=medium - - * Update version info. - - -- Luoyaoming Fri, 30 Dec 2022 14:22:46 +0800 - -containerd (1.5.9-ok1) yangtze; urgency=low - - * Initial release. - - -- Luoyaoming Fri, 30 Dec 2022 13:48:35 +0800 diff --git a/debian/clean b/debian/clean deleted file mode 100644 index b938377..0000000 --- a/debian/clean +++ /dev/null @@ -1,3 +0,0 @@ -.gocache/ -.gopath/ -man/ diff --git a/debian/compat b/debian/compat deleted file mode 100644 index b4de394..0000000 --- a/debian/compat +++ /dev/null @@ -1 +0,0 @@ -11 diff --git a/debian/containerd.docs b/debian/containerd.docs deleted file mode 100644 index ea33e1b..0000000 --- a/debian/containerd.docs +++ /dev/null @@ -1,3 +0,0 @@ -README.md -docs/*.md -docs/*.pdf diff --git a/debian/containerd.install b/debian/containerd.install deleted file mode 100644 index a65408f..0000000 --- a/debian/containerd.install +++ /dev/null @@ -1,2 +0,0 @@ -usr/bin -usr/share/man diff --git a/debian/containerd.service b/debian/containerd.service deleted file mode 120000 index 9125c4f..0000000 --- a/debian/containerd.service +++ /dev/null @@ -1 +0,0 @@ -../containerd.service \ No newline at end of file diff --git a/debian/control b/debian/control deleted file mode 100644 index 2cc2665..0000000 --- a/debian/control +++ /dev/null @@ -1,42 +0,0 @@ -Source: containerd -Section: admin -Priority: optional -Maintainer: 
Openkylin Developers -XSBC-Original-Maintainer: Debian Go Packaging Team -Uploaders: Luo Yaoming -Build-Depends: debhelper (>= 11), - dh-golang, - go-md2man, - golang-go, - libbtrfs-dev | btrfs-progs (<< 4.16.1~), - libseccomp-dev, - pkg-config -Standards-Version: 3.9.7 -Homepage: https://containerd.io -Vcs-Git: https://gitee/openkylin/containerd.git -Vcs-Browser: https://gitee/openkylin/containerd -XS-Go-Import-Path: github.com/containerd/containerd - -Package: containerd -Architecture: linux-any -Depends: runc (>= 1.0.0~rc2~), ${misc:Depends}, ${shlibs:Depends} -Breaks: docker.io (<< 19.03.13-0) -Built-Using: ${misc:Built-Using} -Description: daemon to control runC - Containerd is a daemon to control runC, built for performance and density. - Containerd leverages runC's advanced features such as seccomp and user - namespace support as well as checkpoint and restore for cloning and live - migration of containers. - . - This package contains the binaries. - -Package: golang-github-containerd-containerd-dev -Architecture: all -Depends: ${misc:Depends} -Description: runC develpoment files - Containerd is a daemon to control runC, built for performance and density. - Containerd leverages runC's advanced features such as seccomp and user - namespace support as well as checkpoint and restore for cloning and live - migration of containers. - . - This package provides development files. diff --git a/debian/copyright b/debian/copyright deleted file mode 100644 index fab2ed6..0000000 --- a/debian/copyright +++ /dev/null @@ -1,11777 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: containerd -Source: -# -# Please double check copyright with the licensecheck(1) command. 
- -Files: api/events/container.proto - api/events/content.proto - api/events/image.proto - api/events/namespace.proto - api/events/snapshot.proto - api/events/task.proto - api/services/containers/v1/containers.proto - api/services/content/v1/content.proto - api/services/diff/v1/diff.proto - api/services/events/v1/events.proto - api/services/images/v1/docs.go - api/services/images/v1/images.proto - api/services/introspection/v1/doc.go - api/services/introspection/v1/introspection.proto - api/services/leases/v1/doc.go - api/services/leases/v1/leases.proto - api/services/namespaces/v1/namespace.proto - api/services/snapshots/v1/snapshots.proto - api/services/tasks/v1/tasks.proto - api/services/ttrpc/events/v1/events.proto - api/services/version/v1/version.proto - api/types/descriptor.proto - api/types/doc.go - api/types/metrics.proto - api/types/mount.proto - api/types/platform.proto - api/types/task/task.proto - archive/compression/compression.go - archive/compression/compression_test.go - archive/issues_test.go - archive/tar.go - archive/tar_freebsd.go - archive/tar_linux_test.go - archive/tar_mostunix.go - archive/tar_opts.go - archive/tar_opts_linux.go - archive/tar_opts_windows.go - archive/tar_test.go - archive/tar_unix.go - archive/tar_windows.go - archive/tartest/tar.go - archive/time.go - archive/time_unix.go - archive/time_windows.go - cio/io.go - cio/io_test.go - cio/io_unix.go - cio/io_unix_test.go - cio/io_windows.go - cio/io_windows_test.go - client.go - client_opts.go - cmd/containerd-shim-runc-v1/main.go - cmd/containerd-shim-runc-v2/main.go - cmd/containerd-shim/main_unix.go - cmd/containerd-shim/shim_darwin.go - cmd/containerd-shim/shim_freebsd.go - cmd/containerd-shim/shim_linux.go - cmd/containerd-stress/density.go - cmd/containerd-stress/exec_worker.go - cmd/containerd-stress/main.go - cmd/containerd-stress/rlimit_freebsd.go - cmd/containerd-stress/rlimit_unix.go - cmd/containerd-stress/rlimit_windows.go - cmd/containerd-stress/size.go - 
cmd/containerd-stress/worker.go - cmd/containerd/builtins.go - cmd/containerd/builtins_aufs_linux.go - cmd/containerd/builtins_btrfs_linux.go - cmd/containerd/builtins_cri.go - cmd/containerd/builtins_devmapper_linux.go - cmd/containerd/builtins_freebsd.go - cmd/containerd/builtins_linux.go - cmd/containerd/builtins_unix.go - cmd/containerd/builtins_windows.go - cmd/containerd/builtins_zfs_linux.go - cmd/containerd/command/config.go - cmd/containerd/command/config_linux.go - cmd/containerd/command/config_unsupported.go - cmd/containerd/command/config_windows.go - cmd/containerd/command/main.go - cmd/containerd/command/main_unix.go - cmd/containerd/command/main_windows.go - cmd/containerd/command/notify_linux.go - cmd/containerd/command/notify_unsupported.go - cmd/containerd/command/oci-hook.go - cmd/containerd/command/publish.go - cmd/containerd/command/service_unsupported.go - cmd/containerd/command/service_windows.go - cmd/containerd/main.go - cmd/ctr/app/main.go - cmd/ctr/app/main_unix.go - cmd/ctr/commands/client.go - cmd/ctr/commands/commands.go - cmd/ctr/commands/commands_unix.go - cmd/ctr/commands/commands_windows.go - cmd/ctr/commands/containers/checkpoint.go - cmd/ctr/commands/containers/containers.go - cmd/ctr/commands/containers/restore.go - cmd/ctr/commands/content/content.go - cmd/ctr/commands/content/fetch.go - cmd/ctr/commands/content/prune.go - cmd/ctr/commands/events/events.go - cmd/ctr/commands/images/convert.go - cmd/ctr/commands/images/export.go - cmd/ctr/commands/images/images.go - cmd/ctr/commands/images/import.go - cmd/ctr/commands/images/mount.go - cmd/ctr/commands/images/pull.go - cmd/ctr/commands/images/push.go - cmd/ctr/commands/images/tag.go - cmd/ctr/commands/images/unmount.go - cmd/ctr/commands/install/install.go - cmd/ctr/commands/leases/leases.go - cmd/ctr/commands/namespaces/namespaces.go - cmd/ctr/commands/namespaces/namespaces_linux.go - cmd/ctr/commands/namespaces/namespaces_other.go - cmd/ctr/commands/oci/oci.go - 
cmd/ctr/commands/plugins/plugins.go - cmd/ctr/commands/pprof/pprof.go - cmd/ctr/commands/pprof/pprof_unix.go - cmd/ctr/commands/pprof/pprof_windows.go - cmd/ctr/commands/resolver.go - cmd/ctr/commands/run/run.go - cmd/ctr/commands/run/run_unix.go - cmd/ctr/commands/run/run_windows.go - cmd/ctr/commands/shim/io_unix.go - cmd/ctr/commands/shim/shim.go - cmd/ctr/commands/signals.go - cmd/ctr/commands/signals_linux.go - cmd/ctr/commands/signals_notlinux.go - cmd/ctr/commands/snapshots/snapshots.go - cmd/ctr/commands/tasks/attach.go - cmd/ctr/commands/tasks/checkpoint.go - cmd/ctr/commands/tasks/delete.go - cmd/ctr/commands/tasks/exec.go - cmd/ctr/commands/tasks/kill.go - cmd/ctr/commands/tasks/list.go - cmd/ctr/commands/tasks/metrics.go - cmd/ctr/commands/tasks/pause.go - cmd/ctr/commands/tasks/ps.go - cmd/ctr/commands/tasks/resume.go - cmd/ctr/commands/tasks/start.go - cmd/ctr/commands/tasks/tasks.go - cmd/ctr/commands/tasks/tasks_unix.go - cmd/ctr/commands/tasks/tasks_windows.go - cmd/ctr/commands/utils.go - cmd/ctr/commands/version/version.go - cmd/ctr/main.go - cmd/gen-manpages/main.go - cmd/protoc-gen-gogoctrd/customnameid.go - cmd/protoc-gen-gogoctrd/main.go - container.go - container_checkpoint_opts.go - container_opts.go - container_opts_unix.go - container_restore_opts.go - containerd.service - containers/containers.go - containerstore.go - content/adaptor.go - content/content.go - content/helpers.go - content/helpers_test.go - content/local/locks.go - content/local/locks_test.go - content/local/readerat.go - content/local/store.go - content/local/store_bsd.go - content/local/store_openbsd.go - content/local/store_test.go - content/local/store_unix.go - content/local/store_windows.go - content/local/writer.go - content/proxy/content_reader.go - content/proxy/content_store.go - content/proxy/content_writer.go - content/testsuite/testsuite.go - contrib/apparmor/apparmor.go - contrib/apparmor/apparmor_test.go - contrib/apparmor/apparmor_unsupported.go - 
contrib/apparmor/template.go - contrib/fuzz/filters_fuzzers.go - contrib/fuzz/platforms_fuzzers.go - contrib/gce/configure.sh - contrib/nvidia/nvidia.go - contrib/seccomp/seccomp.go - contrib/seccomp/seccomp_default.go - contrib/seccomp/seccomp_default_unsupported.go - contrib/snapshotservice/service.go - defaults/defaults.go - defaults/defaults_unix.go - defaults/defaults_windows.go - diff.go - diff/apply/apply.go - diff/apply/apply_linux.go - diff/apply/apply_linux_test.go - diff/apply/apply_other.go - diff/diff.go - diff/lcow/lcow.go - diff/stream.go - diff/stream_unix.go - diff/stream_windows.go - diff/walking/differ.go - diff/walking/plugin/plugin.go - diff/windows/windows.go - errdefs/grpc.go - errdefs/grpc_test.go - events.go - events/events.go - events/exchange/exchange.go - events/exchange/exchange_test.go - export.go - filters/adaptor.go - filters/filter_test.go - filters/parser.go - filters/quote.go - filters/scanner.go - filters/scanner_test.go - gc/gc_test.go - gc/scheduler/scheduler.go - gc/scheduler/scheduler_test.go - grpc.go - identifiers/validate_test.go - image.go - image_store.go - images/annotations.go - images/archive/exporter.go - images/archive/reference.go - images/converter/default.go - images/converter/uncompress/uncompress.go - images/diffid.go - images/handlers.go - images/image.go - images/image_test.go - images/importexport.go - images/mediatypes.go - import.go - install.go - install_opts.go - integration/addition_gids_test.go - integration/client/benchmark_test.go - integration/client/client_test.go - integration/client/client_ttrpc_test.go - integration/client/client_unix_test.go - integration/client/client_windows_test.go - integration/client/container_checkpoint_test.go - integration/client/container_linux_test.go - integration/client/container_test.go - integration/client/content_test.go - integration/client/convert_test.go - integration/client/daemon_config_linux_test.go - integration/client/daemon_test.go - 
integration/client/export_test.go - integration/client/helpers_unix_test.go - integration/client/helpers_windows_test.go - integration/client/image_test.go - integration/client/import_test.go - integration/client/lease_test.go - integration/client/restart_monitor_linux_test.go - integration/client/signals_test.go - integration/client/snapshot_test.go - integration/client/task_opts_unix_test.go - integration/common.go - integration/container_log_test.go - integration/container_restart_test.go - integration/container_stats_test.go - integration/container_stop_test.go - integration/container_update_resources_test.go - integration/container_without_image_ref_test.go - integration/containerd_image_test.go - integration/duplicate_name_test.go - integration/image_load_test.go - integration/imagefs_info_test.go - integration/images/volume-copy-up/Dockerfile - integration/images/volume-copy-up/Makefile - integration/images/volume-ownership/Dockerfile - integration/images/volume-ownership/Makefile - integration/main_test.go - integration/no_metadata_test.go - integration/pod_dualstack_test.go - integration/pod_hostname_test.go - integration/restart_test.go - integration/runtime_handler_test.go - integration/sandbox_clean_remove_test.go - integration/truncindex_test.go - integration/volume_copy_up_test.go - labels/labels.go - labels/validate.go - labels/validate_test.go - lease.go - leases/context.go - leases/grpc.go - leases/id.go - leases/lease.go - leases/proxy/manager.go - log/context.go - log/context_test.go - log/logtest/context.go - log/logtest/log_hook.go - metadata/adaptors.go - metadata/bolt.go - metadata/boltutil/helpers.go - metadata/containers.go - metadata/containers_test.go - metadata/content.go - metadata/content_test.go - metadata/db.go - metadata/db_test.go - metadata/gc.go - metadata/gc_test.go - metadata/images.go - metadata/images_test.go - metadata/leases.go - metadata/leases_test.go - metadata/migrations.go - metadata/namespaces.go - 
metadata/namespaces_test.go - metadata/snapshot.go - metadata/snapshot_test.go - metrics/cgroups/cgroups.go - metrics/cgroups/v1/blkio.go - metrics/cgroups/v1/cgroups.go - metrics/cgroups/v1/cpu.go - metrics/cgroups/v1/hugetlb.go - metrics/cgroups/v1/memory.go - metrics/cgroups/v1/metric.go - metrics/cgroups/v1/metrics.go - metrics/cgroups/v1/oom.go - metrics/cgroups/v1/pids.go - metrics/cgroups/v2/cgroups.go - metrics/cgroups/v2/cpu.go - metrics/cgroups/v2/io.go - metrics/cgroups/v2/memory.go - metrics/cgroups/v2/metric.go - metrics/cgroups/v2/metrics.go - metrics/cgroups/v2/pids.go - metrics/types/v1/types.go - metrics/types/v2/types.go - mount/lookup_linux_test.go - mount/lookup_unix.go - mount/lookup_unsupported.go - mount/losetup_linux.go - mount/losetup_linux_test.go - mount/mount.go - mount/mount_freebsd.go - mount/mount_linux.go - mount/mount_linux_test.go - mount/mount_unix.go - mount/mount_windows.go - mount/mountinfo.go - mount/temp.go - mount/temp_unix.go - mount/temp_unsupported.go - namespaces.go - namespaces/context.go - namespaces/context_test.go - namespaces/grpc.go - namespaces/store.go - namespaces/ttrpc.go - namespaces/ttrpc_test.go - oci/client.go - oci/spec.go - oci/spec_opts.go - oci/spec_opts_linux.go - oci/spec_opts_linux_test.go - oci/spec_opts_nonlinux.go - oci/spec_opts_test.go - oci/spec_opts_unix.go - oci/spec_opts_unix_test.go - oci/spec_opts_windows.go - oci/spec_opts_windows_test.go - oci/spec_test.go - oci/utils_unix.go - pkg/apparmor/apparmor.go - pkg/apparmor/apparmor_linux.go - pkg/apparmor/apparmor_unsupported.go - pkg/atomic/atomic_boolean.go - pkg/atomic/atomic_boolean_test.go - pkg/cap/cap_linux_test.go - pkg/cri/annotations/annotations.go - pkg/cri/config/config.go - pkg/cri/config/config_test.go - pkg/cri/config/config_unix.go - pkg/cri/config/config_windows.go - pkg/cri/constants/constants.go - pkg/cri/cri.go - pkg/cri/io/container_io.go - pkg/cri/io/exec_io.go - pkg/cri/io/helpers.go - pkg/cri/io/helpers_unix.go - 
pkg/cri/io/helpers_windows.go - pkg/cri/io/logger.go - pkg/cri/io/logger_test.go - pkg/cri/opts/container.go - pkg/cri/opts/spec.go - pkg/cri/opts/spec_linux.go - pkg/cri/opts/spec_linux_test.go - pkg/cri/opts/spec_test.go - pkg/cri/opts/spec_windows.go - pkg/cri/opts/task.go - pkg/cri/platforms/default_unix.go - pkg/cri/platforms/default_windows.go - pkg/cri/server/cni_conf_syncer.go - pkg/cri/server/container_attach.go - pkg/cri/server/container_create.go - pkg/cri/server/container_create_linux.go - pkg/cri/server/container_create_linux_test.go - pkg/cri/server/container_create_other.go - pkg/cri/server/container_create_other_test.go - pkg/cri/server/container_create_test.go - pkg/cri/server/container_create_windows.go - pkg/cri/server/container_create_windows_test.go - pkg/cri/server/container_exec.go - pkg/cri/server/container_execsync.go - pkg/cri/server/container_list.go - pkg/cri/server/container_list_test.go - pkg/cri/server/container_log_reopen.go - pkg/cri/server/container_remove.go - pkg/cri/server/container_remove_test.go - pkg/cri/server/container_start.go - pkg/cri/server/container_start_test.go - pkg/cri/server/container_stats.go - pkg/cri/server/container_stats_list.go - pkg/cri/server/container_stats_list_linux.go - pkg/cri/server/container_stats_list_linux_test.go - pkg/cri/server/container_stats_list_other.go - pkg/cri/server/container_stats_list_windows.go - pkg/cri/server/container_status.go - pkg/cri/server/container_status_test.go - pkg/cri/server/container_stop.go - pkg/cri/server/container_stop_test.go - pkg/cri/server/container_update_resources_linux.go - pkg/cri/server/container_update_resources_linux_test.go - pkg/cri/server/container_update_resources_other.go - pkg/cri/server/container_update_resources_windows.go - pkg/cri/server/events.go - pkg/cri/server/events_test.go - pkg/cri/server/helpers.go - pkg/cri/server/helpers_linux.go - pkg/cri/server/helpers_linux_test.go - pkg/cri/server/helpers_other.go - 
pkg/cri/server/helpers_selinux_linux_test.go - pkg/cri/server/helpers_test.go - pkg/cri/server/helpers_windows.go - pkg/cri/server/image_list.go - pkg/cri/server/image_list_test.go - pkg/cri/server/image_pull.go - pkg/cri/server/image_pull_test.go - pkg/cri/server/image_remove.go - pkg/cri/server/image_status.go - pkg/cri/server/image_status_test.go - pkg/cri/server/imagefs_info.go - pkg/cri/server/imagefs_info_test.go - pkg/cri/server/instrumented_service.go - pkg/cri/server/opts.go - pkg/cri/server/restart.go - pkg/cri/server/sandbox_list.go - pkg/cri/server/sandbox_list_test.go - pkg/cri/server/sandbox_portforward.go - pkg/cri/server/sandbox_portforward_linux.go - pkg/cri/server/sandbox_portforward_other.go - pkg/cri/server/sandbox_portforward_windows.go - pkg/cri/server/sandbox_remove.go - pkg/cri/server/sandbox_run.go - pkg/cri/server/sandbox_run_linux.go - pkg/cri/server/sandbox_run_linux_test.go - pkg/cri/server/sandbox_run_other.go - pkg/cri/server/sandbox_run_other_test.go - pkg/cri/server/sandbox_run_test.go - pkg/cri/server/sandbox_run_windows.go - pkg/cri/server/sandbox_run_windows_test.go - pkg/cri/server/sandbox_status.go - pkg/cri/server/sandbox_status_test.go - pkg/cri/server/sandbox_stop.go - pkg/cri/server/sandbox_stop_test.go - pkg/cri/server/service.go - pkg/cri/server/service_linux.go - pkg/cri/server/service_other.go - pkg/cri/server/service_test.go - pkg/cri/server/service_windows.go - pkg/cri/server/snapshots.go - pkg/cri/server/status.go - pkg/cri/server/streaming.go - pkg/cri/server/streaming_test.go - pkg/cri/server/testing/fake_cni_plugin.go - pkg/cri/server/update_runtime_config.go - pkg/cri/server/update_runtime_config_test.go - pkg/cri/server/version.go - pkg/cri/store/container/container.go - pkg/cri/store/container/container_test.go - pkg/cri/store/container/fake_status.go - pkg/cri/store/container/metadata.go - pkg/cri/store/container/metadata_test.go - pkg/cri/store/container/status.go - pkg/cri/store/container/status_test.go - 
pkg/cri/store/errors.go - pkg/cri/store/errors_test.go - pkg/cri/store/image/fake_image.go - pkg/cri/store/image/image.go - pkg/cri/store/image/image_test.go - pkg/cri/store/image/sort.go - pkg/cri/store/image/sort_test.go - pkg/cri/store/label/label.go - pkg/cri/store/label/label_test.go - pkg/cri/store/sandbox/metadata.go - pkg/cri/store/sandbox/metadata_test.go - pkg/cri/store/sandbox/sandbox.go - pkg/cri/store/sandbox/sandbox_test.go - pkg/cri/store/sandbox/status.go - pkg/cri/store/sandbox/status_test.go - pkg/cri/store/snapshot/snapshot.go - pkg/cri/store/snapshot/snapshot_test.go - pkg/cri/store/util.go - pkg/cri/util/deep_copy.go - pkg/cri/util/deep_copy_test.go - pkg/cri/util/id.go - pkg/cri/util/image.go - pkg/cri/util/image_test.go - pkg/cri/util/strings.go - pkg/cri/util/strings_test.go - pkg/cri/util/util.go - pkg/dialer/dialer.go - pkg/dialer/dialer_unix.go - pkg/dialer/dialer_windows.go - pkg/ioutil/read_closer.go - pkg/ioutil/read_closer_test.go - pkg/ioutil/write_closer.go - pkg/ioutil/write_closer_test.go - pkg/ioutil/writer_group.go - pkg/ioutil/writer_group_test.go - pkg/netns/netns_other.go - pkg/netns/netns_windows.go - pkg/oom/oom.go - pkg/oom/v1/v1.go - pkg/oom/v2/v2.go - pkg/os/mount_linux.go - pkg/os/mount_other.go - pkg/os/mount_unix.go - pkg/os/os.go - pkg/os/os_unix.go - pkg/os/os_windows.go - pkg/os/os_windows_test.go - pkg/os/testing/fake_os.go - pkg/os/testing/fake_os_unix.go - pkg/process/deleted_state.go - pkg/process/exec.go - pkg/process/exec_state.go - pkg/process/init.go - pkg/process/init_state.go - pkg/process/io.go - pkg/process/io_test.go - pkg/process/io_util.go - pkg/process/process.go - pkg/process/types.go - pkg/process/utils.go - pkg/progress/bar.go - pkg/progress/escape.go - pkg/progress/humaans.go - pkg/progress/writer.go - pkg/registrar/registrar.go - pkg/registrar/registrar_test.go - pkg/seccomp/seccomp.go - pkg/seccomp/seccomp_unsupported.go - pkg/seed/seed.go - pkg/seed/seed_linux.go - pkg/seed/seed_other.go - 
pkg/seutil/seutil.go - pkg/stdio/platform.go - pkg/stdio/stdio.go - pkg/testutil/helpers.go - pkg/testutil/helpers_unix.go - pkg/testutil/helpers_windows.go - pkg/testutil/mount_linux.go - pkg/testutil/mount_other.go - pkg/timeout/timeout.go - pkg/ttrpcutil/client.go - pkg/userns/userns_linux.go - pkg/userns/userns_unsupported.go - platforms/compare.go - platforms/compare_test.go - platforms/cpuinfo.go - platforms/cpuinfo_test.go - platforms/database.go - platforms/defaults.go - platforms/defaults_test.go - platforms/defaults_unix.go - platforms/defaults_windows.go - platforms/defaults_windows_test.go - platforms/platforms_test.go - plugin/context.go - plugin/plugin.go - plugin/plugin_go18.go - plugin/plugin_other.go - process.go - protobuf/plugin/doc.go - protobuf/plugin/fieldpath/fieldpath.go - protobuf/plugin/helpers.go - pull.go - reference/reference.go - reference/reference_test.go - remotes/docker/auth/fetch.go - remotes/docker/auth/parse.go - remotes/docker/authorizer.go - remotes/docker/config/config_unix.go - remotes/docker/config/config_windows.go - remotes/docker/config/hosts_test.go - remotes/docker/converter.go - remotes/docker/errcode.go - remotes/docker/errdesc.go - remotes/docker/fetcher.go - remotes/docker/fetcher_test.go - remotes/docker/handler.go - remotes/docker/handler_test.go - remotes/docker/httpreadseeker.go - remotes/docker/pusher.go - remotes/docker/pusher_test.go - remotes/docker/registry.go - remotes/docker/registry_test.go - remotes/docker/resolver.go - remotes/docker/resolver_test.go - remotes/docker/schema1/converter.go - remotes/docker/scope.go - remotes/docker/scope_test.go - remotes/docker/status.go - remotes/errors/errors.go - remotes/handlers.go - remotes/handlers_test.go - remotes/resolver.go - rootfs/apply.go - rootfs/diff.go - rootfs/init.go - rootfs/init_linux.go - rootfs/init_other.go - runtime/events.go - runtime/linux/runctypes/doc.go - runtime/monitor.go - runtime/opts/opts_linux.go - runtime/restart/monitor/change.go - 
runtime/restart/monitor/monitor.go - runtime/runtime.go - runtime/task.go - runtime/task_list.go - runtime/typeurl.go - runtime/v1/linux/bundle.go - runtime/v1/linux/bundle_test.go - runtime/v1/linux/process.go - runtime/v1/linux/runtime.go - runtime/v1/linux/task.go - runtime/v1/shim.go - runtime/v1/shim/client/client.go - runtime/v1/shim/client/client_linux.go - runtime/v1/shim/client/client_unix.go - runtime/v1/shim/local.go - runtime/v1/shim/service.go - runtime/v1/shim/service_linux.go - runtime/v1/shim/service_unix.go - runtime/v1/shim/v1/doc.go - runtime/v1/shim/v1/shim.proto - runtime/v2/binary.go - runtime/v2/bundle.go - runtime/v2/bundle_default.go - runtime/v2/bundle_linux.go - runtime/v2/bundle_linux_test.go - runtime/v2/bundle_test.go - runtime/v2/example/cmd/main.go - runtime/v2/example/example.go - runtime/v2/logging/logging.go - runtime/v2/logging/logging_unix.go - runtime/v2/logging/logging_windows.go - runtime/v2/manager.go - runtime/v2/manager_unix.go - runtime/v2/manager_windows.go - runtime/v2/process.go - runtime/v2/runc/container.go - runtime/v2/runc/options/doc.go - runtime/v2/runc/platform.go - runtime/v2/runc/util.go - runtime/v2/runc/v1/service.go - runtime/v2/runc/v2/service.go - runtime/v2/shim.go - runtime/v2/shim/publisher.go - runtime/v2/shim/shim.go - runtime/v2/shim/shim_darwin.go - runtime/v2/shim/shim_freebsd.go - runtime/v2/shim/shim_linux.go - runtime/v2/shim/shim_test.go - runtime/v2/shim/shim_unix.go - runtime/v2/shim/shim_windows.go - runtime/v2/shim/util.go - runtime/v2/shim/util_unix.go - runtime/v2/shim/util_windows.go - runtime/v2/shim_unix.go - runtime/v2/shim_unix_test.go - runtime/v2/shim_windows.go - runtime/v2/shim_windows_test.go - runtime/v2/task/doc.go - runtime/v2/task/shim.proto - script/setup/install-cni-windows - script/setup/install-gotestsum - script/test/cri-integration.sh - script/test/utils.sh - services.go - services/containers/helpers.go - services/containers/local.go - services/containers/service.go - 
services/content/contentserver/contentserver.go - services/content/service.go - services/content/store.go - services/diff/local.go - services/diff/service.go - services/diff/service_unix.go - services/diff/service_windows.go - services/events/service.go - services/events/ttrpc.go - services/healthcheck/service.go - services/images/helpers.go - services/images/local.go - services/images/service.go - services/introspection/introspection.go - services/introspection/local.go - services/introspection/service.go - services/leases/local.go - services/leases/service.go - services/namespaces/local.go - services/namespaces/service.go - services/opt/path_unix.go - services/opt/path_windows.go - services/opt/service.go - services/server/config/config.go - services/server/config/config_test.go - services/server/server.go - services/server/server_linux.go - services/server/server_solaris.go - services/server/server_test.go - services/server/server_unsupported.go - services/server/server_windows.go - services/services.go - services/snapshots/service.go - services/snapshots/snapshotters.go - services/tasks/local.go - services/tasks/local_freebsd.go - services/tasks/local_unix.go - services/tasks/local_windows.go - services/tasks/service.go - services/version/service.go - signals.go - signals_unix.go - signals_windows.go - snapshots/benchsuite/benchmark.go - snapshots/benchsuite/benchmark_test.go - snapshots/btrfs/btrfs.go - snapshots/btrfs/btrfs_test.go - snapshots/btrfs/plugin/plugin.go - snapshots/devmapper/config.go - snapshots/devmapper/config_test.go - snapshots/devmapper/device_info.go - snapshots/devmapper/dmsetup/dmsetup.go - snapshots/devmapper/dmsetup/dmsetup_test.go - snapshots/devmapper/metadata.go - snapshots/devmapper/metadata_test.go - snapshots/devmapper/plugin/plugin.go - snapshots/devmapper/pool_device.go - snapshots/devmapper/pool_device_test.go - snapshots/devmapper/snapshotter.go - snapshots/devmapper/snapshotter_test.go - snapshots/lcow/lcow.go - 
snapshots/native/native.go - snapshots/native/native_default.go - snapshots/native/native_freebsd.go - snapshots/native/native_test.go - snapshots/native/plugin/plugin.go - snapshots/overlay/overlay.go - snapshots/overlay/overlay_test.go - snapshots/overlay/overlayutils/check.go - snapshots/overlay/overlayutils/check_test.go - snapshots/overlay/plugin/plugin.go - snapshots/proxy/proxy.go - snapshots/snapshotter.go - snapshots/storage/bolt.go - snapshots/storage/bolt_test.go - snapshots/storage/metastore_bench_test.go - snapshots/storage/metastore_test.go - snapshots/testsuite/helpers.go - snapshots/testsuite/helpers_linux.go - snapshots/testsuite/helpers_other.go - snapshots/testsuite/issues.go - snapshots/testsuite/testsuite.go - snapshots/testsuite/testsuite_unix.go - snapshots/testsuite/testsuite_windows.go - snapshots/windows/windows.go - snapshotter_default_linux.go - snapshotter_default_unix.go - snapshotter_default_windows.go - snapshotter_opts_unix.go - sys/epoll.go - sys/fds.go - sys/filesys_unix.go - sys/filesys_windows.go - sys/mount_linux.go - sys/mount_linux_test.go - sys/oom_linux.go - sys/oom_linux_test.go - sys/oom_unsupported.go - sys/reaper/reaper_unix.go - sys/reaper/reaper_utils_linux.go - sys/socket_unix.go - sys/socket_windows.go - sys/stat_bsd.go - sys/stat_openbsd.go - sys/stat_unix.go - sys/subprocess_unsafe_linux.go - sys/subprocess_unsafe_linux.s - sys/userns_deprecated.go - task.go - task_opts.go - task_opts_unix.go - test/build-utils.sh - test/push.sh - test/utils.sh - unpacker.go - vendor/github.com/containerd/aufs/aufs.go - vendor/github.com/containerd/aufs/plugin/plugin.go - vendor/github.com/containerd/btrfs/Makefile - vendor/github.com/containerd/btrfs/btrfs.c - vendor/github.com/containerd/btrfs/btrfs.go - vendor/github.com/containerd/btrfs/btrfs.h - vendor/github.com/containerd/btrfs/helpers.go - vendor/github.com/containerd/btrfs/info.go - vendor/github.com/containerd/btrfs/ioctl.go - 
vendor/github.com/containerd/cgroups/Makefile - vendor/github.com/containerd/cgroups/blkio.go - vendor/github.com/containerd/cgroups/cgroup.go - vendor/github.com/containerd/cgroups/control.go - vendor/github.com/containerd/cgroups/cpu.go - vendor/github.com/containerd/cgroups/cpuacct.go - vendor/github.com/containerd/cgroups/cpuset.go - vendor/github.com/containerd/cgroups/devices.go - vendor/github.com/containerd/cgroups/errors.go - vendor/github.com/containerd/cgroups/freezer.go - vendor/github.com/containerd/cgroups/hierarchy.go - vendor/github.com/containerd/cgroups/hugetlb.go - vendor/github.com/containerd/cgroups/memory.go - vendor/github.com/containerd/cgroups/named.go - vendor/github.com/containerd/cgroups/net_cls.go - vendor/github.com/containerd/cgroups/net_prio.go - vendor/github.com/containerd/cgroups/opts.go - vendor/github.com/containerd/cgroups/paths.go - vendor/github.com/containerd/cgroups/perf_event.go - vendor/github.com/containerd/cgroups/pids.go - vendor/github.com/containerd/cgroups/rdma.go - vendor/github.com/containerd/cgroups/state.go - vendor/github.com/containerd/cgroups/stats/v1/doc.go - vendor/github.com/containerd/cgroups/subsystem.go - vendor/github.com/containerd/cgroups/systemd.go - vendor/github.com/containerd/cgroups/ticks.go - vendor/github.com/containerd/cgroups/utils.go - vendor/github.com/containerd/cgroups/v1.go - vendor/github.com/containerd/cgroups/v2/cpu.go - vendor/github.com/containerd/cgroups/v2/ebpf.go - vendor/github.com/containerd/cgroups/v2/errors.go - vendor/github.com/containerd/cgroups/v2/hugetlb.go - vendor/github.com/containerd/cgroups/v2/io.go - vendor/github.com/containerd/cgroups/v2/manager.go - vendor/github.com/containerd/cgroups/v2/memory.go - vendor/github.com/containerd/cgroups/v2/paths.go - vendor/github.com/containerd/cgroups/v2/pids.go - vendor/github.com/containerd/cgroups/v2/rdma.go - vendor/github.com/containerd/cgroups/v2/state.go - vendor/github.com/containerd/cgroups/v2/stats/doc.go - 
vendor/github.com/containerd/cgroups/v2/utils.go - vendor/github.com/containerd/console/console.go - vendor/github.com/containerd/console/console_linux.go - vendor/github.com/containerd/console/console_unix.go - vendor/github.com/containerd/console/console_windows.go - vendor/github.com/containerd/console/pty_freebsd_cgo.go - vendor/github.com/containerd/console/pty_freebsd_nocgo.go - vendor/github.com/containerd/console/pty_unix.go - vendor/github.com/containerd/console/tc_darwin.go - vendor/github.com/containerd/console/tc_freebsd_cgo.go - vendor/github.com/containerd/console/tc_freebsd_nocgo.go - vendor/github.com/containerd/console/tc_linux.go - vendor/github.com/containerd/console/tc_netbsd.go - vendor/github.com/containerd/console/tc_openbsd_cgo.go - vendor/github.com/containerd/console/tc_solaris_cgo.go - vendor/github.com/containerd/console/tc_unix.go - vendor/github.com/containerd/continuity/context.go - vendor/github.com/containerd/continuity/devices/devices.go - vendor/github.com/containerd/continuity/devices/devices_unix.go - vendor/github.com/containerd/continuity/devices/devices_windows.go - vendor/github.com/containerd/continuity/devices/mknod_freebsd.go - vendor/github.com/containerd/continuity/devices/mknod_unix.go - vendor/github.com/containerd/continuity/digests.go - vendor/github.com/containerd/continuity/driver/driver.go - vendor/github.com/containerd/continuity/driver/driver_unix.go - vendor/github.com/containerd/continuity/driver/lchmod_linux.go - vendor/github.com/containerd/continuity/driver/lchmod_unix.go - vendor/github.com/containerd/continuity/driver/utils.go - vendor/github.com/containerd/continuity/fs/copy.go - vendor/github.com/containerd/continuity/fs/copy_darwinopenbsdsolaris.go - vendor/github.com/containerd/continuity/fs/copy_freebsd.go - vendor/github.com/containerd/continuity/fs/copy_linux.go - vendor/github.com/containerd/continuity/fs/copy_unix.go - vendor/github.com/containerd/continuity/fs/copy_windows.go - 
vendor/github.com/containerd/continuity/fs/diff.go - vendor/github.com/containerd/continuity/fs/diff_unix.go - vendor/github.com/containerd/continuity/fs/diff_windows.go - vendor/github.com/containerd/continuity/fs/dtype_linux.go - vendor/github.com/containerd/continuity/fs/du.go - vendor/github.com/containerd/continuity/fs/du_unix.go - vendor/github.com/containerd/continuity/fs/du_windows.go - vendor/github.com/containerd/continuity/fs/fstest/compare.go - vendor/github.com/containerd/continuity/fs/fstest/compare_unix.go - vendor/github.com/containerd/continuity/fs/fstest/compare_windows.go - vendor/github.com/containerd/continuity/fs/fstest/continuity_util.go - vendor/github.com/containerd/continuity/fs/fstest/file.go - vendor/github.com/containerd/continuity/fs/fstest/file_unix.go - vendor/github.com/containerd/continuity/fs/fstest/file_windows.go - vendor/github.com/containerd/continuity/fs/fstest/testsuite.go - vendor/github.com/containerd/continuity/fs/hardlink.go - vendor/github.com/containerd/continuity/fs/hardlink_unix.go - vendor/github.com/containerd/continuity/fs/hardlink_windows.go - vendor/github.com/containerd/continuity/fs/path.go - vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go - vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go - vendor/github.com/containerd/continuity/fs/time.go - vendor/github.com/containerd/continuity/hardlinks.go - vendor/github.com/containerd/continuity/hardlinks_unix.go - vendor/github.com/containerd/continuity/hardlinks_windows.go - vendor/github.com/containerd/continuity/ioutils.go - vendor/github.com/containerd/continuity/manifest.go - vendor/github.com/containerd/continuity/pathdriver/path_driver.go - vendor/github.com/containerd/continuity/proto/gen.go - vendor/github.com/containerd/continuity/resource.go - vendor/github.com/containerd/continuity/resource_unix.go - vendor/github.com/containerd/continuity/resource_windows.go - vendor/github.com/containerd/continuity/sysx/generate.sh - 
vendor/github.com/containerd/continuity/sysx/nodata_linux.go - vendor/github.com/containerd/continuity/sysx/nodata_solaris.go - vendor/github.com/containerd/continuity/sysx/nodata_unix.go - vendor/github.com/containerd/continuity/sysx/xattr.go - vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go - vendor/github.com/containerd/continuity/testutil/helpers.go - vendor/github.com/containerd/continuity/testutil/helpers_unix.go - vendor/github.com/containerd/continuity/testutil/helpers_windows.go - vendor/github.com/containerd/continuity/testutil/loopback/loopback_linux.go - vendor/github.com/containerd/continuity/testutil/mount_linux.go - vendor/github.com/containerd/continuity/testutil/mount_other.go - vendor/github.com/containerd/fifo/Makefile - vendor/github.com/containerd/fifo/errors.go - vendor/github.com/containerd/fifo/fifo.go - vendor/github.com/containerd/fifo/handle_linux.go - vendor/github.com/containerd/fifo/handle_nolinux.go - vendor/github.com/containerd/fifo/raw.go - vendor/github.com/containerd/fifo/utils.go - vendor/github.com/containerd/go-cni/cni.go - vendor/github.com/containerd/go-cni/deprecated.go - vendor/github.com/containerd/go-cni/errors.go - vendor/github.com/containerd/go-cni/helper.go - vendor/github.com/containerd/go-cni/namespace.go - vendor/github.com/containerd/go-cni/namespace_opts.go - vendor/github.com/containerd/go-cni/opts.go - vendor/github.com/containerd/go-cni/result.go - vendor/github.com/containerd/go-cni/testutils.go - vendor/github.com/containerd/go-cni/types.go - vendor/github.com/containerd/go-runc/command_linux.go - vendor/github.com/containerd/go-runc/command_other.go - vendor/github.com/containerd/go-runc/console.go - vendor/github.com/containerd/go-runc/container.go - vendor/github.com/containerd/go-runc/events.go - vendor/github.com/containerd/go-runc/io.go - vendor/github.com/containerd/go-runc/io_unix.go - vendor/github.com/containerd/go-runc/io_windows.go - 
vendor/github.com/containerd/go-runc/monitor.go - vendor/github.com/containerd/go-runc/runc.go - vendor/github.com/containerd/go-runc/runc_unix.go - vendor/github.com/containerd/go-runc/runc_windows.go - vendor/github.com/containerd/go-runc/utils.go - vendor/github.com/containerd/imgcrypt/images/encryption/client.go - vendor/github.com/containerd/imgcrypt/images/encryption/encryption.go - vendor/github.com/containerd/imgcrypt/payload.go - vendor/github.com/containerd/nri/Makefile - vendor/github.com/containerd/nri/client.go - vendor/github.com/containerd/nri/types/v1/types.go - vendor/github.com/containerd/ttrpc/channel.go - vendor/github.com/containerd/ttrpc/client.go - vendor/github.com/containerd/ttrpc/codec.go - vendor/github.com/containerd/ttrpc/config.go - vendor/github.com/containerd/ttrpc/handshake.go - vendor/github.com/containerd/ttrpc/interceptor.go - vendor/github.com/containerd/ttrpc/metadata.go - vendor/github.com/containerd/ttrpc/plugin/generator.go - vendor/github.com/containerd/ttrpc/server.go - vendor/github.com/containerd/ttrpc/services.go - vendor/github.com/containerd/ttrpc/types.go - vendor/github.com/containerd/ttrpc/unixcreds_linux.go - vendor/github.com/containerd/typeurl/doc.go - vendor/github.com/containerd/typeurl/types.go - vendor/github.com/containerd/zfs/plugin/plugin.go - vendor/github.com/containerd/zfs/zfs.go - vendor/github.com/containernetworking/cni/libcni/api.go - vendor/github.com/containernetworking/cni/libcni/conf.go - vendor/github.com/containernetworking/cni/pkg/invoke/args.go - vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go - vendor/github.com/containernetworking/cni/pkg/invoke/exec.go - vendor/github.com/containernetworking/cni/pkg/invoke/find.go - vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go - vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go - vendor/github.com/containernetworking/cni/pkg/types/020/types.go - 
vendor/github.com/containernetworking/cni/pkg/types/args.go - vendor/github.com/containernetworking/cni/pkg/types/current/types.go - vendor/github.com/containernetworking/cni/pkg/types/types.go - vendor/github.com/containernetworking/cni/pkg/utils/utils.go - vendor/github.com/containernetworking/cni/pkg/version/conf.go - vendor/github.com/containernetworking/cni/pkg/version/plugin.go - vendor/github.com/containernetworking/cni/pkg/version/reconcile.go - vendor/github.com/containernetworking/cni/pkg/version/version.go - vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go - vendor/github.com/containers/ocicrypt/Makefile - vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go - vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go - vendor/github.com/containers/ocicrypt/config/config.go - vendor/github.com/containers/ocicrypt/config/constructors.go - vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go - vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go - vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go - vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go - vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go - vendor/github.com/containers/ocicrypt/encryption.go - vendor/github.com/containers/ocicrypt/gpg.go - vendor/github.com/containers/ocicrypt/gpgvault.go - vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go - vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go - vendor/github.com/containers/ocicrypt/keywrap/keywrap.go - vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go - vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go - vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go - vendor/github.com/containers/ocicrypt/reader.go - vendor/github.com/containers/ocicrypt/utils/delayedreader.go - vendor/github.com/containers/ocicrypt/utils/ioutils.go 
- vendor/github.com/containers/ocicrypt/utils/testing.go - vendor/github.com/containers/ocicrypt/utils/utils.go - vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go - vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go - vendor/github.com/coreos/go-systemd/v22/dbus/methods.go - vendor/github.com/coreos/go-systemd/v22/dbus/properties.go - vendor/github.com/coreos/go-systemd/v22/dbus/set.go - vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go - vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go - vendor/github.com/gogo/googleapis/google/rpc/code.proto - vendor/github.com/gogo/googleapis/google/rpc/error_details.proto - vendor/github.com/gogo/googleapis/google/rpc/status.proto - vendor/github.com/google/gofuzz/fuzz.go - vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go - vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go - vendor/github.com/opencontainers/image-spec/identity/helpers.go - vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go - vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go - vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go - vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go - vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go - vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go - vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go - vendor/github.com/opencontainers/image-spec/specs-go/version.go - vendor/github.com/opencontainers/image-spec/specs-go/versioned.go - vendor/github.com/prometheus/client_golang/prometheus/collector.go - vendor/github.com/prometheus/client_golang/prometheus/counter.go - vendor/github.com/prometheus/client_golang/prometheus/desc.go - vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go - vendor/github.com/prometheus/client_golang/prometheus/fnv.go - 
vendor/github.com/prometheus/client_golang/prometheus/gauge.go - vendor/github.com/prometheus/client_golang/prometheus/go_collector.go - vendor/github.com/prometheus/client_golang/prometheus/histogram.go - vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go - vendor/github.com/prometheus/client_golang/prometheus/labels.go - vendor/github.com/prometheus/client_golang/prometheus/metric.go - vendor/github.com/prometheus/client_golang/prometheus/observer.go - vendor/github.com/prometheus/client_golang/prometheus/process_collector.go - vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go - vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go - vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go - vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go - vendor/github.com/prometheus/client_golang/prometheus/registry.go - vendor/github.com/prometheus/client_golang/prometheus/summary.go - vendor/github.com/prometheus/client_golang/prometheus/timer.go - vendor/github.com/prometheus/client_golang/prometheus/untyped.go - vendor/github.com/prometheus/client_golang/prometheus/value.go - vendor/github.com/prometheus/client_golang/prometheus/vec.go - vendor/github.com/prometheus/client_golang/prometheus/wrap.go - vendor/github.com/prometheus/common/expfmt/decode.go - vendor/github.com/prometheus/common/expfmt/encode.go - vendor/github.com/prometheus/common/expfmt/openmetrics_create.go - vendor/github.com/prometheus/common/expfmt/text_create.go - vendor/github.com/prometheus/common/expfmt/text_parse.go - vendor/github.com/prometheus/common/model/alert.go - vendor/github.com/prometheus/common/model/fingerprinting.go - vendor/github.com/prometheus/common/model/fnv.go - vendor/github.com/prometheus/common/model/labels.go - vendor/github.com/prometheus/common/model/labelset.go - vendor/github.com/prometheus/common/model/metric.go - 
vendor/github.com/prometheus/common/model/signature.go - vendor/github.com/prometheus/common/model/silence.go - vendor/github.com/prometheus/common/model/time.go - vendor/github.com/prometheus/common/model/value.go - vendor/github.com/prometheus/procfs/Makefile - vendor/github.com/prometheus/procfs/arp.go - vendor/github.com/prometheus/procfs/buddyinfo.go - vendor/github.com/prometheus/procfs/crypto.go - vendor/github.com/prometheus/procfs/fs.go - vendor/github.com/prometheus/procfs/fscache.go - vendor/github.com/prometheus/procfs/internal/fs/fs.go - vendor/github.com/prometheus/procfs/internal/util/parse.go - vendor/github.com/prometheus/procfs/internal/util/readfile.go - vendor/github.com/prometheus/procfs/internal/util/valueparser.go - vendor/github.com/prometheus/procfs/ipvs.go - vendor/github.com/prometheus/procfs/loadavg.go - vendor/github.com/prometheus/procfs/mdstat.go - vendor/github.com/prometheus/procfs/meminfo.go - vendor/github.com/prometheus/procfs/mountinfo.go - vendor/github.com/prometheus/procfs/mountstats.go - vendor/github.com/prometheus/procfs/net_conntrackstat.go - vendor/github.com/prometheus/procfs/net_dev.go - vendor/github.com/prometheus/procfs/net_ip_socket.go - vendor/github.com/prometheus/procfs/net_protocols.go - vendor/github.com/prometheus/procfs/net_sockstat.go - vendor/github.com/prometheus/procfs/net_softnet.go - vendor/github.com/prometheus/procfs/net_tcp.go - vendor/github.com/prometheus/procfs/net_udp.go - vendor/github.com/prometheus/procfs/net_unix.go - vendor/github.com/prometheus/procfs/proc.go - vendor/github.com/prometheus/procfs/proc_cgroup.go - vendor/github.com/prometheus/procfs/proc_environ.go - vendor/github.com/prometheus/procfs/proc_fdinfo.go - vendor/github.com/prometheus/procfs/proc_io.go - vendor/github.com/prometheus/procfs/proc_limits.go - vendor/github.com/prometheus/procfs/proc_ns.go - vendor/github.com/prometheus/procfs/proc_psi.go - vendor/github.com/prometheus/procfs/proc_stat.go - 
vendor/github.com/prometheus/procfs/proc_status.go - vendor/github.com/prometheus/procfs/schedstat.go - vendor/github.com/prometheus/procfs/slab.go - vendor/github.com/prometheus/procfs/stat.go - vendor/github.com/prometheus/procfs/swaps.go - vendor/github.com/prometheus/procfs/ttar - vendor/github.com/prometheus/procfs/xfrm.go - vendor/github.com/stefanberger/go-pkcs11uri/Makefile - vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go - vendor/go.opencensus.io/internal/internal.go - vendor/go.opencensus.io/internal/sanitize.go - vendor/go.opencensus.io/internal/traceinternals.go - vendor/go.opencensus.io/trace/basetypes.go - vendor/go.opencensus.io/trace/config.go - vendor/go.opencensus.io/trace/evictedqueue.go - vendor/go.opencensus.io/trace/export.go - vendor/go.opencensus.io/trace/lrumap.go - vendor/go.opencensus.io/trace/sampling.go - vendor/go.opencensus.io/trace/spanbucket.go - vendor/go.opencensus.io/trace/spanstore.go - vendor/go.opencensus.io/trace/status_codes.go - vendor/go.opencensus.io/trace/trace.go - vendor/google.golang.org/grpc/balancer.go - vendor/google.golang.org/grpc/balancer/base/balancer.go - vendor/google.golang.org/grpc/balancer_conn_wrappers.go - vendor/google.golang.org/grpc/balancer_v1_wrapper.go - vendor/google.golang.org/grpc/call.go - vendor/google.golang.org/grpc/clientconn.go - vendor/google.golang.org/grpc/codec.go - vendor/google.golang.org/grpc/codes/code_string.go - vendor/google.golang.org/grpc/credentials/go12.go - vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go - vendor/google.golang.org/grpc/credentials/tls.go - vendor/google.golang.org/grpc/dialoptions.go - vendor/google.golang.org/grpc/grpclog/logger.go - vendor/google.golang.org/grpc/grpclog/loggerv2.go - vendor/google.golang.org/grpc/health/client.go - vendor/google.golang.org/grpc/health/regenerate.sh - vendor/google.golang.org/grpc/interceptor.go - vendor/google.golang.org/grpc/internal/binarylog/env_config.go - 
vendor/google.golang.org/grpc/internal/binarylog/method_logger.go - vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh - vendor/google.golang.org/grpc/internal/binarylog/sink.go - vendor/google.golang.org/grpc/internal/binarylog/util.go - vendor/google.golang.org/grpc/internal/channelz/types.go - vendor/google.golang.org/grpc/internal/channelz/types_linux.go - vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go - vendor/google.golang.org/grpc/internal/channelz/util_linux.go - vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go - vendor/google.golang.org/grpc/internal/resolver/dns/go113.go - vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go - vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go - vendor/google.golang.org/grpc/internal/transport/controlbuf.go - vendor/google.golang.org/grpc/internal/transport/defaults.go - vendor/google.golang.org/grpc/internal/transport/flowcontrol.go - vendor/google.golang.org/grpc/internal/transport/http2_client.go - vendor/google.golang.org/grpc/internal/transport/http2_server.go - vendor/google.golang.org/grpc/internal/transport/http_util.go - vendor/google.golang.org/grpc/naming/dns_resolver.go - vendor/google.golang.org/grpc/picker_wrapper.go - vendor/google.golang.org/grpc/pickfirst.go - vendor/google.golang.org/grpc/preloader.go - vendor/google.golang.org/grpc/proxy.go - vendor/google.golang.org/grpc/resolver_conn_wrapper.go - vendor/google.golang.org/grpc/rpc_util.go - vendor/google.golang.org/grpc/server.go - vendor/google.golang.org/grpc/service_config.go - vendor/google.golang.org/grpc/stats/handlers.go - vendor/google.golang.org/grpc/stats/stats.go - vendor/google.golang.org/grpc/stream.go - vendor/google.golang.org/grpc/trace.go - vendor/google.golang.org/grpc/version.go - vendor/gopkg.in/square/go-jose.v2/asymmetric.go - vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go - vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go - 
vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go - vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go - vendor/gopkg.in/square/go-jose.v2/crypter.go - vendor/gopkg.in/square/go-jose.v2/encoding.go - vendor/gopkg.in/square/go-jose.v2/jwe.go - vendor/gopkg.in/square/go-jose.v2/jwk.go - vendor/gopkg.in/square/go-jose.v2/jws.go - vendor/gopkg.in/square/go-jose.v2/opaque.go - vendor/gopkg.in/square/go-jose.v2/shared.go - vendor/gopkg.in/square/go-jose.v2/signing.go - vendor/gopkg.in/square/go-jose.v2/symmetric.go - vendor/gopkg.in/yaml.v2/NOTICE - vendor/gopkg.in/yaml.v3/NOTICE - vendor/gopkg.in/yaml.v3/decode.go - vendor/gopkg.in/yaml.v3/encode.go - vendor/gopkg.in/yaml.v3/resolve.go - vendor/gopkg.in/yaml.v3/sorter.go - vendor/k8s.io/api/core/v1/annotation_key_constants.go - vendor/k8s.io/api/core/v1/doc.go - vendor/k8s.io/api/core/v1/lifecycle.go - vendor/k8s.io/api/core/v1/objectreference.go - vendor/k8s.io/api/core/v1/register.go - vendor/k8s.io/api/core/v1/resource.go - vendor/k8s.io/api/core/v1/taint.go - vendor/k8s.io/api/core/v1/toleration.go - vendor/k8s.io/api/core/v1/types.go - vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go - vendor/k8s.io/api/core/v1/well_known_labels.go - vendor/k8s.io/api/core/v1/well_known_taints.go - vendor/k8s.io/apimachinery/pkg/api/errors/errors.go - vendor/k8s.io/apimachinery/pkg/api/resource/amount.go - vendor/k8s.io/apimachinery/pkg/api/resource/math.go - vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go - vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go - vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go - vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go - 
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go - vendor/k8s.io/apimachinery/pkg/conversion/converter.go - vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go - vendor/k8s.io/apimachinery/pkg/conversion/helper.go - vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go - vendor/k8s.io/apimachinery/pkg/fields/fields.go - vendor/k8s.io/apimachinery/pkg/fields/requirements.go - vendor/k8s.io/apimachinery/pkg/fields/selector.go - vendor/k8s.io/apimachinery/pkg/labels/labels.go - vendor/k8s.io/apimachinery/pkg/labels/selector.go - vendor/k8s.io/apimachinery/pkg/runtime/codec.go - vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go - vendor/k8s.io/apimachinery/pkg/runtime/converter.go - vendor/k8s.io/apimachinery/pkg/runtime/embedded.go - vendor/k8s.io/apimachinery/pkg/runtime/error.go - vendor/k8s.io/apimachinery/pkg/runtime/extension.go - vendor/k8s.io/apimachinery/pkg/runtime/helper.go - vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go - vendor/k8s.io/apimachinery/pkg/runtime/mapper.go - 
vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go - vendor/k8s.io/apimachinery/pkg/runtime/register.go - vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go - vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go - vendor/k8s.io/apimachinery/pkg/runtime/scheme.go - vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go - vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go - vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go - vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go - vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go - vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go - vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go - vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go - vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go - vendor/k8s.io/apimachinery/pkg/runtime/types.go - vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go - vendor/k8s.io/apimachinery/pkg/selection/operator.go - vendor/k8s.io/apimachinery/pkg/types/namespacedname.go - vendor/k8s.io/apimachinery/pkg/types/nodename.go - vendor/k8s.io/apimachinery/pkg/types/patch.go - vendor/k8s.io/apimachinery/pkg/types/uid.go - vendor/k8s.io/apimachinery/pkg/util/clock/clock.go - vendor/k8s.io/apimachinery/pkg/util/errors/errors.go - vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go - vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go - vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go - vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go - vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go - vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go - vendor/k8s.io/apimachinery/pkg/util/json/json.go - vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go - vendor/k8s.io/apimachinery/pkg/util/net/http.go - vendor/k8s.io/apimachinery/pkg/util/net/interface.go - 
vendor/k8s.io/apimachinery/pkg/util/net/port_range.go - vendor/k8s.io/apimachinery/pkg/util/net/port_split.go - vendor/k8s.io/apimachinery/pkg/util/net/util.go - vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go - vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go - vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go - vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go - vendor/k8s.io/apimachinery/pkg/util/validation/validation.go - vendor/k8s.io/apimachinery/pkg/util/wait/wait.go - vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go - vendor/k8s.io/apimachinery/pkg/version/doc.go - vendor/k8s.io/apimachinery/pkg/version/helpers.go - vendor/k8s.io/apimachinery/pkg/version/types.go - vendor/k8s.io/apimachinery/pkg/watch/filter.go - vendor/k8s.io/apimachinery/pkg/watch/mux.go - vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go - vendor/k8s.io/apimachinery/pkg/watch/watch.go - vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go - vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go - vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go - vendor/k8s.io/client-go/pkg/version/base.go - vendor/k8s.io/client-go/pkg/version/doc.go - 
vendor/k8s.io/client-go/pkg/version/version.go - vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go - vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go - vendor/k8s.io/client-go/rest/client.go - vendor/k8s.io/client-go/rest/config.go - vendor/k8s.io/client-go/rest/exec.go - vendor/k8s.io/client-go/rest/plugin.go - vendor/k8s.io/client-go/rest/request.go - vendor/k8s.io/client-go/rest/transport.go - vendor/k8s.io/client-go/rest/url_utils.go - vendor/k8s.io/client-go/rest/urlbackoff.go - vendor/k8s.io/client-go/rest/warnings.go - vendor/k8s.io/client-go/rest/watch/decoder.go - vendor/k8s.io/client-go/rest/watch/encoder.go - vendor/k8s.io/client-go/tools/clientcmd/api/doc.go - vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go - vendor/k8s.io/client-go/tools/clientcmd/api/register.go - vendor/k8s.io/client-go/tools/clientcmd/api/types.go - vendor/k8s.io/client-go/tools/remotecommand/errorstream.go - vendor/k8s.io/client-go/tools/remotecommand/reader.go - vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go - vendor/k8s.io/client-go/tools/remotecommand/resize.go - vendor/k8s.io/client-go/tools/remotecommand/v1.go - vendor/k8s.io/client-go/tools/remotecommand/v2.go - vendor/k8s.io/client-go/tools/remotecommand/v3.go - vendor/k8s.io/client-go/tools/remotecommand/v4.go - vendor/k8s.io/client-go/transport/cache.go - vendor/k8s.io/client-go/transport/cert_rotation.go - vendor/k8s.io/client-go/transport/config.go - vendor/k8s.io/client-go/transport/round_trippers.go - vendor/k8s.io/client-go/transport/spdy/spdy.go - vendor/k8s.io/client-go/transport/token_source.go - vendor/k8s.io/client-go/transport/transport.go - vendor/k8s.io/client-go/util/cert/cert.go - vendor/k8s.io/client-go/util/cert/csr.go - vendor/k8s.io/client-go/util/cert/io.go - vendor/k8s.io/client-go/util/cert/pem.go - vendor/k8s.io/client-go/util/cert/server_inspection.go - vendor/k8s.io/client-go/util/exec/exec.go - vendor/k8s.io/client-go/util/flowcontrol/backoff.go - 
vendor/k8s.io/client-go/util/flowcontrol/throttle.go - vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go - vendor/k8s.io/client-go/util/workqueue/delaying_queue.go - vendor/k8s.io/client-go/util/workqueue/metrics.go - vendor/k8s.io/client-go/util/workqueue/parallelizer.go - vendor/k8s.io/client-go/util/workqueue/queue.go - vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go - vendor/k8s.io/component-base/logs/logreduction/logreduction.go - vendor/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/constants.go - vendor/k8s.io/cri-api/pkg/apis/services.go - vendor/k8s.io/utils/exec/exec.go - vendor/k8s.io/utils/integer/integer.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/allocator.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/fields.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/list.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/listreflect.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/listunstructured.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/map.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/structreflect.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/valuereflect.go - vendor/sigs.k8s.io/structured-merge-diff/v4/value/valueunstructured.go - version/version.go -Copyright: 2011-2016 Canonical Ltd. - 2011-2019 Canonical Ltd - 2013 Matt T. Proud - 2013-2020 The Prometheus Authors - 2014 Docker, Inc. - 2014-2017 Google Inc. - 2014-2018 Square Inc. - 2014-2019 gRPC authors. - 2014-2020 The Kubernetes Authors. - 2015-2018 CoreOS, Inc. 
- 2015-2019 CNI authors - 2016 The Linux Foundation - 2017 Prometheus Team - 2017 Roger Luethi - 2017-2019 OpenCensus Authors - 2020 IBM Corporation, - The Mo Authors. - The containerd Authors - The containerd Authors. - The docker Authors. - The ocicrypt Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: .empty-mod/go.mod - .gitattributes - .github/ISSUE_TEMPLATE/bug_report.md - .github/ISSUE_TEMPLATE/config.yml - .github/ISSUE_TEMPLATE/feature_request.md - .github/workflows/ci.yml - .github/workflows/codeql.yml - .github/workflows/nightly.yml - .github/workflows/release.yml - .gitignore - .golangci.yml - .mailmap - .zuul.yaml - .zuul/playbooks/containerd-build/integration-test.yaml - .zuul/playbooks/containerd-build/run.yaml - .zuul/playbooks/containerd-build/unit-test.yaml - ADOPTERS.md - BUILDING.md - Protobuild.toml - README.md - RELEASES.md - ROADMAP.md - SCOPE.md - api/1.0.pb.txt - api/1.1.pb.txt - api/1.2.pb.txt - api/README.md - api/events/container.pb.go - api/events/content.pb.go - api/events/image.pb.go - api/events/namespace.pb.go - api/events/snapshot.pb.go - api/events/task.pb.go - api/next.pb.txt - api/services/containers/v1/containers.pb.go - api/services/content/v1/content.pb.go - api/services/diff/v1/diff.pb.go - api/services/events/v1/events.pb.go - api/services/images/v1/images.pb.go - 
api/services/introspection/v1/introspection.pb.go - api/services/leases/v1/leases.pb.go - api/services/namespaces/v1/namespace.pb.go - api/services/snapshots/v1/snapshots.pb.go - api/services/tasks/v1/tasks.pb.go - api/services/ttrpc/events/v1/events.pb.go - api/services/version/v1/version.pb.go - api/types/descriptor.pb.go - api/types/metrics.pb.go - api/types/mount.pb.go - api/types/platform.pb.go - api/types/task/task.pb.go - code-of-conduct.md - codecov.yml - contrib/Dockerfile.test - contrib/README.md - contrib/ansible/README.md - contrib/ansible/cri-containerd.yaml - contrib/ansible/tasks/binaries.yaml - contrib/ansible/tasks/bootstrap_centos.yaml - contrib/ansible/tasks/bootstrap_ubuntu.yaml - contrib/ansible/tasks/k8s.yaml - contrib/ansible/vars/vars.yaml - contrib/apparmor/template_test.go - contrib/autocomplete/ctr - contrib/autocomplete/zsh_autocomplete - contrib/aws/snapshotter_bench_cf.yml - contrib/aws/snapshotter_bench_readme.md - contrib/gce/cloud-init/master.yaml - contrib/gce/cloud-init/node.yaml - contrib/gce/cni.template - contrib/gce/env - contrib/linuxkit/README.md - design/architecture.md - design/architecture.png - design/data-flow.md - design/data-flow.png - design/lifecycle.md - design/mounts.md - design/snapshot_model.png - design/snapshots.md - docs/.editorconfig - docs/PLUGINS.md - docs/RUNC.md - docs/SECURITY_AUDIT.pdf - docs/client-opts.md - docs/content-flow.md - docs/cri/architecture.md - docs/cri/architecture.png - docs/cri/config.md - docs/cri/containerd.png - docs/cri/cri.png - docs/cri/crictl.md - docs/cri/decryption.md - docs/cri/installation.md - docs/cri/performance.png - docs/cri/registry.md - docs/cri/testing.md - docs/garbage-collection.md - docs/getting-started.md - docs/hosts.md - docs/man/containerd-config.8.md - docs/man/containerd-config.toml.5.md - docs/managed-opt.md - docs/namespaces.md - docs/ops.md - docs/remote-snapshotter.md - docs/rootless.md - docs/stream_processors.md - go.mod - go.sum - 
integration/client/go.mod - integration/client/go.sum - integration/image_list.sample.toml - pkg/runtimeoptions/v1/api.pb.go - pkg/runtimeoptions/v1/api.proto - protobuf/plugin/fieldpath.pb.go - releases/README.md - releases/v1.0.0.toml - releases/v1.1.0.toml - releases/v1.2.0.toml - releases/v1.3.0.toml - releases/v1.4.0.toml - releases/v1.5.0.toml - releases/v1.5.1.toml - releases/v1.5.2.toml - releases/v1.5.3.toml - releases/v1.5.4.toml - releases/v1.5.5.toml - releases/v1.5.6.toml - releases/v1.5.7.toml - releases/v1.5.8.toml - releases/v1.5.9.toml - reports/2017-01-13.md - reports/2017-01-20.md - reports/2017-01-27.md - reports/2017-02-10.md - reports/2017-02-24.md - reports/2017-03-10.md - reports/2017-03-17.md - reports/2017-03-24.md - reports/2017-04-28.md - reports/2017-05-05.md - reports/2017-05-19.md - reports/2017-05-26.md - reports/2017-06-09.md - reports/2017-06-23.md - runtime/linux/runctypes/1.0.pb.txt - runtime/linux/runctypes/next.pb.txt - runtime/linux/runctypes/runc.pb.go - runtime/linux/runctypes/runc.proto - runtime/v1/shim/v1/shim.pb.go - runtime/v2/README.md - runtime/v2/example/README.md - runtime/v2/runc/options/next.pb.txt - runtime/v2/runc/options/oci.pb.go - runtime/v2/runc/options/oci.proto - runtime/v2/task/shim.pb.go - script/setup/critools-version - script/setup/imgcrypt-version - script/setup/runc-version - snapshots/devmapper/README.md - test/e2e_node/init.yaml - vendor/github.com/Microsoft/go-winio/.gitignore - vendor/github.com/Microsoft/go-winio/CODEOWNERS - vendor/github.com/Microsoft/go-winio/README.md - vendor/github.com/Microsoft/go-winio/backup.go - vendor/github.com/Microsoft/go-winio/backuptar/noop.go - vendor/github.com/Microsoft/go-winio/backuptar/strconv.go - vendor/github.com/Microsoft/go-winio/backuptar/tar.go - vendor/github.com/Microsoft/go-winio/ea.go - vendor/github.com/Microsoft/go-winio/file.go - vendor/github.com/Microsoft/go-winio/fileinfo.go - vendor/github.com/Microsoft/go-winio/go.mod - 
vendor/github.com/Microsoft/go-winio/go.sum - vendor/github.com/Microsoft/go-winio/hvsock.go - vendor/github.com/Microsoft/go-winio/pipe.go - vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go - vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go - vendor/github.com/Microsoft/go-winio/pkg/etw/eventdatadescriptor.go - vendor/github.com/Microsoft/go-winio/pkg/etw/eventdescriptor.go - vendor/github.com/Microsoft/go-winio/pkg/etw/eventmetadata.go - vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go - vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go - vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go - vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go - vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go - vendor/github.com/Microsoft/go-winio/pkg/etw/providerglobal.go - vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_32.go - vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_64.go - vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go - vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go - vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go - vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go - vendor/github.com/Microsoft/go-winio/pkg/fs/fs_windows.go - vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go - vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go - vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go - vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go - vendor/github.com/Microsoft/go-winio/privilege.go - vendor/github.com/Microsoft/go-winio/reparse.go - vendor/github.com/Microsoft/go-winio/sd.go - vendor/github.com/Microsoft/go-winio/syscall.go - vendor/github.com/Microsoft/go-winio/vhd/vhd.go - vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go - vendor/github.com/Microsoft/go-winio/zsyscall_windows.go - vendor/github.com/Microsoft/hcsshim/.gitattributes - vendor/github.com/Microsoft/hcsshim/.gitignore 
- vendor/github.com/Microsoft/hcsshim/CODEOWNERS - vendor/github.com/Microsoft/hcsshim/Protobuild.toml - vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/doc.go - vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/next.pb.txt - vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go - vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto - vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/doc.go - vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/next.pb.txt - vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go - vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto - vendor/github.com/Microsoft/hcsshim/computestorage/attach.go - vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go - vendor/github.com/Microsoft/hcsshim/computestorage/detach.go - vendor/github.com/Microsoft/hcsshim/computestorage/export.go - vendor/github.com/Microsoft/hcsshim/computestorage/format.go - vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go - vendor/github.com/Microsoft/hcsshim/computestorage/import.go - vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go - vendor/github.com/Microsoft/hcsshim/computestorage/mount.go - vendor/github.com/Microsoft/hcsshim/computestorage/setup.go - vendor/github.com/Microsoft/hcsshim/computestorage/storage.go - vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go - vendor/github.com/Microsoft/hcsshim/container.go - vendor/github.com/Microsoft/hcsshim/errors.go - vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go - vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go - vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go - vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go - vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 - 
vendor/github.com/Microsoft/hcsshim/go.mod - vendor/github.com/Microsoft/hcsshim/go.sum - vendor/github.com/Microsoft/hcsshim/hcn/hcn.go - vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go - vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go - vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go - vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go - vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go - vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go - vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go - vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go - vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go - vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go - vendor/github.com/Microsoft/hcsshim/hcsshim.go - vendor/github.com/Microsoft/hcsshim/hnsendpoint.go - vendor/github.com/Microsoft/hcsshim/hnsglobals.go - vendor/github.com/Microsoft/hcsshim/hnsnetwork.go - vendor/github.com/Microsoft/hcsshim/hnspolicy.go - vendor/github.com/Microsoft/hcsshim/hnspolicylist.go - vendor/github.com/Microsoft/hcsshim/hnssupport.go - vendor/github.com/Microsoft/hcsshim/interface.go - vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go - vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go - 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go - 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go - 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go - 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go - 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go - vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go - vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go - vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go - vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go - vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go - vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go - vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go - vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go - vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go - vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go - vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go - vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go - vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go - vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go - vendor/github.com/Microsoft/hcsshim/internal/log/g.go - vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go - vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go - vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go - vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go - vendor/github.com/Microsoft/hcsshim/internal/oc/span.go - vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go - 
vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go - vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go - vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go - vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go - vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go - vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go - vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go - vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go - 
vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go - vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go - vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go - vendor/github.com/Microsoft/hcsshim/layer.go - vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go - vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go - 
vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go - vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go - vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go - vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go - vendor/github.com/Microsoft/hcsshim/process.go - vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go - vendor/github.com/beorn7/perks/quantile/exampledata.txt - vendor/github.com/beorn7/perks/quantile/stream.go - vendor/github.com/bits-and-blooms/bitset/.gitignore - vendor/github.com/bits-and-blooms/bitset/.travis.yml - vendor/github.com/bits-and-blooms/bitset/README.md - vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml - vendor/github.com/bits-and-blooms/bitset/bitset.go - vendor/github.com/bits-and-blooms/bitset/go.mod - vendor/github.com/bits-and-blooms/bitset/go.sum - vendor/github.com/bits-and-blooms/bitset/popcnt.go - vendor/github.com/bits-and-blooms/bitset/popcnt_19.go - vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go - vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s - vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go - vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go - vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go - vendor/github.com/cespare/xxhash/v2/.travis.yml - vendor/github.com/cespare/xxhash/v2/README.md - vendor/github.com/cespare/xxhash/v2/go.mod - vendor/github.com/cespare/xxhash/v2/go.sum - vendor/github.com/cespare/xxhash/v2/xxhash.go - vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go - vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s - vendor/github.com/cespare/xxhash/v2/xxhash_other.go - vendor/github.com/cespare/xxhash/v2/xxhash_safe.go - vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go - vendor/github.com/cilium/ebpf/.clang-format - 
vendor/github.com/cilium/ebpf/.gitignore - vendor/github.com/cilium/ebpf/.golangci.yaml - vendor/github.com/cilium/ebpf/ARCHITECTURE.md - vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md - vendor/github.com/cilium/ebpf/CONTRIBUTING.md - vendor/github.com/cilium/ebpf/Makefile - vendor/github.com/cilium/ebpf/README.md - vendor/github.com/cilium/ebpf/asm/alu.go - vendor/github.com/cilium/ebpf/asm/alu_string.go - vendor/github.com/cilium/ebpf/asm/doc.go - vendor/github.com/cilium/ebpf/asm/func.go - vendor/github.com/cilium/ebpf/asm/func_string.go - vendor/github.com/cilium/ebpf/asm/instruction.go - vendor/github.com/cilium/ebpf/asm/jump.go - vendor/github.com/cilium/ebpf/asm/jump_string.go - vendor/github.com/cilium/ebpf/asm/load_store.go - vendor/github.com/cilium/ebpf/asm/load_store_string.go - vendor/github.com/cilium/ebpf/asm/opcode.go - vendor/github.com/cilium/ebpf/asm/opcode_string.go - vendor/github.com/cilium/ebpf/asm/register.go - vendor/github.com/cilium/ebpf/collection.go - vendor/github.com/cilium/ebpf/doc.go - vendor/github.com/cilium/ebpf/elf_reader.go - vendor/github.com/cilium/ebpf/elf_reader_fuzz.go - vendor/github.com/cilium/ebpf/go.mod - vendor/github.com/cilium/ebpf/go.sum - vendor/github.com/cilium/ebpf/info.go - vendor/github.com/cilium/ebpf/internal/btf/btf.go - vendor/github.com/cilium/ebpf/internal/btf/btf_types.go - vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go - vendor/github.com/cilium/ebpf/internal/btf/core.go - vendor/github.com/cilium/ebpf/internal/btf/doc.go - vendor/github.com/cilium/ebpf/internal/btf/ext_info.go - vendor/github.com/cilium/ebpf/internal/btf/fuzz.go - vendor/github.com/cilium/ebpf/internal/btf/strings.go - vendor/github.com/cilium/ebpf/internal/btf/types.go - vendor/github.com/cilium/ebpf/internal/cpu.go - vendor/github.com/cilium/ebpf/internal/elf.go - vendor/github.com/cilium/ebpf/internal/endian.go - vendor/github.com/cilium/ebpf/internal/errors.go - vendor/github.com/cilium/ebpf/internal/fd.go - 
vendor/github.com/cilium/ebpf/internal/feature.go - vendor/github.com/cilium/ebpf/internal/io.go - vendor/github.com/cilium/ebpf/internal/pinning.go - vendor/github.com/cilium/ebpf/internal/ptr.go - vendor/github.com/cilium/ebpf/internal/ptr_32_be.go - vendor/github.com/cilium/ebpf/internal/ptr_32_le.go - vendor/github.com/cilium/ebpf/internal/ptr_64.go - vendor/github.com/cilium/ebpf/internal/syscall.go - vendor/github.com/cilium/ebpf/internal/syscall_string.go - vendor/github.com/cilium/ebpf/internal/unix/types_linux.go - vendor/github.com/cilium/ebpf/internal/unix/types_other.go - vendor/github.com/cilium/ebpf/internal/version.go - vendor/github.com/cilium/ebpf/link/cgroup.go - vendor/github.com/cilium/ebpf/link/doc.go - vendor/github.com/cilium/ebpf/link/iter.go - vendor/github.com/cilium/ebpf/link/kprobe.go - vendor/github.com/cilium/ebpf/link/link.go - vendor/github.com/cilium/ebpf/link/netns.go - vendor/github.com/cilium/ebpf/link/perf_event.go - vendor/github.com/cilium/ebpf/link/platform.go - vendor/github.com/cilium/ebpf/link/program.go - vendor/github.com/cilium/ebpf/link/raw_tracepoint.go - vendor/github.com/cilium/ebpf/link/syscalls.go - vendor/github.com/cilium/ebpf/link/tracepoint.go - vendor/github.com/cilium/ebpf/link/uprobe.go - vendor/github.com/cilium/ebpf/linker.go - vendor/github.com/cilium/ebpf/map.go - vendor/github.com/cilium/ebpf/marshalers.go - vendor/github.com/cilium/ebpf/prog.go - vendor/github.com/cilium/ebpf/run-tests.sh - vendor/github.com/cilium/ebpf/syscalls.go - vendor/github.com/cilium/ebpf/types.go - vendor/github.com/cilium/ebpf/types_string.go - vendor/github.com/containerd/aufs/.golangci.yml - vendor/github.com/containerd/aufs/README.md - vendor/github.com/containerd/aufs/go.mod - vendor/github.com/containerd/aufs/go.sum - vendor/github.com/containerd/btrfs/.gitignore - vendor/github.com/containerd/btrfs/README.md - vendor/github.com/containerd/btrfs/go.mod - vendor/github.com/containerd/btrfs/go.sum - 
vendor/github.com/containerd/cgroups/.gitignore - vendor/github.com/containerd/cgroups/Protobuild.toml - vendor/github.com/containerd/cgroups/README.md - vendor/github.com/containerd/cgroups/Vagrantfile - vendor/github.com/containerd/cgroups/go.mod - vendor/github.com/containerd/cgroups/go.sum - vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go - vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt - vendor/github.com/containerd/cgroups/stats/v1/metrics.proto - vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.go - vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.txt - vendor/github.com/containerd/cgroups/v2/stats/metrics.proto - vendor/github.com/containerd/console/.golangci.yml - vendor/github.com/containerd/console/README.md - vendor/github.com/containerd/console/go.mod - vendor/github.com/containerd/console/go.sum - vendor/github.com/containerd/continuity/.gitignore - vendor/github.com/containerd/continuity/.golangci.yml - vendor/github.com/containerd/continuity/.mailmap - vendor/github.com/containerd/continuity/AUTHORS - vendor/github.com/containerd/continuity/README.md - vendor/github.com/containerd/continuity/go.mod - vendor/github.com/containerd/continuity/go.sum - vendor/github.com/containerd/continuity/proto/manifest.pb.go - vendor/github.com/containerd/continuity/proto/manifest.proto - vendor/github.com/containerd/continuity/sysx/README.md - vendor/github.com/containerd/fifo/.gitattributes - vendor/github.com/containerd/fifo/.gitignore - vendor/github.com/containerd/fifo/.golangci.yml - vendor/github.com/containerd/fifo/go.mod - vendor/github.com/containerd/fifo/go.sum - vendor/github.com/containerd/fifo/readme.md - vendor/github.com/containerd/go-cni/.golangci.yml - vendor/github.com/containerd/go-cni/README.md - vendor/github.com/containerd/go-cni/go.mod - vendor/github.com/containerd/go-cni/go.sum - vendor/github.com/containerd/go-runc/.travis.yml - vendor/github.com/containerd/go-runc/README.md - 
vendor/github.com/containerd/go-runc/go.mod - vendor/github.com/containerd/go-runc/go.sum - vendor/github.com/containerd/imgcrypt/.gitignore - vendor/github.com/containerd/imgcrypt/.golangci.yml - vendor/github.com/containerd/imgcrypt/CHANGES - vendor/github.com/containerd/imgcrypt/MAINTAINERS - vendor/github.com/containerd/imgcrypt/README.md - vendor/github.com/containerd/imgcrypt/go.mod - vendor/github.com/containerd/imgcrypt/go.sum - vendor/github.com/containerd/nri/.golangci.yml - vendor/github.com/containerd/nri/README.md - vendor/github.com/containerd/nri/go.mod - vendor/github.com/containerd/nri/go.sum - vendor/github.com/containerd/ttrpc/.gitignore - vendor/github.com/containerd/ttrpc/README.md - vendor/github.com/containerd/ttrpc/go.mod - vendor/github.com/containerd/ttrpc/go.sum - vendor/github.com/containerd/typeurl/.gitignore - vendor/github.com/containerd/typeurl/README.md - vendor/github.com/containerd/typeurl/go.mod - vendor/github.com/containerd/typeurl/go.sum - vendor/github.com/containerd/zfs/.gitignore - vendor/github.com/containerd/zfs/README.md - vendor/github.com/containerd/zfs/codecov.yml - vendor/github.com/containerd/zfs/go.mod - vendor/github.com/containerd/zfs/go.sum - vendor/github.com/containernetworking/plugins/pkg/ns/README.md - vendor/github.com/containers/ocicrypt/.travis.yml - vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md - vendor/github.com/containers/ocicrypt/MAINTAINERS - vendor/github.com/containers/ocicrypt/README.md - vendor/github.com/containers/ocicrypt/SECURITY.md - vendor/github.com/containers/ocicrypt/go.mod - vendor/github.com/containers/ocicrypt/go.sum - vendor/github.com/containers/ocicrypt/spec/spec.go - vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go - vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto - vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go - vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go - 
vendor/github.com/docker/go-events/.gitignore - vendor/github.com/docker/go-events/MAINTAINERS - vendor/github.com/docker/go-events/broadcast.go - vendor/github.com/docker/go-events/channel.go - vendor/github.com/docker/go-events/errors.go - vendor/github.com/docker/go-events/event.go - vendor/github.com/docker/go-events/filter.go - vendor/github.com/docker/go-events/queue.go - vendor/github.com/docker/go-events/retry.go - vendor/github.com/docker/go-metrics/README.md - vendor/github.com/docker/go-metrics/counter.go - vendor/github.com/docker/go-metrics/docs.go - vendor/github.com/docker/go-metrics/gauge.go - vendor/github.com/docker/go-metrics/go.mod - vendor/github.com/docker/go-metrics/go.sum - vendor/github.com/docker/go-metrics/handler.go - vendor/github.com/docker/go-metrics/helpers.go - vendor/github.com/docker/go-metrics/namespace.go - vendor/github.com/docker/go-metrics/register.go - vendor/github.com/docker/go-metrics/timer.go - vendor/github.com/docker/go-metrics/unit.go - vendor/github.com/docker/go-units/MAINTAINERS - vendor/github.com/docker/go-units/circle.yml - vendor/github.com/docker/go-units/duration.go - vendor/github.com/docker/go-units/size.go - vendor/github.com/docker/go-units/ulimit.go - vendor/github.com/docker/spdystream/CONTRIBUTING.md - vendor/github.com/docker/spdystream/MAINTAINERS - vendor/github.com/docker/spdystream/README.md - vendor/github.com/docker/spdystream/connection.go - vendor/github.com/docker/spdystream/handlers.go - vendor/github.com/docker/spdystream/priority.go - vendor/github.com/docker/spdystream/stream.go - vendor/github.com/docker/spdystream/utils.go - vendor/github.com/emicklei/go-restful/.gitignore - vendor/github.com/emicklei/go-restful/.travis.yml - vendor/github.com/emicklei/go-restful/CHANGES.md - vendor/github.com/emicklei/go-restful/Makefile - vendor/github.com/emicklei/go-restful/Srcfile - vendor/github.com/emicklei/go-restful/bench_test.sh - vendor/github.com/emicklei/go-restful/coverage.sh - 
vendor/github.com/emicklei/go-restful/json.go - vendor/github.com/emicklei/go-restful/jsoniter.go - vendor/github.com/emicklei/go-restful/log/log.go - vendor/github.com/emicklei/go-restful/mime.go - vendor/github.com/fsnotify/fsnotify/.editorconfig - vendor/github.com/fsnotify/fsnotify/.gitattributes - vendor/github.com/fsnotify/fsnotify/.gitignore - vendor/github.com/fsnotify/fsnotify/.travis.yml - vendor/github.com/fsnotify/fsnotify/AUTHORS - vendor/github.com/fsnotify/fsnotify/CHANGELOG.md - vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md - vendor/github.com/fsnotify/fsnotify/README.md - vendor/github.com/fsnotify/fsnotify/go.mod - vendor/github.com/fsnotify/fsnotify/go.sum - vendor/github.com/go-logr/logr/README.md - vendor/github.com/go-logr/logr/go.mod - vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md - vendor/github.com/godbus/dbus/v5/MAINTAINERS - vendor/github.com/godbus/dbus/v5/README.markdown - vendor/github.com/godbus/dbus/v5/auth.go - vendor/github.com/godbus/dbus/v5/auth_anonymous.go - vendor/github.com/godbus/dbus/v5/auth_external.go - vendor/github.com/godbus/dbus/v5/auth_sha1.go - vendor/github.com/godbus/dbus/v5/call.go - vendor/github.com/godbus/dbus/v5/conn.go - vendor/github.com/godbus/dbus/v5/conn_darwin.go - vendor/github.com/godbus/dbus/v5/conn_other.go - vendor/github.com/godbus/dbus/v5/conn_unix.go - vendor/github.com/godbus/dbus/v5/conn_windows.go - vendor/github.com/godbus/dbus/v5/dbus.go - vendor/github.com/godbus/dbus/v5/decoder.go - vendor/github.com/godbus/dbus/v5/default_handler.go - vendor/github.com/godbus/dbus/v5/doc.go - vendor/github.com/godbus/dbus/v5/encoder.go - vendor/github.com/godbus/dbus/v5/export.go - vendor/github.com/godbus/dbus/v5/go.mod - vendor/github.com/godbus/dbus/v5/go.sum - vendor/github.com/godbus/dbus/v5/homedir.go - vendor/github.com/godbus/dbus/v5/homedir_dynamic.go - vendor/github.com/godbus/dbus/v5/homedir_static.go - vendor/github.com/godbus/dbus/v5/match.go - 
vendor/github.com/godbus/dbus/v5/message.go - vendor/github.com/godbus/dbus/v5/object.go - vendor/github.com/godbus/dbus/v5/sequence.go - vendor/github.com/godbus/dbus/v5/sequential_handler.go - vendor/github.com/godbus/dbus/v5/server_interfaces.go - vendor/github.com/godbus/dbus/v5/sig.go - vendor/github.com/godbus/dbus/v5/transport_darwin.go - vendor/github.com/godbus/dbus/v5/transport_generic.go - vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go - vendor/github.com/godbus/dbus/v5/transport_tcp.go - vendor/github.com/godbus/dbus/v5/transport_unix.go - vendor/github.com/godbus/dbus/v5/transport_unixcred_dragonfly.go - vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go - vendor/github.com/godbus/dbus/v5/transport_unixcred_linux.go - vendor/github.com/godbus/dbus/v5/transport_unixcred_openbsd.go - vendor/github.com/godbus/dbus/v5/variant.go - vendor/github.com/godbus/dbus/v5/variant_lexer.go - vendor/github.com/godbus/dbus/v5/variant_parser.go - vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go - vendor/github.com/gogo/googleapis/google/rpc/status.pb.go - vendor/github.com/gogo/protobuf/AUTHORS - vendor/github.com/gogo/protobuf/CONTRIBUTORS - vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go - vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden - vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go - vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go - vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go - vendor/github.com/gogo/protobuf/types/any.pb.go - vendor/github.com/gogo/protobuf/types/api.pb.go - vendor/github.com/gogo/protobuf/types/duration.pb.go - vendor/github.com/gogo/protobuf/types/empty.pb.go - vendor/github.com/gogo/protobuf/types/field_mask.pb.go - vendor/github.com/gogo/protobuf/types/protosize.go - vendor/github.com/gogo/protobuf/types/source_context.pb.go - vendor/github.com/gogo/protobuf/types/struct.pb.go - 
vendor/github.com/gogo/protobuf/types/timestamp.pb.go - vendor/github.com/gogo/protobuf/types/type.pb.go - vendor/github.com/gogo/protobuf/types/wrappers.pb.go - vendor/github.com/golang/protobuf/AUTHORS - vendor/github.com/golang/protobuf/CONTRIBUTORS - vendor/github.com/golang/protobuf/ptypes/any/any.pb.go - vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go - vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go - vendor/github.com/google/gofuzz/.travis.yml - vendor/github.com/google/gofuzz/CONTRIBUTING.md - vendor/github.com/google/gofuzz/README.md - vendor/github.com/google/gofuzz/go.mod - vendor/github.com/google/uuid/.travis.yml - vendor/github.com/google/uuid/CONTRIBUTING.md - vendor/github.com/google/uuid/CONTRIBUTORS - vendor/github.com/google/uuid/README.md - vendor/github.com/google/uuid/go.mod - vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore - vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml - vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md - vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md - vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go - vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile - vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go - vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go - vendor/github.com/hashicorp/errwrap/README.md - vendor/github.com/hashicorp/errwrap/errwrap.go - vendor/github.com/hashicorp/errwrap/go.mod - vendor/github.com/hashicorp/go-multierror/.travis.yml - vendor/github.com/hashicorp/go-multierror/Makefile - vendor/github.com/hashicorp/go-multierror/README.md - vendor/github.com/hashicorp/go-multierror/append.go - vendor/github.com/hashicorp/go-multierror/flatten.go - vendor/github.com/hashicorp/go-multierror/format.go - vendor/github.com/hashicorp/go-multierror/go.mod - vendor/github.com/hashicorp/go-multierror/go.sum - 
vendor/github.com/hashicorp/go-multierror/multierror.go - vendor/github.com/hashicorp/go-multierror/prefix.go - vendor/github.com/hashicorp/go-multierror/sort.go - vendor/github.com/imdario/mergo/.deepsource.toml - vendor/github.com/imdario/mergo/.gitignore - vendor/github.com/imdario/mergo/.travis.yml - vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md - vendor/github.com/imdario/mergo/README.md - vendor/github.com/imdario/mergo/go.mod - vendor/github.com/imdario/mergo/go.sum - vendor/github.com/json-iterator/go/.codecov.yml - vendor/github.com/json-iterator/go/.gitignore - vendor/github.com/json-iterator/go/.travis.yml - vendor/github.com/json-iterator/go/Gopkg.lock - vendor/github.com/json-iterator/go/Gopkg.toml - vendor/github.com/json-iterator/go/README.md - vendor/github.com/json-iterator/go/adapter.go - vendor/github.com/json-iterator/go/any.go - vendor/github.com/json-iterator/go/any_array.go - vendor/github.com/json-iterator/go/any_bool.go - vendor/github.com/json-iterator/go/any_float.go - vendor/github.com/json-iterator/go/any_int32.go - vendor/github.com/json-iterator/go/any_int64.go - vendor/github.com/json-iterator/go/any_invalid.go - vendor/github.com/json-iterator/go/any_nil.go - vendor/github.com/json-iterator/go/any_number.go - vendor/github.com/json-iterator/go/any_object.go - vendor/github.com/json-iterator/go/any_str.go - vendor/github.com/json-iterator/go/any_uint32.go - vendor/github.com/json-iterator/go/any_uint64.go - vendor/github.com/json-iterator/go/build.sh - vendor/github.com/json-iterator/go/config.go - vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md - vendor/github.com/json-iterator/go/go.mod - vendor/github.com/json-iterator/go/go.sum - vendor/github.com/json-iterator/go/iter.go - vendor/github.com/json-iterator/go/iter_array.go - vendor/github.com/json-iterator/go/iter_float.go - vendor/github.com/json-iterator/go/iter_int.go - vendor/github.com/json-iterator/go/iter_object.go - 
vendor/github.com/json-iterator/go/iter_skip.go - vendor/github.com/json-iterator/go/iter_skip_sloppy.go - vendor/github.com/json-iterator/go/iter_skip_strict.go - vendor/github.com/json-iterator/go/iter_str.go - vendor/github.com/json-iterator/go/jsoniter.go - vendor/github.com/json-iterator/go/pool.go - vendor/github.com/json-iterator/go/reflect.go - vendor/github.com/json-iterator/go/reflect_array.go - vendor/github.com/json-iterator/go/reflect_dynamic.go - vendor/github.com/json-iterator/go/reflect_extension.go - vendor/github.com/json-iterator/go/reflect_json_number.go - vendor/github.com/json-iterator/go/reflect_json_raw_message.go - vendor/github.com/json-iterator/go/reflect_map.go - vendor/github.com/json-iterator/go/reflect_marshaler.go - vendor/github.com/json-iterator/go/reflect_native.go - vendor/github.com/json-iterator/go/reflect_optional.go - vendor/github.com/json-iterator/go/reflect_slice.go - vendor/github.com/json-iterator/go/reflect_struct_decoder.go - vendor/github.com/json-iterator/go/reflect_struct_encoder.go - vendor/github.com/json-iterator/go/stream.go - vendor/github.com/json-iterator/go/stream_float.go - vendor/github.com/json-iterator/go/stream_int.go - vendor/github.com/json-iterator/go/stream_str.go - vendor/github.com/json-iterator/go/test.sh - vendor/github.com/klauspost/compress/fse/README.md - vendor/github.com/klauspost/compress/fse/decompress.go - vendor/github.com/klauspost/compress/huff0/.gitignore - vendor/github.com/klauspost/compress/huff0/README.md - vendor/github.com/klauspost/compress/huff0/compress.go - vendor/github.com/klauspost/compress/huff0/decompress.go - vendor/github.com/klauspost/compress/huff0/huff0.go - vendor/github.com/klauspost/compress/snappy/.gitignore - vendor/github.com/klauspost/compress/snappy/AUTHORS - vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS - vendor/github.com/klauspost/compress/snappy/README - vendor/github.com/klauspost/compress/snappy/runbench.cmd - 
vendor/github.com/klauspost/compress/zstd/README.md - vendor/github.com/klauspost/compress/zstd/blocktype_string.go - vendor/github.com/klauspost/compress/zstd/dict.go - vendor/github.com/klauspost/compress/zstd/enc_base.go - vendor/github.com/klauspost/compress/zstd/encoder_options.go - vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md - vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go - vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go - vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s - vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go - vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go - vendor/github.com/klauspost/compress/zstd/zstd.go - vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore - vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile - vendor/github.com/miekg/pkcs11/.gitignore - vendor/github.com/miekg/pkcs11/.travis.yml - vendor/github.com/miekg/pkcs11/Makefile.release - vendor/github.com/miekg/pkcs11/README.md - vendor/github.com/miekg/pkcs11/go.mod - vendor/github.com/miekg/pkcs11/hsm.db - vendor/github.com/miekg/pkcs11/pkcs11go.h - vendor/github.com/miekg/pkcs11/release.go - vendor/github.com/miekg/pkcs11/softhsm.conf - vendor/github.com/miekg/pkcs11/softhsm2.conf - vendor/github.com/miekg/pkcs11/vendor.go - vendor/github.com/mistifyio/go-zfs/.gitignore - vendor/github.com/mistifyio/go-zfs/.travis.yml - vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md - vendor/github.com/mistifyio/go-zfs/README.md - vendor/github.com/mistifyio/go-zfs/Vagrantfile - vendor/github.com/mistifyio/go-zfs/error.go - vendor/github.com/mistifyio/go-zfs/utils.go - vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go - vendor/github.com/mistifyio/go-zfs/utils_solaris.go - vendor/github.com/mistifyio/go-zfs/zfs.go - vendor/github.com/mistifyio/go-zfs/zpool.go - vendor/github.com/moby/locker/README.md - 
vendor/github.com/moby/locker/go.mod - vendor/github.com/moby/locker/locker.go - vendor/github.com/moby/sys/mountinfo/doc.go - vendor/github.com/moby/sys/mountinfo/go.mod - vendor/github.com/moby/sys/mountinfo/go.sum - vendor/github.com/moby/sys/mountinfo/mounted_linux.go - vendor/github.com/moby/sys/mountinfo/mounted_unix.go - vendor/github.com/moby/sys/mountinfo/mountinfo.go - vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go - vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go - vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go - vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go - vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go - vendor/github.com/moby/sys/symlink/README.md - vendor/github.com/moby/sys/symlink/doc.go - vendor/github.com/moby/sys/symlink/fs_unix.go - vendor/github.com/moby/sys/symlink/fs_windows.go - vendor/github.com/moby/sys/symlink/go.mod - vendor/github.com/moby/sys/symlink/go.sum - vendor/github.com/modern-go/concurrent/.gitignore - vendor/github.com/modern-go/concurrent/.travis.yml - vendor/github.com/modern-go/concurrent/README.md - vendor/github.com/modern-go/concurrent/executor.go - vendor/github.com/modern-go/concurrent/go_above_19.go - vendor/github.com/modern-go/concurrent/go_below_19.go - vendor/github.com/modern-go/concurrent/log.go - vendor/github.com/modern-go/concurrent/test.sh - vendor/github.com/modern-go/concurrent/unbounded_executor.go - vendor/github.com/modern-go/reflect2/.gitignore - vendor/github.com/modern-go/reflect2/.travis.yml - vendor/github.com/modern-go/reflect2/Gopkg.lock - vendor/github.com/modern-go/reflect2/Gopkg.toml - vendor/github.com/modern-go/reflect2/README.md - vendor/github.com/modern-go/reflect2/go_above_17.go - vendor/github.com/modern-go/reflect2/go_above_19.go - vendor/github.com/modern-go/reflect2/go_below_17.go - vendor/github.com/modern-go/reflect2/go_below_19.go - vendor/github.com/modern-go/reflect2/reflect2.go - 
vendor/github.com/modern-go/reflect2/reflect2_amd64.s - vendor/github.com/modern-go/reflect2/reflect2_kind.go - vendor/github.com/modern-go/reflect2/relfect2_386.s - vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s - vendor/github.com/modern-go/reflect2/relfect2_arm.s - vendor/github.com/modern-go/reflect2/relfect2_arm64.s - vendor/github.com/modern-go/reflect2/relfect2_mips64x.s - vendor/github.com/modern-go/reflect2/relfect2_mipsx.s - vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s - vendor/github.com/modern-go/reflect2/relfect2_s390x.s - vendor/github.com/modern-go/reflect2/safe_field.go - vendor/github.com/modern-go/reflect2/safe_map.go - vendor/github.com/modern-go/reflect2/safe_slice.go - vendor/github.com/modern-go/reflect2/safe_struct.go - vendor/github.com/modern-go/reflect2/safe_type.go - vendor/github.com/modern-go/reflect2/test.sh - vendor/github.com/modern-go/reflect2/type_map.go - vendor/github.com/modern-go/reflect2/unsafe_array.go - vendor/github.com/modern-go/reflect2/unsafe_eface.go - vendor/github.com/modern-go/reflect2/unsafe_field.go - vendor/github.com/modern-go/reflect2/unsafe_iface.go - vendor/github.com/modern-go/reflect2/unsafe_link.go - vendor/github.com/modern-go/reflect2/unsafe_map.go - vendor/github.com/modern-go/reflect2/unsafe_ptr.go - vendor/github.com/modern-go/reflect2/unsafe_slice.go - vendor/github.com/modern-go/reflect2/unsafe_struct.go - vendor/github.com/modern-go/reflect2/unsafe_type.go - vendor/github.com/opencontainers/go-digest/.mailmap - vendor/github.com/opencontainers/go-digest/.pullapprove.yml - vendor/github.com/opencontainers/go-digest/.travis.yml - vendor/github.com/opencontainers/go-digest/MAINTAINERS - vendor/github.com/opencontainers/go-digest/go.mod - vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go - vendor/github.com/opencontainers/runc/libcontainer/user/user.go - vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go - 
vendor/github.com/opencontainers/runtime-spec/specs-go/config.go - vendor/github.com/opencontainers/runtime-spec/specs-go/state.go - vendor/github.com/opencontainers/runtime-spec/specs-go/version.go - vendor/github.com/opencontainers/selinux/go-selinux/doc.go - vendor/github.com/opencontainers/selinux/go-selinux/label/label.go - vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go - vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go - vendor/github.com/opencontainers/selinux/go-selinux/selinux.go - vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go - vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go - vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go - vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md - vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go - vendor/github.com/pelletier/go-toml/.dockerignore - vendor/github.com/pelletier/go-toml/.gitignore - vendor/github.com/pelletier/go-toml/CONTRIBUTING.md - vendor/github.com/pelletier/go-toml/Dockerfile - vendor/github.com/pelletier/go-toml/Makefile - vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md - vendor/github.com/pelletier/go-toml/README.md - vendor/github.com/pelletier/go-toml/azure-pipelines.yml - vendor/github.com/pelletier/go-toml/benchmark.sh - vendor/github.com/pelletier/go-toml/doc.go - vendor/github.com/pelletier/go-toml/example-crlf.toml - vendor/github.com/pelletier/go-toml/example.toml - vendor/github.com/pelletier/go-toml/fuzz.go - vendor/github.com/pelletier/go-toml/fuzz.sh - vendor/github.com/pelletier/go-toml/fuzzit.sh - vendor/github.com/pelletier/go-toml/go.mod - vendor/github.com/pelletier/go-toml/go.sum - vendor/github.com/pelletier/go-toml/keysparsing.go - vendor/github.com/pelletier/go-toml/lexer.go - vendor/github.com/pelletier/go-toml/marshal.go - vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml - 
vendor/github.com/pelletier/go-toml/marshal_test.toml - vendor/github.com/pelletier/go-toml/parser.go - vendor/github.com/pelletier/go-toml/position.go - vendor/github.com/pelletier/go-toml/token.go - vendor/github.com/pelletier/go-toml/toml.go - vendor/github.com/pelletier/go-toml/tomltree_create.go - vendor/github.com/pelletier/go-toml/tomltree_write.go - vendor/github.com/pkg/errors/.gitignore - vendor/github.com/pkg/errors/.travis.yml - vendor/github.com/pkg/errors/Makefile - vendor/github.com/pkg/errors/README.md - vendor/github.com/pkg/errors/appveyor.yml - vendor/github.com/pkg/errors/errors.go - vendor/github.com/pkg/errors/go113.go - vendor/github.com/pkg/errors/stack.go - vendor/github.com/pmezard/go-difflib/difflib/difflib.go - vendor/github.com/prometheus/client_golang/prometheus/.gitignore - vendor/github.com/prometheus/client_golang/prometheus/README.md - vendor/github.com/prometheus/client_model/go/metrics.pb.go - vendor/github.com/prometheus/procfs/.gitignore - vendor/github.com/prometheus/procfs/.golangci.yml - vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md - vendor/github.com/prometheus/procfs/CONTRIBUTING.md - vendor/github.com/prometheus/procfs/MAINTAINERS.md - vendor/github.com/prometheus/procfs/README.md - vendor/github.com/prometheus/procfs/SECURITY.md - vendor/github.com/prometheus/procfs/fixtures.ttar - vendor/github.com/prometheus/procfs/go.mod - vendor/github.com/prometheus/procfs/go.sum - vendor/github.com/russross/blackfriday/v2/.gitignore - vendor/github.com/russross/blackfriday/v2/.travis.yml - vendor/github.com/russross/blackfriday/v2/LICENSE.txt - vendor/github.com/russross/blackfriday/v2/README.md - vendor/github.com/russross/blackfriday/v2/doc.go - vendor/github.com/russross/blackfriday/v2/esc.go - vendor/github.com/russross/blackfriday/v2/go.mod - vendor/github.com/russross/blackfriday/v2/node.go - vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml - vendor/github.com/shurcooL/sanitized_anchor_name/README.md - 
vendor/github.com/shurcooL/sanitized_anchor_name/go.mod - vendor/github.com/shurcooL/sanitized_anchor_name/main.go - vendor/github.com/sirupsen/logrus/.gitignore - vendor/github.com/sirupsen/logrus/.golangci.yml - vendor/github.com/sirupsen/logrus/.travis.yml - vendor/github.com/sirupsen/logrus/CHANGELOG.md - vendor/github.com/sirupsen/logrus/README.md - vendor/github.com/sirupsen/logrus/appveyor.yml - vendor/github.com/sirupsen/logrus/buffer_pool.go - vendor/github.com/sirupsen/logrus/doc.go - vendor/github.com/sirupsen/logrus/entry.go - vendor/github.com/sirupsen/logrus/exported.go - vendor/github.com/sirupsen/logrus/formatter.go - vendor/github.com/sirupsen/logrus/go.mod - vendor/github.com/sirupsen/logrus/go.sum - vendor/github.com/sirupsen/logrus/hooks.go - vendor/github.com/sirupsen/logrus/json_formatter.go - vendor/github.com/sirupsen/logrus/logger.go - vendor/github.com/sirupsen/logrus/logrus.go - vendor/github.com/sirupsen/logrus/terminal_check_appengine.go - vendor/github.com/sirupsen/logrus/terminal_check_bsd.go - vendor/github.com/sirupsen/logrus/terminal_check_js.go - vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go - vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go - vendor/github.com/sirupsen/logrus/terminal_check_solaris.go - vendor/github.com/sirupsen/logrus/terminal_check_unix.go - vendor/github.com/sirupsen/logrus/terminal_check_windows.go - vendor/github.com/sirupsen/logrus/text_formatter.go - vendor/github.com/sirupsen/logrus/writer.go - vendor/github.com/stefanberger/go-pkcs11uri/.gitignore - vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml - vendor/github.com/stefanberger/go-pkcs11uri/README.md - vendor/github.com/stretchr/testify/assert/assertion_compare.go - vendor/github.com/stretchr/testify/assert/assertion_format.go - vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl - vendor/github.com/stretchr/testify/assert/assertion_forward.go - 
vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl - vendor/github.com/stretchr/testify/assert/assertions.go - vendor/github.com/stretchr/testify/assert/doc.go - vendor/github.com/stretchr/testify/assert/errors.go - vendor/github.com/stretchr/testify/assert/forward_assertions.go - vendor/github.com/stretchr/testify/assert/http_assertions.go - vendor/github.com/stretchr/testify/require/doc.go - vendor/github.com/stretchr/testify/require/forward_requirements.go - vendor/github.com/stretchr/testify/require/require.go - vendor/github.com/stretchr/testify/require/require.go.tmpl - vendor/github.com/stretchr/testify/require/require_forward.go - vendor/github.com/stretchr/testify/require/require_forward.go.tmpl - vendor/github.com/stretchr/testify/require/requirements.go - vendor/github.com/tchap/go-patricia/AUTHORS - vendor/github.com/urfave/cli/.flake8 - vendor/github.com/urfave/cli/.gitignore - vendor/github.com/urfave/cli/.travis.yml - vendor/github.com/urfave/cli/CHANGELOG.md - vendor/github.com/urfave/cli/CODE_OF_CONDUCT.md - vendor/github.com/urfave/cli/CONTRIBUTING.md - vendor/github.com/urfave/cli/README.md - vendor/github.com/urfave/cli/appveyor.yml - vendor/github.com/urfave/cli/category.go - vendor/github.com/urfave/cli/cli.go - vendor/github.com/urfave/cli/command.go - vendor/github.com/urfave/cli/context.go - vendor/github.com/urfave/cli/docs.go - vendor/github.com/urfave/cli/errors.go - vendor/github.com/urfave/cli/fish.go - vendor/github.com/urfave/cli/flag.go - vendor/github.com/urfave/cli/flag_bool.go - vendor/github.com/urfave/cli/flag_bool_t.go - vendor/github.com/urfave/cli/flag_duration.go - vendor/github.com/urfave/cli/flag_float64.go - vendor/github.com/urfave/cli/flag_generic.go - vendor/github.com/urfave/cli/flag_int.go - vendor/github.com/urfave/cli/flag_int64.go - vendor/github.com/urfave/cli/flag_int64_slice.go - vendor/github.com/urfave/cli/flag_int_slice.go - vendor/github.com/urfave/cli/flag_string.go - 
vendor/github.com/urfave/cli/flag_string_slice.go - vendor/github.com/urfave/cli/flag_uint.go - vendor/github.com/urfave/cli/flag_uint64.go - vendor/github.com/urfave/cli/funcs.go - vendor/github.com/urfave/cli/go.mod - vendor/github.com/urfave/cli/go.sum - vendor/github.com/urfave/cli/help.go - vendor/github.com/urfave/cli/parse.go - vendor/github.com/urfave/cli/sort.go - vendor/github.com/urfave/cli/template.go - vendor/go.etcd.io/bbolt/.gitignore - vendor/go.etcd.io/bbolt/.travis.yml - vendor/go.etcd.io/bbolt/Makefile - vendor/go.etcd.io/bbolt/README.md - vendor/go.etcd.io/bbolt/bolt_386.go - vendor/go.etcd.io/bbolt/bolt_amd64.go - vendor/go.etcd.io/bbolt/bolt_arm.go - vendor/go.etcd.io/bbolt/bolt_arm64.go - vendor/go.etcd.io/bbolt/bolt_linux.go - vendor/go.etcd.io/bbolt/bolt_mips64x.go - vendor/go.etcd.io/bbolt/bolt_mipsx.go - vendor/go.etcd.io/bbolt/bolt_openbsd.go - vendor/go.etcd.io/bbolt/bolt_ppc.go - vendor/go.etcd.io/bbolt/bolt_ppc64.go - vendor/go.etcd.io/bbolt/bolt_ppc64le.go - vendor/go.etcd.io/bbolt/bolt_riscv64.go - vendor/go.etcd.io/bbolt/bolt_s390x.go - vendor/go.etcd.io/bbolt/bolt_unix.go - vendor/go.etcd.io/bbolt/bolt_unix_aix.go - vendor/go.etcd.io/bbolt/bolt_unix_solaris.go - vendor/go.etcd.io/bbolt/bolt_windows.go - vendor/go.etcd.io/bbolt/boltsync_unix.go - vendor/go.etcd.io/bbolt/bucket.go - vendor/go.etcd.io/bbolt/cursor.go - vendor/go.etcd.io/bbolt/db.go - vendor/go.etcd.io/bbolt/doc.go - vendor/go.etcd.io/bbolt/errors.go - vendor/go.etcd.io/bbolt/freelist.go - vendor/go.etcd.io/bbolt/freelist_hmap.go - vendor/go.etcd.io/bbolt/go.mod - vendor/go.etcd.io/bbolt/go.sum - vendor/go.etcd.io/bbolt/node.go - vendor/go.etcd.io/bbolt/page.go - vendor/go.etcd.io/bbolt/tx.go - vendor/go.etcd.io/bbolt/unsafe.go - vendor/go.mozilla.org/pkcs7/.gitignore - vendor/go.mozilla.org/pkcs7/.travis.yml - vendor/go.mozilla.org/pkcs7/Makefile - vendor/go.mozilla.org/pkcs7/README.md - vendor/go.mozilla.org/pkcs7/ber.go - vendor/go.mozilla.org/pkcs7/decrypt.go - 
vendor/go.mozilla.org/pkcs7/encrypt.go - vendor/go.mozilla.org/pkcs7/go.mod - vendor/go.mozilla.org/pkcs7/pkcs7.go - vendor/go.mozilla.org/pkcs7/sign.go - vendor/go.mozilla.org/pkcs7/verify.go - vendor/go.opencensus.io/.gitignore - vendor/go.opencensus.io/.travis.yml - vendor/go.opencensus.io/AUTHORS - vendor/go.opencensus.io/CONTRIBUTING.md - vendor/go.opencensus.io/Makefile - vendor/go.opencensus.io/README.md - vendor/go.opencensus.io/appveyor.yml - vendor/go.opencensus.io/go.mod - vendor/go.opencensus.io/go.sum - vendor/golang.org/x/crypto/AUTHORS - vendor/golang.org/x/crypto/CONTRIBUTORS - vendor/golang.org/x/crypto/PATENTS - vendor/golang.org/x/net/AUTHORS - vendor/golang.org/x/net/CONTRIBUTORS - vendor/golang.org/x/net/PATENTS - vendor/golang.org/x/net/http2/.gitignore - vendor/golang.org/x/net/http2/Dockerfile - vendor/golang.org/x/net/http2/Makefile - vendor/golang.org/x/net/http2/README - vendor/golang.org/x/net/idna/tables10.0.0.go - vendor/golang.org/x/net/idna/tables11.0.0.go - vendor/golang.org/x/net/idna/tables12.0.0.go - vendor/golang.org/x/net/idna/tables13.0.0.go - vendor/golang.org/x/net/idna/tables9.0.0.go - vendor/golang.org/x/net/idna/trieval.go - vendor/golang.org/x/oauth2/.travis.yml - vendor/golang.org/x/oauth2/AUTHORS - vendor/golang.org/x/oauth2/CONTRIBUTING.md - vendor/golang.org/x/oauth2/CONTRIBUTORS - vendor/golang.org/x/oauth2/README.md - vendor/golang.org/x/oauth2/go.mod - vendor/golang.org/x/oauth2/go.sum - vendor/golang.org/x/sync/AUTHORS - vendor/golang.org/x/sync/CONTRIBUTORS - vendor/golang.org/x/sync/PATENTS - vendor/golang.org/x/sys/AUTHORS - vendor/golang.org/x/sys/CONTRIBUTORS - vendor/golang.org/x/sys/PATENTS - vendor/golang.org/x/sys/plan9/const_plan9.go - vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go - vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go - vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go - vendor/golang.org/x/sys/plan9/zsysnum_plan9.go - vendor/golang.org/x/sys/unix/.gitignore - 
vendor/golang.org/x/sys/unix/README.md - vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go - vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go - vendor/golang.org/x/sys/unix/zerrors_darwin_386.go - vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go - vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go - vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go - vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go - vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go - vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go - vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go - vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go - vendor/golang.org/x/sys/unix/zerrors_linux.go - vendor/golang.org/x/sys/unix/zerrors_linux_386.go - vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go - vendor/golang.org/x/sys/unix/zerrors_linux_arm.go - vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go - vendor/golang.org/x/sys/unix/zerrors_linux_mips.go - vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go - vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go - vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go - vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go - vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go - vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go - vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go - vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go - vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go - vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go - vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go - vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go - vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go - vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go - vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go - vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go - vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go - vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go - vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go - 
vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go - vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go - vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go - vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go - vendor/golang.org/x/sys/unix/zptrace_x86_linux.go - vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go - vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go - vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go - vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go - vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go - vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s - vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go - vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s - vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go - vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s - vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go - vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s - vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go - vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s - vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go - vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s - vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go - vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s - vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go - vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s - vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go - vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go - vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go - vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go - vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go - vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go - vendor/golang.org/x/sys/unix/zsyscall_linux.go - vendor/golang.org/x/sys/unix/zsyscall_linux_386.go - vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go - vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go - 
vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go - vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go - vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go - vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go - vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go - vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go - vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go - vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go - vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go - vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go - vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go - vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go - vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go - vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go - vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go - vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go - vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go - vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go - vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go - vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go - vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go - vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go - vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go - vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go - vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go - vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go - vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go - vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go - vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go - vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go - vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go - vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go - vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go - vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go - vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go - vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go - 
vendor/golang.org/x/sys/unix/zsysnum_linux_386.go - vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go - vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go - vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go - vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go - vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go - vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go - vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go - vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go - vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go - vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go - vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go - vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go - vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go - vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go - vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go - vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go - vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go - vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go - vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go - vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go - vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go - vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go - vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go - vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go - vendor/golang.org/x/sys/unix/ztypes_darwin_386.go - vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go - vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go - vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go - vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go - vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go - vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go - vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go - vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go - vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go - vendor/golang.org/x/sys/unix/ztypes_linux.go - vendor/golang.org/x/sys/unix/ztypes_linux_386.go - 
vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go - vendor/golang.org/x/sys/unix/ztypes_linux_arm.go - vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go - vendor/golang.org/x/sys/unix/ztypes_linux_mips.go - vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go - vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go - vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go - vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go - vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go - vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go - vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go - vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go - vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go - vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go - vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go - vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go - vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go - vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go - vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go - vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go - vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go - vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go - vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go - vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go - vendor/golang.org/x/sys/windows/zerrors_windows.go - vendor/golang.org/x/sys/windows/zknownfolderids_windows.go - vendor/golang.org/x/sys/windows/zsyscall_windows.go - vendor/golang.org/x/term/AUTHORS - vendor/golang.org/x/term/CONTRIBUTING.md - vendor/golang.org/x/term/CONTRIBUTORS - vendor/golang.org/x/term/PATENTS - vendor/golang.org/x/term/README.md - vendor/golang.org/x/term/go.mod - vendor/golang.org/x/term/go.sum - vendor/golang.org/x/text/AUTHORS - vendor/golang.org/x/text/CONTRIBUTORS - vendor/golang.org/x/text/PATENTS - vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go - vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go - vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go 
- vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go - vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go - vendor/golang.org/x/text/unicode/bidi/trieval.go - vendor/golang.org/x/text/unicode/norm/tables10.0.0.go - vendor/golang.org/x/text/unicode/norm/tables11.0.0.go - vendor/golang.org/x/text/unicode/norm/tables12.0.0.go - vendor/golang.org/x/text/unicode/norm/tables13.0.0.go - vendor/golang.org/x/text/unicode/norm/tables9.0.0.go - vendor/golang.org/x/time/AUTHORS - vendor/golang.org/x/time/CONTRIBUTORS - vendor/golang.org/x/time/PATENTS - vendor/golang.org/x/xerrors/PATENTS - vendor/golang.org/x/xerrors/README - vendor/golang.org/x/xerrors/codereview.cfg - vendor/golang.org/x/xerrors/go.mod - vendor/google.golang.org/appengine/internal/base/api_base.pb.go - vendor/google.golang.org/appengine/internal/base/api_base.proto - vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go - vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto - vendor/google.golang.org/appengine/internal/log/log_service.pb.go - vendor/google.golang.org/appengine/internal/log/log_service.proto - vendor/google.golang.org/appengine/internal/main_common.go - vendor/google.golang.org/appengine/internal/regen.sh - vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go - vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto - vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go - vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto - vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go - vendor/google.golang.org/grpc/.travis.yml - vendor/google.golang.org/grpc/AUTHORS - vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md - vendor/google.golang.org/grpc/CONTRIBUTING.md - vendor/google.golang.org/grpc/GOVERNANCE.md - vendor/google.golang.org/grpc/Makefile - vendor/google.golang.org/grpc/README.md - 
vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go - vendor/google.golang.org/grpc/codegen.sh - vendor/google.golang.org/grpc/go.mod - vendor/google.golang.org/grpc/go.sum - vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go - vendor/google.golang.org/grpc/install_gae.sh - vendor/google.golang.org/grpc/vet.sh - vendor/gopkg.in/inf.v0/dec.go - vendor/gopkg.in/inf.v0/rounder.go - vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc - vendor/gopkg.in/square/go-jose.v2/.gitignore - vendor/gopkg.in/square/go-jose.v2/.travis.yml - vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md - vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md - vendor/gopkg.in/square/go-jose.v2/README.md - vendor/gopkg.in/square/go-jose.v2/json/README.md - vendor/gopkg.in/yaml.v2/.travis.yml - vendor/gopkg.in/yaml.v2/README.md - vendor/gopkg.in/yaml.v2/apic.go - vendor/gopkg.in/yaml.v2/decode.go - vendor/gopkg.in/yaml.v2/emitterc.go - vendor/gopkg.in/yaml.v2/encode.go - vendor/gopkg.in/yaml.v2/go.mod - vendor/gopkg.in/yaml.v2/parserc.go - vendor/gopkg.in/yaml.v2/readerc.go - vendor/gopkg.in/yaml.v2/resolve.go - vendor/gopkg.in/yaml.v2/scannerc.go - vendor/gopkg.in/yaml.v2/sorter.go - vendor/gopkg.in/yaml.v2/writerc.go - vendor/gopkg.in/yaml.v2/yaml.go - vendor/gopkg.in/yaml.v2/yamlh.go - vendor/gopkg.in/yaml.v2/yamlprivateh.go - vendor/gopkg.in/yaml.v3/.travis.yml - vendor/gopkg.in/yaml.v3/README.md - vendor/gopkg.in/yaml.v3/go.mod - vendor/gotest.tools/v3/assert/assert.go - vendor/gotest.tools/v3/assert/cmp/compare.go - vendor/gotest.tools/v3/assert/cmp/result.go - vendor/gotest.tools/v3/internal/assert/assert.go - vendor/gotest.tools/v3/internal/assert/result.go - vendor/gotest.tools/v3/internal/difflib/difflib.go - vendor/gotest.tools/v3/internal/format/diff.go - vendor/gotest.tools/v3/internal/format/format.go - vendor/gotest.tools/v3/internal/source/defers.go - vendor/gotest.tools/v3/internal/source/source.go - vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS 
- vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS - vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS - vendor/k8s.io/client-go/pkg/version/.gitattributes - vendor/k8s.io/client-go/rest/OWNERS - vendor/k8s.io/client-go/tools/metrics/OWNERS - vendor/k8s.io/client-go/transport/OWNERS - vendor/k8s.io/client-go/util/cert/OWNERS - vendor/k8s.io/client-go/util/keyutil/OWNERS - vendor/k8s.io/klog/v2/.gitignore - vendor/k8s.io/klog/v2/CONTRIBUTING.md - vendor/k8s.io/klog/v2/OWNERS - vendor/k8s.io/klog/v2/README.md - vendor/k8s.io/klog/v2/RELEASE.md - vendor/k8s.io/klog/v2/SECURITY.md - vendor/k8s.io/klog/v2/SECURITY_CONTACTS - vendor/k8s.io/klog/v2/code-of-conduct.md - vendor/k8s.io/klog/v2/go.mod - vendor/k8s.io/klog/v2/go.sum - vendor/k8s.io/utils/exec/README.md - vendor/modules.txt - vendor/sigs.k8s.io/yaml/.gitignore - vendor/sigs.k8s.io/yaml/.travis.yml - vendor/sigs.k8s.io/yaml/CONTRIBUTING.md - vendor/sigs.k8s.io/yaml/OWNERS - vendor/sigs.k8s.io/yaml/README.md - vendor/sigs.k8s.io/yaml/RELEASE.md - vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS - vendor/sigs.k8s.io/yaml/code-of-conduct.md - vendor/sigs.k8s.io/yaml/go.mod - vendor/sigs.k8s.io/yaml/go.sum - vendor/sigs.k8s.io/yaml/yaml.go - vendor/sigs.k8s.io/yaml/yaml_go110.go -Copyright: __NO_COPYRIGHT_NOR_LICENSE__ -License: __NO_COPYRIGHT_NOR_LICENSE__ - -Files: vendor/github.com/docker/spdystream/spdy/dictionary.go - vendor/github.com/docker/spdystream/spdy/read.go - vendor/github.com/docker/spdystream/spdy/write.go - vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go - vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go - vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go - vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go - vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go - vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go - 
vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go - vendor/github.com/google/go-cmp/cmp/internal/value/name.go - vendor/github.com/google/go-cmp/cmp/internal/value/sort.go - vendor/github.com/google/go-cmp/cmp/internal/value/zero.go - vendor/github.com/google/go-cmp/cmp/options.go - vendor/github.com/google/go-cmp/cmp/path.go - vendor/github.com/google/go-cmp/cmp/report.go - vendor/github.com/google/go-cmp/cmp/report_compare.go - vendor/github.com/google/go-cmp/cmp/report_references.go - vendor/github.com/google/go-cmp/cmp/report_reflect.go - vendor/github.com/google/go-cmp/cmp/report_slices.go - vendor/github.com/google/go-cmp/cmp/report_text.go - vendor/github.com/google/go-cmp/cmp/report_value.go - vendor/github.com/google/uuid/dce.go - vendor/github.com/google/uuid/hash.go - vendor/github.com/google/uuid/marshal.go - vendor/github.com/google/uuid/node.go - vendor/github.com/google/uuid/sql.go - vendor/github.com/google/uuid/time.go - vendor/github.com/google/uuid/util.go - vendor/github.com/google/uuid/uuid.go - vendor/github.com/google/uuid/version1.go - vendor/github.com/google/uuid/version4.go - vendor/github.com/klauspost/compress/snappy/decode.go - vendor/github.com/klauspost/compress/snappy/encode.go - vendor/github.com/miekg/pkcs11/const.go - vendor/github.com/miekg/pkcs11/error.go - vendor/github.com/miekg/pkcs11/params.go - vendor/github.com/miekg/pkcs11/types.go - vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go - vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go - vendor/golang.org/x/crypto/openpgp/armor/encode.go - vendor/golang.org/x/crypto/openpgp/canonical_text.go - vendor/golang.org/x/crypto/openpgp/keys.go - vendor/golang.org/x/crypto/openpgp/packet/compressed.go - vendor/golang.org/x/crypto/openpgp/packet/config.go - vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go - vendor/golang.org/x/crypto/openpgp/packet/literal.go - vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go 
- vendor/golang.org/x/crypto/openpgp/packet/opaque.go - vendor/golang.org/x/crypto/openpgp/packet/private_key.go - vendor/golang.org/x/crypto/openpgp/packet/public_key.go - vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go - vendor/golang.org/x/crypto/openpgp/packet/reader.go - vendor/golang.org/x/crypto/openpgp/packet/signature.go - vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go - vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go - vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go - vendor/golang.org/x/crypto/openpgp/packet/userattribute.go - vendor/golang.org/x/crypto/openpgp/packet/userid.go - vendor/golang.org/x/crypto/openpgp/write.go - vendor/golang.org/x/net/http/httpguts/httplex.go - vendor/golang.org/x/net/http2/ciphers.go - vendor/golang.org/x/net/http2/databuffer.go - vendor/golang.org/x/net/http2/errors.go - vendor/golang.org/x/net/http2/frame.go - vendor/golang.org/x/net/http2/headermap.go - vendor/golang.org/x/net/http2/hpack/encode.go - vendor/golang.org/x/net/http2/hpack/huffman.go - vendor/golang.org/x/net/http2/hpack/tables.go - vendor/golang.org/x/net/http2/pipe.go - vendor/golang.org/x/net/http2/write.go - vendor/golang.org/x/net/http2/writesched.go - vendor/golang.org/x/net/http2/writesched_priority.go - vendor/golang.org/x/net/http2/writesched_random.go - vendor/golang.org/x/net/idna/punycode.go - vendor/golang.org/x/net/idna/trie.go - vendor/golang.org/x/net/trace/events.go - vendor/golang.org/x/net/trace/histogram.go - vendor/golang.org/x/net/websocket/client.go - vendor/golang.org/x/net/websocket/dial.go - vendor/golang.org/x/net/websocket/hybi.go - vendor/golang.org/x/net/websocket/server.go - vendor/golang.org/x/oauth2/internal/oauth2.go - vendor/golang.org/x/oauth2/internal/token.go - vendor/golang.org/x/oauth2/internal/transport.go - vendor/golang.org/x/oauth2/token.go - vendor/golang.org/x/oauth2/transport.go - vendor/golang.org/x/sys/plan9/asm.s - 
vendor/golang.org/x/sys/plan9/asm_plan9_386.s - vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s - vendor/golang.org/x/sys/plan9/asm_plan9_arm.s - vendor/golang.org/x/sys/plan9/errors_plan9.go - vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh - vendor/golang.org/x/sys/unix/errors_freebsd_arm.go - vendor/golang.org/x/sys/unix/fcntl_darwin.go - vendor/golang.org/x/sys/unix/ioctl_linux.go - vendor/golang.org/x/sys/unix/pledge_openbsd.go - vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go - vendor/golang.org/x/sys/unix/syscall_darwin_arm.go - vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go - vendor/golang.org/x/sys/unix/unveil_openbsd.go - vendor/golang.org/x/sys/windows/dll_windows.go - vendor/golang.org/x/sys/windows/memory_windows.go - vendor/golang.org/x/sys/windows/mkerrors.bash - vendor/golang.org/x/sys/windows/mkknownfolderids.bash - vendor/golang.org/x/sys/windows/security_windows.go - vendor/golang.org/x/sys/windows/setupapierrors_windows.go - vendor/golang.org/x/sys/windows/svc/sys_windows_arm.s - vendor/golang.org/x/sys/windows/svc/sys_windows_arm64.s - vendor/golang.org/x/sys/windows/types_windows.go - vendor/golang.org/x/sys/windows/types_windows_386.go - vendor/golang.org/x/sys/windows/types_windows_amd64.go - vendor/golang.org/x/sys/windows/types_windows_arm.go - vendor/golang.org/x/sys/windows/types_windows_arm64.go - vendor/golang.org/x/term/term_plan9.go - vendor/golang.org/x/term/term_solaris.go - vendor/golang.org/x/term/term_unix_aix.go - vendor/golang.org/x/term/term_unix_linux.go - vendor/golang.org/x/term/term_unix_zos.go - vendor/golang.org/x/term/term_windows.go - vendor/golang.org/x/term/terminal.go - vendor/golang.org/x/text/unicode/bidi/bidi.go - vendor/golang.org/x/text/unicode/bidi/bracket.go - vendor/golang.org/x/text/unicode/bidi/core.go - vendor/golang.org/x/text/unicode/bidi/prop.go - vendor/golang.org/x/text/unicode/norm/composition.go - vendor/golang.org/x/text/unicode/norm/forminfo.go - 
vendor/golang.org/x/text/unicode/norm/input.go - vendor/golang.org/x/text/unicode/norm/iter.go - vendor/golang.org/x/text/unicode/norm/normalize.go - vendor/golang.org/x/text/unicode/norm/readwriter.go - vendor/golang.org/x/text/unicode/norm/transform.go - vendor/golang.org/x/text/unicode/norm/trie.go - vendor/golang.org/x/xerrors/adaptor.go - vendor/golang.org/x/xerrors/errors.go - vendor/golang.org/x/xerrors/fmt.go - vendor/golang.org/x/xerrors/format.go - vendor/golang.org/x/xerrors/frame.go - vendor/golang.org/x/xerrors/internal/internal.go - vendor/golang.org/x/xerrors/wrap.go - vendor/gopkg.in/square/go-jose.v2/json/indent.go - vendor/gopkg.in/square/go-jose.v2/json/scanner.go - vendor/gopkg.in/square/go-jose.v2/json/stream.go - vendor/gopkg.in/square/go-jose.v2/json/tags.go - vendor/sigs.k8s.io/yaml/fields.go -Copyright: 2009-2021 The Go Authors. - 2011 The Snappy-Go Authors. - 2013 Miek Gieben. - 2016-2018 Google Inc. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. 
- -Files: protobuf/plugin/fieldpath.proto - vendor/github.com/gogo/protobuf/gogoproto/Makefile - vendor/github.com/gogo/protobuf/gogoproto/gogo.proto - vendor/github.com/gogo/protobuf/gogoproto/helper.go - vendor/github.com/gogo/protobuf/plugin/compare/compare.go - vendor/github.com/gogo/protobuf/plugin/compare/comparetest.go - vendor/github.com/gogo/protobuf/plugin/description/descriptiontest.go - vendor/github.com/gogo/protobuf/plugin/equal/equaltest.go - vendor/github.com/gogo/protobuf/plugin/face/facetest.go - vendor/github.com/gogo/protobuf/plugin/gostring/gostringtest.go - vendor/github.com/gogo/protobuf/plugin/size/sizetest.go - vendor/github.com/gogo/protobuf/plugin/stringer/stringertest.go - vendor/github.com/gogo/protobuf/plugin/union/uniontest.go - vendor/github.com/gogo/protobuf/proto/custom_gogo.go - vendor/github.com/gogo/protobuf/proto/duration_gogo.go - vendor/github.com/gogo/protobuf/proto/encode_gogo.go - vendor/github.com/gogo/protobuf/proto/extensions_gogo.go - vendor/github.com/gogo/protobuf/proto/lib_gogo.go - vendor/github.com/gogo/protobuf/proto/properties_gogo.go - vendor/github.com/gogo/protobuf/proto/skip_gogo.go - vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go - vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go - vendor/github.com/gogo/protobuf/proto/text_gogo.go - vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go - vendor/github.com/gogo/protobuf/proto/wrappers.go - vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go - vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go - vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go - vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go - vendor/github.com/gogo/protobuf/types/duration_gogo.go - vendor/github.com/gogo/protobuf/types/timestamp_gogo.go - vendor/github.com/gogo/protobuf/types/wrappers_gogo.go - vendor/github.com/gogo/protobuf/vanity/command/command.go - vendor/github.com/gogo/protobuf/vanity/enum.go - 
vendor/github.com/gogo/protobuf/vanity/field.go - vendor/github.com/gogo/protobuf/vanity/file.go - vendor/github.com/gogo/protobuf/vanity/foreach.go - vendor/github.com/gogo/protobuf/vanity/msg.go -Copyright: 2013-2018 The GoGo Authors. http://github.com/gogo/protobuf - 2015 The GoGo Authors. rights reserved. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -Files: vendor/github.com/gogo/protobuf/proto/Makefile - vendor/github.com/gogo/protobuf/proto/decode.go - vendor/github.com/gogo/protobuf/proto/deprecated.go - vendor/github.com/gogo/protobuf/proto/discard.go - vendor/github.com/gogo/protobuf/proto/duration.go - vendor/github.com/gogo/protobuf/proto/encode.go - vendor/github.com/gogo/protobuf/proto/extensions.go - vendor/github.com/gogo/protobuf/proto/message_set.go - vendor/github.com/gogo/protobuf/proto/table_marshal.go - vendor/github.com/gogo/protobuf/proto/table_merge.go - vendor/github.com/gogo/protobuf/proto/table_unmarshal.go - vendor/github.com/gogo/protobuf/proto/timestamp.go - vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile - vendor/github.com/gogo/protobuf/types/any.go - vendor/github.com/gogo/protobuf/types/duration.go - vendor/github.com/gogo/protobuf/types/timestamp.go - vendor/github.com/golang/protobuf/proto/decode.go - vendor/github.com/golang/protobuf/proto/deprecated.go - vendor/github.com/golang/protobuf/proto/discard.go - vendor/github.com/golang/protobuf/proto/encode.go - vendor/github.com/golang/protobuf/proto/extensions.go - vendor/github.com/golang/protobuf/proto/message_set.go - vendor/github.com/golang/protobuf/proto/properties.go - vendor/github.com/golang/protobuf/proto/table_marshal.go - vendor/github.com/golang/protobuf/proto/table_merge.go - vendor/github.com/golang/protobuf/proto/table_unmarshal.go - vendor/github.com/golang/protobuf/proto/text.go - vendor/github.com/golang/protobuf/proto/text_parser.go - vendor/github.com/golang/protobuf/ptypes/any.go - vendor/github.com/golang/protobuf/ptypes/any/any.proto - vendor/github.com/golang/protobuf/ptypes/duration.go - vendor/github.com/golang/protobuf/ptypes/duration/duration.proto - vendor/github.com/golang/protobuf/ptypes/timestamp.go - vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - vendor/github.com/moby/sys/symlink/LICENSE.BSD -Copyright: 2008 Google Inc. 
https://developers.google.com/protocol-buffers/ - 2010-2018 The Go Authors. https://github.com/golang/protobuf - 2014-2018 The Docker & Go Authors. -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. 
- -Files: integration/remote/remote_image.go - integration/remote/remote_runtime.go - integration/remote/util/util_unix.go - integration/remote/util/util_unsupported.go - integration/remote/util/util_windows.go - integration/remote/utils.go - integration/util/boottime_util_darwin.go - integration/util/boottime_util_linux.go - integration/util/util.go - integration/util/util_unix.go - integration/util/util_unsupported.go - integration/util/util_windows.go - pkg/cri/server/bandwidth/fake_shaper.go - pkg/cri/server/bandwidth/interfaces.go - pkg/cri/server/bandwidth/linux.go - pkg/cri/server/bandwidth/unsupported.go - pkg/cri/server/bandwidth/utils.go - pkg/cri/streaming/errors.go - pkg/cri/streaming/portforward/httpstream.go - pkg/cri/streaming/portforward/portforward.go - pkg/cri/streaming/portforward/websocket.go - pkg/cri/streaming/remotecommand/attach.go - pkg/cri/streaming/remotecommand/exec.go - pkg/cri/streaming/remotecommand/httpstream.go - pkg/cri/streaming/remotecommand/websocket.go - pkg/cri/streaming/request_cache.go - pkg/cri/streaming/server.go - pkg/netns/netns_linux.go - pkg/seccomp/seccomp_linux.go - test/init-buildx.sh -Copyright: 2015-2020 The Kubernetes Authors. - 2018 CNI authors - The containerd Authors. - The runc Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/klauspost/compress/zstd/bitreader.go - vendor/github.com/klauspost/compress/zstd/blockdec.go - vendor/github.com/klauspost/compress/zstd/blockenc.go - vendor/github.com/klauspost/compress/zstd/bytebuf.go - vendor/github.com/klauspost/compress/zstd/bytereader.go - vendor/github.com/klauspost/compress/zstd/decoder.go - vendor/github.com/klauspost/compress/zstd/decoder_options.go - vendor/github.com/klauspost/compress/zstd/enc_best.go - vendor/github.com/klauspost/compress/zstd/enc_better.go - vendor/github.com/klauspost/compress/zstd/enc_dfast.go - vendor/github.com/klauspost/compress/zstd/enc_fast.go - vendor/github.com/klauspost/compress/zstd/encoder.go - vendor/github.com/klauspost/compress/zstd/framedec.go - vendor/github.com/klauspost/compress/zstd/frameenc.go - vendor/github.com/klauspost/compress/zstd/fse_decoder.go - vendor/github.com/klauspost/compress/zstd/fse_encoder.go - vendor/github.com/klauspost/compress/zstd/fse_predefined.go - vendor/github.com/klauspost/compress/zstd/hash.go - vendor/github.com/klauspost/compress/zstd/history.go - vendor/github.com/klauspost/compress/zstd/seqdec.go - vendor/github.com/klauspost/compress/zstd/seqenc.go - vendor/github.com/klauspost/compress/zstd/snappy.go -Copyright: 2019+ Klaus Post. -License: __UNKNOWN__ - License information can be found in the LICENSE file. - Based on work by Yann Collet, released under BSD License.
- -Files: vendor/github.com/emicklei/go-restful/compress.go - vendor/github.com/emicklei/go-restful/compressor_cache.go - vendor/github.com/emicklei/go-restful/compressor_pools.go - vendor/github.com/emicklei/go-restful/compressors.go - vendor/github.com/emicklei/go-restful/constants.go - vendor/github.com/emicklei/go-restful/container.go - vendor/github.com/emicklei/go-restful/cors_filter.go - vendor/github.com/emicklei/go-restful/curly.go - vendor/github.com/emicklei/go-restful/entity_accessors.go - vendor/github.com/emicklei/go-restful/jsr311.go - vendor/github.com/emicklei/go-restful/logger.go - vendor/github.com/emicklei/go-restful/parameter.go - vendor/github.com/emicklei/go-restful/path_expression.go - vendor/github.com/emicklei/go-restful/request.go - vendor/github.com/emicklei/go-restful/response.go - vendor/github.com/emicklei/go-restful/route.go - vendor/github.com/emicklei/go-restful/route_builder.go - vendor/github.com/emicklei/go-restful/router.go - vendor/github.com/emicklei/go-restful/service_error.go - vendor/github.com/emicklei/go-restful/web_service_container.go -Copyright: 2013-2015 Ernest Micklei. -License: __UNKNOWN__ - Use of this source code is governed by a license - that can be found in the LICENSE file. 
- -Files: vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go - vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go - vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go - vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go - vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go - vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go - vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Code generated by deepcopy-gen. DO NOT EDIT. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. 
- -Files: vendor/github.com/fsnotify/fsnotify/windows.go - vendor/golang.org/x/sys/windows/eventlog.go - vendor/golang.org/x/sys/windows/registry/syscall.go - vendor/golang.org/x/sys/windows/registry/value.go - vendor/golang.org/x/sys/windows/service.go - vendor/golang.org/x/sys/windows/str.go - vendor/golang.org/x/sys/windows/svc/debug/log.go - vendor/golang.org/x/sys/windows/svc/event.go - vendor/golang.org/x/sys/windows/svc/mgr/config.go - vendor/golang.org/x/sys/windows/svc/mgr/recovery.go - vendor/golang.org/x/sys/windows/svc/mgr/service.go - vendor/golang.org/x/sys/windows/svc/security.go -Copyright: 2009-2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows - -Files: vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md - vendor/github.com/sirupsen/logrus/alt_exit.go - vendor/gopkg.in/yaml.v2/LICENSE.libyaml - vendor/gopkg.in/yaml.v3/apic.go - vendor/gopkg.in/yaml.v3/emitterc.go - vendor/gopkg.in/yaml.v3/parserc.go - vendor/gopkg.in/yaml.v3/readerc.go - vendor/gopkg.in/yaml.v3/scannerc.go - vendor/gopkg.in/yaml.v3/writerc.go - vendor/gopkg.in/yaml.v3/yamlh.go - vendor/gopkg.in/yaml.v3/yamlprivateh.go -Copyright: 2006-2010 Kirill Simonov - 2011-2019 Canonical Ltd - 2012 Miki Tebeka . - 2014 Brian Goff -License: Expat - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - . - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - . 
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - -Files: vendor/github.com/Microsoft/hcsshim/README.md - vendor/github.com/docker/go-events/README.md - vendor/github.com/emicklei/go-restful/README.md - vendor/github.com/emicklei/go-restful/doc.go - vendor/github.com/gogo/googleapis/google/rpc/code.pb.go - vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go - vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go - vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go - vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE - vendor/github.com/russross/blackfriday/v2/markdown.go -Copyright: 2011 Russ Ross . Distributed under the Simplified BSD License. See README.md for details. - 2012 Matt T. Proud (matt.proud@gmail.com) - 2012-2015 http://ernestmicklei.com. MIT License - 2012-2018 http://ernestmicklei.com. MIT License. Contributions are welcome. - 2016 Docker, Inc. go-events is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. - 2016 Michal Witkowski. See LICENSE for licensing terms. - 2018 Microsoft Corp. - Use `FAILED_PRECONDITION` if the client should not retry until the system state has been explicitly fixed. 
E.g., if an "rmdir" -License: __NO_LICENSE__ - -Files: vendor/github.com/klauspost/compress/fse/bitreader.go - vendor/github.com/klauspost/compress/fse/bitwriter.go - vendor/github.com/klauspost/compress/fse/bytereader.go - vendor/github.com/klauspost/compress/fse/compress.go - vendor/github.com/klauspost/compress/huff0/bitreader.go - vendor/github.com/klauspost/compress/huff0/bitwriter.go - vendor/github.com/klauspost/compress/huff0/bytereader.go - vendor/github.com/klauspost/compress/zstd/bitwriter.go -Copyright: 2018 Klaus Post. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -Files: vendor/golang.org/x/sys/unix/epoll_zos.go - vendor/golang.org/x/sys/unix/fstatfs_zos.go - vendor/golang.org/x/sys/unix/ioctl_zos.go - vendor/golang.org/x/sys/unix/syscall_zos_s390x.go - vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go - vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go - vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go -Copyright: 2020 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build zos && s390x - +build zos,s390x - -Files: vendor/k8s.io/apimachinery/pkg/util/sets/byte.go - vendor/k8s.io/apimachinery/pkg/util/sets/empty.go - vendor/k8s.io/apimachinery/pkg/util/sets/int.go - vendor/k8s.io/apimachinery/pkg/util/sets/int32.go - vendor/k8s.io/apimachinery/pkg/util/sets/int64.go - vendor/k8s.io/apimachinery/pkg/util/sets/string.go -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Code generated by set-gen. DO NOT EDIT. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/api/core/v1/generated.proto - vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto - vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto - vendor/k8s.io/apimachinery/pkg/runtime/generated.proto - vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto - vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - This file was autogenerated by go-to-protobuf. Do not edit it manually! - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. 
- -Files: vendor/golang.org/x/sys/unix/asm_aix_ppc64.s - vendor/golang.org/x/sys/unix/asm_linux_386.s - vendor/golang.org/x/sys/unix/asm_linux_amd64.s - vendor/golang.org/x/sys/unix/asm_linux_arm.s - vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s - vendor/golang.org/x/sys/unix/asm_solaris_amd64.s -Copyright: 2009-2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build gc - +build gc - -Files: vendor/google.golang.org/appengine/internal/api_common.go - vendor/google.golang.org/appengine/internal/app_id.go - vendor/google.golang.org/appengine/internal/identity.go - vendor/google.golang.org/appengine/internal/metadata.go - vendor/google.golang.org/appengine/internal/net.go - vendor/google.golang.org/appengine/internal/transaction.go -Copyright: 2011-2015 Google Inc. -License: __UNKNOWN__ - Use of this source code is governed by the Apache 2.0 - license that can be found in the LICENSE file. - -Files: vendor/github.com/davecgh/go-spew/spew/common.go - vendor/github.com/davecgh/go-spew/spew/config.go - vendor/github.com/davecgh/go-spew/spew/dump.go - vendor/github.com/davecgh/go-spew/spew/format.go - vendor/github.com/davecgh/go-spew/spew/spew.go -Copyright: 2013-2016 Dave Collins -License: ISC - Permission to use, copy, modify, and distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - . - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -Files: vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go - vendor/github.com/prometheus/procfs/kernel_random.go - vendor/github.com/prometheus/procfs/proc_smaps.go - vendor/github.com/prometheus/procfs/vm.go - vendor/github.com/prometheus/procfs/zoneinfo.go -Copyright: 2019-2020 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build !windows - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/opencontainers/go-digest/algorithm.go - vendor/github.com/opencontainers/go-digest/digest.go - vendor/github.com/opencontainers/go-digest/digester.go - vendor/github.com/opencontainers/go-digest/digestset/set.go - vendor/github.com/opencontainers/go-digest/verifiers.go -Copyright: 2017 Docker, Inc. - 2019-2020 OCI Contributors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - https://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/golang.org/x/sys/unix/dirent.go - vendor/golang.org/x/sys/unix/fdset.go - vendor/golang.org/x/sys/unix/ioctl.go - vendor/golang.org/x/sys/unix/str.go - vendor/golang.org/x/sys/unix/syscall_unix.go -Copyright: 2009-2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris - +build aix darwin dragonfly freebsd linux netbsd openbsd solaris - -Files: vendor/github.com/klauspost/compress/snappy/decode_amd64.go - vendor/github.com/klauspost/compress/snappy/decode_amd64.s - vendor/github.com/klauspost/compress/snappy/encode_amd64.go - vendor/github.com/klauspost/compress/snappy/encode_amd64.s -Copyright: 2016 The Go Authors. - 2016 The Snappy-Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build !appengine - +build gc - +build !noasm - -Files: vendor/github.com/docker/go-events/CONTRIBUTING.md - vendor/github.com/docker/go-metrics/CONTRIBUTING.md - vendor/github.com/docker/go-units/CONTRIBUTING.md - vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md -Copyright: 660 York Street, Suite 102, San Francisco, CA 94110 USA - 2004-2006 The Linux Foundation and its contributors. - The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it.
- 1 Letterman Drive, Suite D4700, San Francisco, CA, 94129 -License: __UNKNOWN__ - Everyone is permitted to copy and distribute verbatim copies of this - license document, but changing it is not allowed. - . - Developer's Certificate of Origin 1.1 - . - By making a contribution to this project, I certify that: - . - (a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - . - (b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - . - (d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. - ``` - . - Then you just add a line to every git commit message: - . - Signed-off-by: Joe Smith - . - Use your real name (sorry, no pseudonyms or anonymous contributions.) - . - If you set your `user.name` and `user.email` git configs, you can sign your - commit automatically with `git commit -s`. - -Files: vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License.
- You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Code generated by conversion-gen. DO NOT EDIT. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go - vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Code generated by defaulter-gen. DO NOT EDIT. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/appengine/internal/api.go - vendor/google.golang.org/appengine/internal/identity_vm.go - vendor/google.golang.org/appengine/internal/main_vm.go -Copyright: 2011 Google Inc. -License: __UNKNOWN__ - Use of this source code is governed by the Apache 2.0 - license that can be found in the LICENSE file. - . 
- +build !appengine - -Files: vendor/github.com/prometheus/client_model/NOTICE - vendor/github.com/prometheus/common/NOTICE - vendor/github.com/prometheus/procfs/NOTICE -Copyright: 2012-2015 The Prometheus Authors -License: __UNKNOWN__ - This product includes software developed at - SoundCloud Ltd. (http://soundcloud.com/). - -Files: vendor/github.com/imdario/mergo/map.go - vendor/github.com/imdario/mergo/merge.go - vendor/github.com/imdario/mergo/mergo.go -Copyright: 2009 The Go Authors. - 2013-2014 Dario Castañé. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Based on src/pkg/reflect/deepequal.go from official - golang's stdlib. - -Files: vendor/golang.org/x/sys/unix/asm_bsd_386.s - vendor/golang.org/x/sys/unix/asm_bsd_arm.s - vendor/golang.org/x/sys/unix/asm_bsd_arm64.s -Copyright: 2021 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build (darwin || freebsd || netbsd || openbsd) && gc - +build darwin freebsd netbsd openbsd - +build gc - -Files: vendor/google.golang.org/appengine/internal/api_classic.go - vendor/google.golang.org/appengine/internal/identity_classic.go - vendor/google.golang.org/appengine/internal/main.go -Copyright: 2011-2015 Google Inc. -License: __UNKNOWN__ - Use of this source code is governed by the Apache 2.0 - license that can be found in the LICENSE file. - . - +build appengine - -Files: vendor/golang.org/x/sys/unix/errors_freebsd_386.go - vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go - vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go -Copyright: 2017-2020 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Constants that were deprecated or moved to enums in the FreeBSD headers. Keep - them here for backwards compatibility. 
- -Files: vendor/github.com/gogo/protobuf/proto/properties.go - vendor/github.com/gogo/protobuf/proto/text.go - vendor/github.com/gogo/protobuf/proto/text_parser.go -Copyright: 2010 The Go Authors. https://github.com/golang/protobuf - 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-3-Clause - Go support for Protocol Buffers - Google's data interchange format - . - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. 
- -Files: vendor/github.com/docker/go-metrics/LICENSE.docs - vendor/github.com/docker/spdystream/LICENSE.docs - vendor/github.com/opencontainers/go-digest/LICENSE.docs -Copyright: __NO_COPYRIGHT__ in: vendor/github.com/docker/go-metrics/LICENSE.docs - __NO_COPYRIGHT__ in: vendor/github.com/docker/spdystream/LICENSE.docs - __NO_COPYRIGHT__ in: vendor/github.com/opencontainers/go-digest/LICENSE.docs -License: __UNKNOWN__ with unknown exception - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - -Files: vendor/github.com/cespare/xxhash/v2/LICENSE.txt - vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt -Copyright: 2016 Caleb Spare -License: Expat - MIT License - . - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - . - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - . - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -Files: api/services/events/v1/doc.go - api/services/ttrpc/events/v1/doc.go -Copyright: The containerd Authors. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package events defines the event pushing and subscription service. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/containerd/console/tc_openbsd_nocgo.go - vendor/github.com/containerd/console/tc_solaris_nocgo.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Implementing the functions below requires cgo support. Non-cgo stubs - versions are defined below to enable cross-compilation of source code - that depends on these functions, but the resultant cross-compiled - binaries cannot actually be used. If the stub function(s) below are - actually invoked they will display an error message and cause the - calling process to exit. - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/procfs/cpuinfo.go - vendor/github.com/prometheus/procfs/cpuinfo_s390x.go -Copyright: 2019-2020 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build linux - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/golang.org/x/sys/windows/mksyscall.go - vendor/golang.org/x/sys/windows/registry/mksyscall.go -Copyright: 2009-2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build generate - -Files: vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go - vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go -Copyright: 2016-2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build !go1.10 - -Files: vendor/golang.org/x/sys/unix/constants.go - vendor/golang.org/x/sys/unix/timestruct.go -Copyright: 2015-2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos - +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos - -Files: vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE - vendor/github.com/opencontainers/runc/NOTICE -Copyright: 2012-2015 Docker, Inc. -License: __UNKNOWN__ - This product includes software developed at Docker, Inc. (http://www.docker.com). - . - The following is courtesy of our legal counsel: - . - Use and transfer of Docker may be subject to certain restrictions by the - United States and other governments. - It is your responsibility to ensure that your use and/or transfer does not - violate applicable laws. - . - For more information, please see http://www.bis.doc.gov - . - See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. - -Files: vendor/github.com/google/go-cmp/cmp/export_panic.go - vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go -Copyright: 2017-2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build purego - -Files: vendor/golang.org/x/sys/plan9/mkerrors.sh - vendor/golang.org/x/sys/unix/mkerrors.sh -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Generate Go code listing errors and other #defined constant - values (ENAMETOOLONG etc.), by asking the preprocessor - about the definitions. - -Files: vendor/github.com/klauspost/compress/snappy/decode_other.go - vendor/github.com/klauspost/compress/snappy/encode_other.go -Copyright: 2016 The Snappy-Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- +build !amd64 appengine !gc noasm - -Files: vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go - vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go -Copyright: 2016-2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build go1.10 - -Files: NOTICE - vendor/github.com/docker/go-metrics/NOTICE -Copyright: 2012-2015 Docker, Inc. -License: __UNKNOWN__ - This product includes software developed at Docker, Inc. (https://www.docker.com). - . - The following is courtesy of our legal counsel: - . - Use and transfer of Docker may be subject to certain restrictions by the - United States and other governments. - It is your responsibility to ensure that your use and/or transfer does not - violate applicable laws. - . - For more information, please see https://www.bis.doc.gov - . - See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. - -Files: vendor/github.com/tchap/go-patricia/patricia/children.go - vendor/github.com/tchap/go-patricia/patricia/patricia.go -Copyright: 2014 The go-patricia AUTHORS -License: __UNKNOWN__ - Use of this source code is governed by The MIT License - that can be found in the LICENSE file. - -Files: vendor/github.com/fsnotify/fsnotify/inotify.go - vendor/github.com/fsnotify/fsnotify/inotify_poller.go -Copyright: 2010-2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build linux - -Files: vendor/github.com/google/go-cmp/cmp/export_unsafe.go - vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go -Copyright: 2017-2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- +build !purego - -Files: vendor/golang.org/x/sys/windows/svc/sys_windows_386.s - vendor/golang.org/x/sys/windows/svc/sys_windows_amd64.s -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - func servicemain(argc uint32, argv **uint16) - -Files: vendor/github.com/gogo/protobuf/proto/lib.go - vendor/github.com/golang/protobuf/proto/lib.go -Copyright: 2010 The Go Authors. https://github.com/golang/protobuf -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - Package proto converts data structures to and from the wire format of - protocol buffers. It works in concert with the Go source code generated - for .proto files by the protocol compiler. - . - A summary of the properties of the protocol buffer interface - for a protocol buffer variable v: - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. - -Files: vendor/github.com/gogo/protobuf/proto/pointer_reflect.go - vendor/github.com/golang/protobuf/proto/pointer_reflect.go -Copyright: 2012 The Go Authors. https://github.com/golang/protobuf -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . 
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - +build purego appengine js - . - This file contains an implementation of proto field accesses using package reflect. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. - -Files: vendor/github.com/gogo/protobuf/proto/equal.go - vendor/github.com/golang/protobuf/proto/equal.go -Copyright: 2011 The Go Authors. https://github.com/golang/protobuf -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . 
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - Protocol buffer comparison. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. - -Files: vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go - vendor/github.com/golang/protobuf/proto/pointer_unsafe.go -Copyright: 2012 The Go Authors. https://github.com/golang/protobuf -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . 
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - +build !purego,!appengine,!js - . - This file contains the implementation of the proto field accesses using package unsafe. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. - -Files: vendor/github.com/gogo/protobuf/proto/clone.go - vendor/github.com/golang/protobuf/proto/clone.go -Copyright: 2011 The Go Authors. https://github.com/golang/protobuf -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . 
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - Protocol buffer deep copy and merge. - TODO: RawMessage. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. - -Files: vendor/github.com/davecgh/go-spew/spew/bypass.go -Copyright: 2015-2016 Dave Collins -License: ISC - Permission to use, copy, modify, and distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - . - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - . 
- NOTE: Due to the following build constraints, this file will only be compiled - when the code is not running on Google App Engine, compiled by GopherJS, and - "-tags safe" is not added to the go build command line. The "disableunsafe" - tag is deprecated and thus should not be used. - Go versions prior to 1.4 are disabled because they use a different layout - for interfaces which make the implementation of unsafeReflectValue more complex. - +build !js,!appengine,!safe,!disableunsafe,go1.4 - -Files: vendor/github.com/davecgh/go-spew/spew/bypasssafe.go -Copyright: 2015-2016 Dave Collins -License: ISC - Permission to use, copy, modify, and distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - . - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - . - NOTE: Due to the following build constraints, this file will only be compiled - when the code is running on Google App Engine, compiled by GopherJS, or - "-tags safe" is added to the go build command line. The "disableunsafe" - tag is deprecated and thus should not be used. - +build js appengine safe disableunsafe !go1.4 - -Files: vendor/github.com/davecgh/go-spew/spew/doc.go -Copyright: 2013-2016 Dave Collins -License: ISC - Permission to use, copy, modify, and distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - . 
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - . - Package spew implements a deep pretty printer for Go data structures to aid in - debugging. - . - A quick overview of the additional features spew provides over the built-in - printing facilities for Go data types are as follows: - . - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - . - There are two different approaches spew allows for dumping Go data structures: - . - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - . - Quick Start - . - This section demonstrates how to quickly get started with spew. 
See the - sections below for further details on formatting and configuration options. - . - To dump a variable with full newlines, indentation, type, and pointer - information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - -Files: vendor/google.golang.org/grpc/internal/binarylog/binarylog.go -Copyright: 2018 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package binarylog implementation binary logging as defined in - https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/balancer/balancer.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package balancer defines APIs for load balancing in gRPC. - All APIs in this package are experimental. - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: pkg/cap/cap_linux.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package cap provides Linux capability utility - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.pb.go -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Code generated by protoc-gen-gogo. DO NOT EDIT. - source: api.proto - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/conversion/doc.go -Copyright: 2014 The Kubernetes Authors. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package conversion provides go object versioning. - . - Specifically, conversion provides a way for you to define multiple versions - of the same object. You may write functions which implement conversion logic, - but for the fields which did not change, copying is automated. This makes it - easy to modify the structures you use in memory without affecting the format - you store on disk or respond to in your external API calls. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/naming/naming.go -Copyright: 2014 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package naming defines the naming API and related data structures for gRPC. - . - This package is deprecated: please use package resolver instead. - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: test/build-test-images.sh -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - This script is used to build and upload images in integration/images - directory to gcr.io/k8s-cri-containerd repository - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Code generated by protoc-gen-gogo. DO NOT EDIT. - source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. 
- -Files: vendor/k8s.io/klog/v2/klog.go -Copyright: 2013 Google Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. - It provides functions Info, Warning, Error, Fatal, plus formatting variants such as - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/procfs/cpuinfo_others.go -Copyright: 2020 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build linux - +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go -Copyright: 2015 CoreOS, Inc. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: test/build.sh -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - This script is used to build and upload containerd with latest CRI plugin - from containerd/cri in gcr.io/k8s-testimages/kubekins-e2e. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/balancerload/load.go -Copyright: 2019 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package balancerload defines APIs to parse server loads in trailers. The - parsed loads are sent to balancers in DoneInfo. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: Makefile.freebsd -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - freebsd specific settings - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/golang/groupcache/lru/lru.go -Copyright: 2013 Google Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package lru implements an LRU cache. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/client-go/pkg/version/def.bzl -Copyright: 2017 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Implements hack/lib/version.sh's kube::version::ldflags() for Bazel. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/go-logr/logr/logr.go -Copyright: 2019 The logr Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - Package logr defines abstract interfaces for logging. Packages can depend on - these interfaces and callers can implement logging in whatever way is - appropriate. - . - This design derives from Dave Cheney's blog: - http://dave.cheney.net/2015/11/05/lets-talk-about-logging - . - This is a BETA grade API. Until there is a significant 2nd implementation, - I don't really know how it will change. - . - The logging specifically makes it non-trivial to use format strings, to encourage - attaching structured information instead of unstructured format strings. - . - Usage - . - Logging is done using a Logger. Loggers can have name prefixes and named - values attached, so that all log messages logged with that Logger have some - base context associated. - . - The term "key" is used to refer to the name associated with a particular - value, to disambiguate it from the general Logger name. - . - For instance, suppose we're trying to reconcile the state of an object, and - we want to log that we've made some decision. - . - With the traditional log package, we might write: - log.Printf( - "decided to set field foo to value %q for object %s/%s", - targetValue, object.Namespace, object.Name) - . - With logr's structured logging, we'd write: - // elsewhere in the file, set up the logger to log with the prefix of "reconcilers", - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/api/errors/doc.go -Copyright: 2014 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package errors provides detailed error types for api field validation. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/procfs/cpuinfo_x86.go -Copyright: 2020 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build linux - +build 386 amd64 - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/watch/doc.go -Copyright: 2014 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - Package watch contains a generic watchable interface, and a fake for - testing code that uses the watch interface. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: errdefs/errors.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package errdefs defines the common errors used throughout containerd - packages. - . - Use with errors.Wrap and error.Wrapf to add context to an error. - . - To detect an error class, use the IsXXX functions to tell whether an error - is of a certain type. - . - The functions ToGRPC and FromGRPC can be used to map server-side and - client-side errors to the correct types. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/client-go/tools/remotecommand/doc.go -Copyright: 2015 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package remotecommand adds support for executing commands in containers, - with support for separate stdin, stdout, and stderr streams, as well as - TTY. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/grpclog/grpclog.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package grpclog defines logging for grpc. - . - All logs in transport and grpclb packages only go to verbose level 2. - All logs in other packages in grpc are logged in spite of the verbosity level. - . - In the default logger, - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: metadata/buckets.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package metadata stores all labels and object specific metadata by namespace. - This package also contains the main garbage collection logic for cleaning up - resources consistently and atomically. Resources used by backends will be - tracked in the metadata store to be exposed to consumers of this package. - . - The layout where a "/" delineates a bucket is described in the following - section. Please try to follow this as closely as possible when adding - functionality. We can bolster this with helpers and more structure if that - becomes an issue. - . - Generically, we try to do the following: - . - /// -> - . - version: Currently, this is "v1". Additions can be made to v1 in a backwards - compatible way. If the layout changes, a new version must be made, along - with a migration. - . - namespace: the namespace to which this object belongs. - . - object: defines which object set is stored in the bucket. There are two - special objects, "labels" and "indexes". The "labels" bucket stores the - labels for the parent namespace. The "indexes" object is reserved for - indexing objects, if we require in the future. - . - key: object-specific key identifying the storage bucket for the objects - contents. - . - Below is the current database schema. This should be updated each time - the structure is changed in addition to adding a migration and incrementing - the database version. Note that `╘══*...*` refers to maps with arbitrary - keys. 
- ├──version : - Latest version, see migrations - └──v1 - Schema version bucket - ╘══*namespace* - ├──labels - │  ╘══*key* : - Label value - ├──image - │  ╘══*image name* - │   ├──createdat : - Created at - │   ├──updatedat : - Updated at - │   ├──target - │   │  ├──digest : - Descriptor digest - │   │  ├──mediatype : - Descriptor media type - │   │  └──size : - Descriptor size - │   └──labels - │   ╘══*key* : - Label value - ├──containers - │  ╘══*container id* - │   ├──createdat : - Created at - │   ├──updatedat : - Updated at - │   ├──spec : - Proto marshaled spec - │   ├──image : - Image name - │   ├──snapshotter : - Snapshotter name - │   ├──snapshotKey : - Snapshot key - │   ├──runtime - │   │  ├──name : - Runtime name - │   │  ├──extensions - │   │  │  ╘══*name* : - Proto marshaled extension - │   │  └──options : - Proto marshaled options - │   └──labels - │   ╘══*key* : - Label value - ├──snapshots - │  ╘══*snapshotter* - │   ╘══*snapshot key* - │    ├──name : - Snapshot name in backend - │   ├──createdat : - Created at - │   ├──updatedat : - Updated at - │    ├──parent : - Parent snapshot name - │   ├──children - │   │  ╘══*snapshot key* : - Child snapshot reference - │   └──labels - │   ╘══*key* : - Label value - ├──content - │  ├──blob - │  │ ╘══*blob digest* - │  │ ├──createdat : - Created at - │  │ ├──updatedat : - Updated at - │  │   ├──size : - Blob size - │  │ └──labels - │  │ ╘══*key* : - Label value - │  └──ingests - │   ╘══*ingest reference* - │    ├──ref : - Ingest reference in backend - │   ├──expireat : - Time to expire ingest - │   └──expected : - Expected commit digest - └──leases - ╘══*lease id* - ├──createdat : - Created at - ├──labels - │ ╘══*key* : - Label value - ├──snapshots - │  ╘══*snapshotter* - │   ╘══*snapshot key* : - Snapshot reference - ├──content - │  ╘══*blob digest* : - Content blob reference - └──ingests - ╘══*ingest reference* : - Content ingest reference - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go -Copyright: 2015 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package httpstream adds multiplexed streaming support to HTTP requests and - responses via connection upgrades. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/runtime/doc.go -Copyright: 2014 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package runtime includes helper functions for working with API objects - that follow the kubernetes API object conventions, which are: - . - 0. Your API objects have a common metadata struct member, TypeMeta. - . - 1. Your code refers to an internal set of API objects. - . 
- 2. In a separate package, you have an external set of API objects. - . - 3. The external set is considered to be versioned, and no breaking - changes are ever made to it (fields may be added but not changed - or removed). - . - 4. As your api evolves, you'll make an additional versioned package - with every major change. - . - 5. Versioned packages have conversion functions which convert to - and from the internal version. - . - 6. You'll continue to support older versions according to your - deprecation policy, and you can easily provide a program/library - to update old versions into new versions because of 5. - . - 7. All of your serializations and deserializations are handled in a - centralized place. - . - Package runtime provides a conversion helper to make 5 easy, and the - Encode/Decode/DecodeInto trio to accomplish 7. You can also register - additional "codecs" which use a version of your choice. It's - recommended that you register your types with runtime in your - package's init function. - . - As a bonus, a few common types useful from all api objects and versions - are provided in types.go. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/client_golang/prometheus/doc.go -Copyright: 2014 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package prometheus is the core instrumentation package. 
It provides metrics - primitives to instrument code for monitoring. It also offers a registry for - metrics. Sub-packages allow to expose the registered metrics via HTTP - (package promhttp) or push them to a Pushgateway (package push). There is - also a sub-package promauto, which provides metrics constructors with - automatic registration. - . - All exported functions and methods are safe to be used concurrently unless - specified otherwise. - . - A Basic Example - . - As a starting point, a very basic usage example: - . - package main - . - import ( - "log" - "net/http" - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/connectivity/connectivity.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package connectivity defines connectivity semantics. - For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. - All APIs in this package are experimental. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/buffer/unbounded.go -Copyright: 2019 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package buffer provides an implementation of an unbounded buffer. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: script/setup/config-selinux -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - set the desired SELinux mode via envvar - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/containerd/continuity/Makefile -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - Set an output prefix, which is the local directory if not specified - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: integration/util/doc.go -Copyright: 2015 The Kubernetes Authors. - The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package util holds utility functions. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: Makefile.linux -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . 
- http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - linux specific settings - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/grpcsync/event.go -Copyright: 2018 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package grpcsync implements additional synchronization primitives built upon - the sync package. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/util/sets/doc.go -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - Code generated by set-gen. DO NOT EDIT. - . - Package sets has auto-generated set types. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go -Copyright: 2019 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build !go1.12 - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: pkg/cri/streaming/portforward/constants.go -Copyright: 2015 The Kubernetes Authors. - The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package portforward contains server-side logic for handling port forwarding requests. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/containerd/cgroups/v2/devicefilter.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Devicefilter containes eBPF device filter program - . - The implementation is based on https://github.com/containers/crun/blob/0.10.2/src/libcrun/ebpf.c - . - Although ebpf.c is originally licensed under LGPL-3.0-or-later, the author (Giuseppe Scrivano) - agreed to relicense the file in Apache License 2.0: https://github.com/opencontainers/runc/issues/2144#issuecomment-543116397 - . 
- This particular Go implementation based on runc version - https://github.com/opencontainers/runc/blob/master/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/go.opencensus.io/trace/doc.go -Copyright: 2017 OpenCensus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package trace contains support for OpenCensus distributed tracing. - . - The following assumes a basic familiarity with OpenCensus concepts. - See http://opencensus.io - . - Exporting Traces - . - To export collected tracing data, register at least one exporter. You can use - one of the provided exporters or write your own. - . - trace.RegisterExporter(exporter) - . - By default, traces will be sampled relatively rarely. To change the sampling - frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler - to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - . - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - . - Be careful about using trace.AlwaysSample in a production application with - significant traffic: a new trace will be started and exported for every request. - . - Adding Spans to a Trace - . - A trace consists of a tree of spans. In Go, the current span is carried in a - context.Context. - . 
- It is common to want to capture all the activity of a function call in a span. For - this to work, the function must take a context.Context as a parameter. Add these two - lines to the top of the function: - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: filters/filter.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package filters defines a syntax and parser that can be used for the - filtration of items across the containerd API. The core is built on the - concept of protobuf field paths, with quoting. Several operators allow the - user to flexibly select items based on field presence, equality, inequality - and regular expressions. Flexible adaptors support working with any type. - . - The syntax is fairly familiar, if you've used container ecosystem - projects. At the core, we base it on the concept of protobuf field - paths, augmenting with the ability to quote portions of the field path - to match arbitrary labels. These "selectors" come in the following - syntax: - . - ``` - [] - ``` - . - A basic example is as follows: - . - ``` - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/backoff/backoff.go -Copyright: 2017 gRPC authors. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package backoff implement the backoff strategy for gRPC. - . - This is kept in internal until the gRPC project decides whether or not to - allow alternative backoff strategies. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: remotes/docker/config/hosts.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package config contains utilities for helping configure the Docker resolver - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/containerd/continuity/groups_unix.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - nolint:unused,deadcode - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: script/setup/install-dev-tools -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Install developer tools to $GOBIN (or $GOPATH/bin if unset) - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/keepalive/keepalive.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - Package keepalive defines configurable parameters for point-to-point - healthcheck. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/go.opencensus.io/opencensus.go -Copyright: 2017 OpenCensus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package opencensus contains Go support for OpenCensus. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go -Copyright: 2018 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package syscall provides functionalities that grpc uses to get low-level operating system - stats/info. - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.proto -Copyright: 2018 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - To regenerate api.pb.go run hack/update-generated-runtime.sh - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/encoding/proto/proto.go -Copyright: 2018 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package proto defines the protobuf codec. Importing this package will - register the codec. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. 
- -Files: vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go -Copyright: 2013 Matt T. Proud -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package pbutil provides record length-delimited Protocol Buffer streaming. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/utils/exec/doc.go -Copyright: 2017 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package exec provides an injectable interface and implementations for running commands. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/client-go/util/workqueue/doc.go -Copyright: 2014 The Kubernetes Authors. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package workqueue provides a simple queue that supports the following - features: - * Fair: items processed in the order in which they are added. - * Stingy: a single item will not be processed multiple times concurrently, - and if an item is added multiple times before it can be processed, it - will only be processed once. - * Multiple consumers and producers. In particular, it is allowed for an - item to be reenqueued while it is being processed. - * Shutdown notifications. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package roundrobin defines a roundrobin balancer. 
Roundrobin balancer is - installed as one of the default balancers in gRPC, users don't need to - explicitly install this balancer. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/util/framer/framer.go -Copyright: 2015 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package framer implements simple frame decoding techniques for an io.ReadCloser - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go -Copyright: 2015 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package streaming implements encoder and decoder for streams - of runtime.Objects over io.Writer/Readers. - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/backoff.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - See internal/backoff package for the backoff implementation. This file is - kept for the exported types and API backward compatibility. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/go.opencensus.io/trace/trace_nongo11.go -Copyright: 2018 OpenCensus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build !go1.11 - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/status/status.go -Copyright: 2017 gRPC authors. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package status implements errors returned by gRPC. These errors are - serialized and transmitted on the wire between server and client, and allow - for additional data to be transmitted via the Details field in the status - proto. gRPC service handlers should return an error created by this - package, and gRPC clients should expect a corresponding error to be - returned from the RPC call. - . - This package upholds the invariants that a non-nil error may not - contain an OK code, and an OK code must result in a nil error. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: Vagrantfile -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Vagrantfile for cgroup2 and SELinux - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: gc/gc.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package gc experiments with providing central gc tooling to ensure - deterministic resource removal within containerd. - . - For now, we just have a single exported implementation that can be used - under certain use cases. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/transport/transport.go -Copyright: 2014 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package transport defines and implements message oriented communication - channel to complete various transactions (e.g., an RPC). 
It is meant for - grpc-internal usage and is not intended to be imported directly by users. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/types/doc.go -Copyright: 2015 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package types implements various generic types used throughout kubernetes. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: snapshots/storage/metastore.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package storage provides a metadata storage implementation for snapshot - drivers. Drive implementations are responsible for starting and managing - transactions using the defined context creator. 
This storage package uses - BoltDB for storing metadata. Access to the raw boltdb transaction is not - provided, but the stored object is provided by the proto subpackage. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/util/errors/doc.go -Copyright: 2015 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package errors implements various utility functions and types around errors. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/opencontainers/image-spec/identity/chainid.go -Copyright: 2016 The Linux Foundation -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package identity provides implementations of subtle calculations pertaining - to image and layer identity. 
The primary item present here is the ChainID - calculation used in identifying the result of subsequent layer applications. - . - Helpers are also provided here to ease transition to the - github.com/opencontainers/go-digest package, but that package may be used - directly. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/internal.go -Copyright: 2016 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package internal contains gRPC-internal code, to avoid polluting - the godoc of the top-level grpc package. It must not import any grpc - symbols to avoid circular dependencies. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: pkg/cri/store/truncindex/truncindex.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - This file is a copy of moby/moby/pkg/truncindex/truncindex.go - . - Package truncindex provides a general 'index tree', used by Docker - in order to be able to reference containers by only a few unambiguous - characters of their id. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/tap/tap.go -Copyright: 2016 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package tap defines the function handles which are executed on the transport - layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: api/events/doc.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - Package events has protobuf types for various events that are used in - containerd. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go -Copyright: 2016 CNI authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build darwin dragonfly freebsd linux netbsd openbsd solaris - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/backoff/backoff.go -Copyright: 2019 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package backoff provides configuration options for backoff. - . 
- More details can be found at: - https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. - . - All APIs in this package are experimental. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: images/archive/importer.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package archive provides a Docker and OCI compatible importer - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/opencontainers/go-digest/doc.go -Copyright: 2017 Docker, Inc. - 2019-2020 OCI Contributors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - https://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package digest provides a generalized type to opaquely represent message - digests and their operations within the registry. 
The Digest type is - designed to serve as a flexible identifier in a content-addressable system. - More importantly, it provides tools and wrappers to work with - hash.Hash-based digests with little effort. - . - Basics - . - The format of a digest is simply a string with two parts, dubbed the - "algorithm" and the "digest", separated by a colon: - . - <algorithm>:<digest> - . - An example of a sha256 digest representation follows: - . - sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc - . - The "algorithm" portion defines both the hashing algorithm used to calculate - the digest and the encoding of the resulting digest, which defaults to "hex" - if not otherwise specified. Currently, all supported algorithms have their - digests encoded in hex strings. - . - In the example above, the string "sha256" is the algorithm and the hex bytes - are the "digest". - . - Because the Digest type is simply a string, once a valid Digest is - obtained, comparisons are cheap, quick and simple to express with the - standard equality operator. - . - Verification - . - The main benefit of using the Digest type is simple verification against a - given digest. The Verifier interface, modeled after the stdlib hash.Hash - interface, provides a common write sink for digest verification. After - writing is complete, calling the Verifier.Verified method will indicate - whether or not the stream of bytes matches the target digest. - . - Missing Features - . - In addition to the above, we intend to add the following features to this - package: - . - 1. A Digester type that supports write sink digest calculation. - . - 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: script/setup/install-critools -Copyright: The containerd Authors. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Builds and installs critools including critest and crictl - to /usr/local/bin. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go -Copyright: 2019 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build linux,appengine !linux - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/transport/log.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - This file contains wrappers for grpclog functions. - The transport package only logs to verbose level 2 by default. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/common/expfmt/expfmt.go -Copyright: 2015 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package expfmt contains tools for reading and writing Prometheus metrics. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/metadata/metadata.go -Copyright: 2014 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package metadata define the structure of the metadata supported by gRPC library. - Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md - for more information about custom-metadata. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/peer/peer.go -Copyright: 2014 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package peer defines various peer information associated with RPCs and - corresponding utils. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/envconfig/envconfig.go -Copyright: 2018 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package envconfig contains grpc settings configured by environment variables. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/credentials/internal/syscallconn.go -Copyright: 2018 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package internal contains credentials-internal code. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/gopkg.in/yaml.v3/yaml.go -Copyright: 2011-2019 Canonical Ltd -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - Package yaml implements YAML support for the Go language. - . - Source code and other details for the project are available at GitHub: - . - https://github.com/go-yaml/yaml - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go -Copyright: 2018 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build linux,!appengine - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/client-go/util/keyutil/key.go -Copyright: 2018 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . 
- Package keyutil contains utilities for managing public/private key pairs. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go -Copyright: 2016 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package promhttp provides tooling around HTTP servers and clients. - . - First, the package allows the creation of http.Handler instances to expose - Prometheus metrics via HTTP. promhttp.Handler acts on the - prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a - custom registry or anything that implements the Gatherer interface. It also - allows the creation of handlers that act differently on errors or allow to - log errors. - . - Second, the package provides tooling to instrument instances of http.Handler - via middleware. Middleware wrappers follow the naming scheme - InstrumentHandlerX, where X describes the intended use of the middleware. - See each function's doc comment for specific details. - . - Finally, the package allows for an http.RoundTripper to be instrumented via - middleware. Middleware wrappers follow the naming scheme - InstrumentRoundTripperX, where X describes the intended use of the - middleware. See each function's doc comment for specific details. - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/util/wait/doc.go -Copyright: 2014 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package wait provides tools for polling or listening for changes - to a condition. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/credentials/credentials.go -Copyright: 2014 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package credentials implements various credentials supported by gRPC library, - which encapsulate all the state needed by a client to authenticate with a - server and make various assertions, e.g., about the client's identity, role, - or whether it is authorized to make a particular call. - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go -Copyright: 2019 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package serviceconfig defines types and methods for operating on gRPC - service configs. - . - This package is EXPERIMENTAL. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Code generated by protoc-gen-gogo. DO NOT EDIT. - source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/procfs/Makefile.common -Copyright: 2018 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - A common Makefile that includes rules to be reused in different prometheus projects. - !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! - . - Example usage : - Create the main Makefile in the root project directory. - include Makefile.common - customTarget: - @echo ">> Running customTarget" - . - Ensure GOBIN is not set during build so that promu is installed to the correct path - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: runtime/restart/restart.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . 
- Package restart enables containers to have labels added and monitored to - keep the container's task running if it is killed. - . - Setting the StatusLabel on a container instructs the restart monitor to keep - that container's task in a specific status. - Setting the LogPathLabel on a container will setup the task's IO to be redirected - to a log file when running a task within the restart manager. - . - The restart labels can be cleared off of a container using the WithNoRestarts Opt. - . - The restart monitor has one option in the containerd config under the [plugins.restart] - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/runtime/conversion.go -Copyright: 2014 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package runtime defines conversions between generic types and structs to map query strings - to struct objects. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/channelz/funcs.go -Copyright: 2018 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package channelz defines APIs for enabling channelz service, entry - registration/deletion, and accessing channelz data. It also defines channelz - metric struct formats. - . - All APIs in this package are experimental. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go -Copyright: 2018 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - This file contains exported variables/functions that are exported for testing - only. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/sigs.k8s.io/structured-merge-diff/v4/value/doc.go -Copyright: 2018 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package value defines types for an in-memory representation of yaml or json - objects, organized for convenient comparison with a schema (as defined by - the sibling schema package). Functions for reading and writing the objects - are also provided. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go -Copyright: 2020 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build linux - +build ppc64 ppc64le - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/encoding/encoding.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package encoding defines the interface for the compressor and codec, and - functions to register and retrieve compressors and codecs. - . - This package is EXPERIMENTAL. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/doc.go -Copyright: 2015 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package grpc implements an RPC system called gRPC. - . - See grpc.io for more information about gRPC. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: integration/remote/doc.go -Copyright: 2016 The Kubernetes Authors. - The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package remote contains gRPC implementation of internalapi.RuntimeService - and internalapi.ImageManagerService. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: pkg/cri/server/bandwidth/doc.go -Copyright: 2015 The Kubernetes Authors. - The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . 
- http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package bandwidth provides utilities for bandwidth shaping - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: platforms/platforms.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package platforms provides a toolkit for normalizing, matching and - specifying container platforms. - . - Centered around OCI platform specifications, we define a string-based - specifier syntax that can be used for user input. With a specifier, users - only need to specify the parts of the platform that are relevant to their - context, providing an operating system or architecture or both. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: test/e2e_node/gci-init.sh -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - This script is used to do extra initialization on GCI. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go -Copyright: 2015 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package v1 contains API types that are common to all versions. - . - The package contains two categories of types: - - external (serialized) types that lack their own version (e.g TypeMeta) - - internal (never-serialized) types that are needed by several different - api groups, and so live here, to avoid duplication and/or import loops - (e.g. LabelSelector). - In the future, we will probably move these categories of objects into - separate packages. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. 
- -Files: vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go -Copyright: 2020 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build linux - +build riscv riscv64 - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: Makefile -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Go command to use for build - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: script/setup/install-cni -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Builds and installs cni plugins to /opt/cni/bin, - and create basic cni config in /etc/cni/net.d. - The commit defined in go.mod - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go -Copyright: 2020 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build linux - +build mips mipsle mips64 mips64le - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/go.opencensus.io/trace/trace_go11.go -Copyright: 2018 OpenCensus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - +build go1.11 - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/balancer/base/base.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package base defines a balancer base that can be used to build balancers with - different picking algorithms. - . - The base balancer creates a new SubConn for each resolved address. The - provided picker will only be notified about READY SubConns. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go -Copyright: 2015 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . 
- Package protobuf provides a Kubernetes serializer for the protobuf format. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/resolver/resolver.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package resolver defines APIs for name resolution in gRPC. - All APIs in this package are experimental. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package passthrough implements a pass-through resolver. It sends the target - name without scheme back to gRPC as resolved address. - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/containerd/continuity/driver/driver_windows.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Go 1.13 is the minimally supported version for Windows. - Earlier golang releases have bug in os.Readlink - (see https://github.com/golang/go/issues/30463). - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: pkg/progress/doc.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package progress assists in displaying human readable progress information. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. 
- -Files: vendor/github.com/prometheus/procfs/doc.go -Copyright: 2014 Prometheus Team -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package procfs provides functions to retrieve system, kernel and process - metrics from the pseudo-filesystem proc. - . - Example: - . - package main - . - import ( - "fmt" - "log" - . - "github.com/prometheus/procfs" - ) - . - func main() { - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: pkg/cri/streaming/remotecommand/doc.go -Copyright: 2016 The Kubernetes Authors. - The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package remotecommand contains functions related to executing commands in and attaching to pods. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/health/server.go -Copyright: 2017 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - go:generate ./regenerate.sh - . - Package health provides a service that exposes server's health and it must be - imported to enable support for client-side health checks. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: script/setup/install-seccomp -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Builds a specific version of libseccomp and installs in /usr/local - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go -Copyright: 2014 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package queryparams provides conversion from versioned - runtime objects to URL query values - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/fields/doc.go -Copyright: 2015 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - Package fields implements a simple field system, parsing and matching - selectors with sets of fields. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Code generated by protoc-gen-gogo. DO NOT EDIT. - source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: images/converter/converter.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package converter provides image converter - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/klog/v2/klog_file.go -Copyright: 2013 Google Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - File I/O for logs. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/go.opencensus.io/trace/internal/internal.go -Copyright: 2018 OpenCensus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package internal provides trace internals. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: identifiers/validate.go -Copyright: The containerd Authors. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package identifiers provides common validation for identifiers and keys - across containerd. - . - Identifiers in containerd must be a alphanumeric, allowing limited - underscores, dashes and dots. - . - While the character set may be expanded in the future, identifiers - are guaranteed to be safely used as filesystem path components. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: reference/docker/reference.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package docker provides a general type to represent any way of referencing images within the registry. - Its main purpose is to abstract tags and digests (content-addressable hash). - . - Grammar - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apiserver/pkg/server/httplog/doc.go -Copyright: 2014 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package httplog contains a helper object and functions to maintain a log - along with an http response. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: defaults/doc.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package defaults provides several common defaults for interacting with - containerd. These can be used on the client-side or server-side. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. 
- -Files: script/setup/install-imgcrypt -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Builds and installs imgcrypt including ctd-decoder and ctr-enc - to /usr/local/bin. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Code generated by protoc-gen-gogo. DO NOT EDIT. - source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/codes/codes.go -Copyright: 2014 gRPC authors. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package codes defines the canonical error codes used by gRPC. It is - consistent across various languages. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go -Copyright: 2018 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package grpcrand implements math/rand functions in a concurrent-safe way - with a global random source, independent of math/rand's global source. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/client-go/util/connrotation/connrotation.go -Copyright: 2018 The Kubernetes Authors. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package connrotation implements a connection dialer that tracks and can close - all created connections. - . - This is used for credential rotation of long-lived connections, when there's - no way to re-authenticate on a live connection. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/grpc/internal/transport/handler_server.go -Copyright: 2016 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - This file is the implementation of a gRPC server using HTTP/2 which - uses the standard Go http2 Server implementation (via the - http.Handler interface), rather than speaking low-level HTTP/2 - frames itself. It is the implementation of *grpc.Server.ServeHTTP. - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/labels/doc.go -Copyright: 2014 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package labels implements a simple label system, parsing and matching - selectors with sets of labels. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/go.opencensus.io/trace/tracestate/tracestate.go -Copyright: 2018 OpenCensus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package tracestate implements support for the Tracestate header of the - W3C TraceContext propagation format. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. 
- -Files: vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go -Copyright: 2018 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package dns implements a dns resolver to be installed as the default resolver - in grpc. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go -Copyright: 2015 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package wsstream contains utilities for streaming content over WebSockets. - The Conn type allows callers to multiplex multiple read/write channels over - a single websocket. The Reader type allows an io.Reader to be copied over - a websocket channel as binary content. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. 
- -Files: vendor/github.com/prometheus/client_golang/prometheus/build_info.go -Copyright: 2019 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build go1.12 - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/pelletier/go-toml/localtime.go -Copyright: 2016 Google LLC -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package civil implements types for civil time, a time-zone-independent - representation of time that follows the rules of the proleptic - Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second - minutes. - . - Because they lack location information, these types do not represent unique - moments or intervals of time. Use time.Time for that purpose. - . 
- On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/procfs/proc_maps.go -Copyright: 2019 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build aix darwin dragonfly freebsd linux netbsd openbsd solaris - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: Makefile.darwin -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - darwin specific settings - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: Makefile.windows -Copyright: The containerd Authors. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Windows specific settings. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/gopkg.in/square/go-jose.v2/doc.go -Copyright: 2014 Square Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package jose aims to provide an implementation of the Javascript Object Signing - and Encryption set of standards. It implements encryption and signing based on - the JSON Web Encryption and JSON Web Signature standards, with optional JSON - Web Token support available in a sub-package. The library supports both the - compact and full serialization formats, and has optional support for multiple - recipients. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. 
- -Files: vendor/google.golang.org/grpc/attributes/attributes.go -Copyright: 2019 gRPC authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package attributes defines a generic key/value store used in various gRPC - components. - . - All APIs in this package are EXPERIMENTAL. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/google/gofuzz/doc.go -Copyright: 2014 Google Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package fuzz is a library for populating go objects with random values. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: script/setup/config-containerd -Copyright: The containerd Authors. 
-License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - establishes /etc/containerd/config.toml - parameterized by the current SELinux mode - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/containerd/btrfs/doc.go -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package btrfs provides bindings for working with btrfs partitions from Go. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: script/setup/install-protobuf -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Downloads and installs protobuf - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/procfs/cpuinfo_armx.go -Copyright: 2020 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - +build linux - +build arm arm64 - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/common/model/model.go -Copyright: 2013 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- . - Package model contains common data structures that are shared across - Prometheus components and libraries. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/prometheus/common/expfmt/fuzz.go -Copyright: 2014 The Prometheus Authors -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Build only when actually fuzzing - +build gofuzz - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/client-go/tools/metrics/metrics.go -Copyright: 2015 The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Package metrics provides abstractions for registering which metrics - to record. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. 
- -Files: script/setup/install-runc -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Builds and installs runc to /usr/local/go/bin based off - the commit defined in go.mod - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/github.com/containerd/imgcrypt/Makefile -Copyright: The containerd Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Base path used to install. - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go -Copyright: The Kubernetes Authors. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . 
- http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - Code generated by protoc-gen-gogo. DO NOT EDIT. - source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto - . - On Debian systems, the complete text of the Apache License Version 2.0 - can be found in `/usr/share/common-licenses/Apache-2.0'. - -Files: vendor/google.golang.org/appengine/urlfetch/urlfetch.go -Copyright: 2011 Google Inc. -License: __UNKNOWN__ - Use of this source code is governed by the Apache 2.0 - license that can be found in the LICENSE file. - . - Package urlfetch provides an http.RoundTripper implementation - for fetching URLs via App Engine's urlfetch service. - -Files: vendor/golang.org/x/sys/plan9/dir_plan9.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Plan 9 directory marshalling. See intro(5). - -Files: vendor/golang.org/x/net/http/httpguts/guts.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package httpguts provides functions implementing various details - of the HTTP specification. - . - This package is shared by the standard library (which vendors it) - and x/net/http2. It comes with no API stability promise. - -Files: vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- go:build gccgo && linux && amd64 - +build gccgo,linux,amd64 - -Files: vendor/golang.org/x/sys/windows/svc/go12.c -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows - +build !go1.3 - . - copied from pkg/runtime - -Files: vendor/golang.org/x/sys/windows/svc/service.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows - . - Package svc provides everything required to build Windows service. - -Files: vendor/golang.org/x/oauth2/internal/doc.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package internal contains support packages for oauth2 package. - -Files: vendor/golang.org/x/sys/unix/bluetooth_linux.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Bluetooth sockets and messages - -Files: vendor/golang.org/x/sys/unix/endian_little.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh - +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh - -Files: vendor/golang.org/x/sys/windows/svc/mgr/mgr.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows - . - Package mgr can be used to manage Windows service programs. - It can be used to install and remove them. 
It can also start, - stop and pause them. The package can query / change current - service state and config parameters. - -Files: vendor/golang.org/x/term/term.go -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package term provides support functions for dealing with terminals, as - commonly found on UNIX systems. - . - Putting a terminal into raw mode is the most common requirement: - -Files: vendor/github.com/google/uuid/node_js.go -Copyright: 2017 Google Inc. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build js - -Files: vendor/golang.org/x/sys/unix/pagesize_unix.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris - +build aix darwin dragonfly freebsd linux netbsd openbsd solaris - . - For Unix, get the pagesize from the runtime. - -Files: vendor/github.com/emicklei/go-restful/curly_route.go -Copyright: 2013 Ernest Micklei. -License: __UNKNOWN__ - Use of this source code is governed by a license - that can be found in the LICENSE file. - . - curlyRoute exits for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements. - -Files: vendor/golang.org/x/sys/windows/svc/go12.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows - +build !go1.3 - -Files: vendor/github.com/klauspost/compress/zstd/decodeheader.go -Copyright: 2020 + Klaus Post. -License: __UNKNOWN__ - License information can be found in the LICENSE file. - -Files: vendor/github.com/fsnotify/fsnotify/fsnotify.go -Copyright: 2012 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build !plan9 - . - Package fsnotify provides a platform-independent interface for file system notifications. - -Files: vendor/golang.org/x/net/http2/client_conn_pool.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Transport code's client connection pooling. - -Files: vendor/github.com/klauspost/compress/snappy/snappy.go -Copyright: 2011 The Snappy-Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package snappy implements the Snappy compression format. It aims for very - high speeds and reasonable compression. - . - There are actually two Snappy formats: block and stream. They are related, - but different: trying to decompress block-compressed data as a Snappy stream - will fail, and vice versa. The block format is the Decode and Encode - functions and the stream format is the Reader and Writer types. - . - The block format, the more common case, is used when the complete size (the - number of bytes) of the original data is known upfront, at the time - compression starts. The stream format, also known as the framing format, is - for when that isn't always true. - . - The canonical, C++ implementation is at https://github.com/google/snappy and - it only implements the block format. - -Files: vendor/golang.org/x/sys/windows/registry/key.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows - . - Package registry provides access to the Windows registry. - . - Here is a simple example, opening a registry key and reading a string value from it. 
- -Files: vendor/golang.org/x/sys/unix/syscall_linux_386.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build 386 && linux - +build 386,linux - -Files: vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build arm64 && openbsd - +build arm64,openbsd - -Files: vendor/golang.org/x/sys/plan9/env_plan9.go -Copyright: 2011 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Plan 9 environment variables. - -Files: vendor/golang.org/x/sys/unix/sockcmsg_unix.go -Copyright: 2011 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos - +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos - . - Socket control messages - -Files: vendor/golang.org/x/net/trace/trace.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package trace implements tracing of requests and long-lived objects. - It exports HTTP interfaces on /debug/requests and /debug/events. - . - A trace.Trace provides tracing for short-lived objects, usually requests. - A request handler might be implemented like this: - . - func fooHandler(w http.ResponseWriter, req *http.Request) { - -Files: vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- go:build linux && gccgo && arm - +build linux,gccgo,arm - -Files: vendor/github.com/emicklei/go-restful/options_filter.go -Copyright: 2013 Ernest Micklei. -License: __UNKNOWN__ - Use of this source code is governed by a license - that can be found in the LICENSE file. - . - OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method - and provides the response with a set of allowed methods for the request URL Path. - As for any filter, you can also install it for a particular WebService within a Container. - Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). - -Files: vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build arm && gc && linux - +build arm,gc,linux - -Files: vendor/github.com/google/uuid/node_net.go -Copyright: 2017 Google Inc. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build !js - -Files: vendor/golang.org/x/sys/unix/asm_zos_s390x.s -Copyright: 2020 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build zos && s390x && gc - +build zos - +build s390x - +build gc - -Files: vendor/golang.org/x/sys/unix/dev_netbsd.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Functions to access/create device major and minor numbers matching the - encoding used in NetBSD's sys/types.h header. - -Files: vendor/golang.org/x/sys/unix/readdirent_getdents.go -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- go:build aix || dragonfly || freebsd || linux || netbsd || openbsd - +build aix dragonfly freebsd linux netbsd openbsd - -Files: vendor/google.golang.org/appengine/internal/internal.go -Copyright: 2011 Google Inc. -License: __UNKNOWN__ - Use of this source code is governed by the Apache 2.0 - license that can be found in the LICENSE file. - . - Package internal provides support for package appengine. - . - Programs should not use this package directly. Its API is not stable. - Use packages appengine and appengine/* instead. - -Files: vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC - 2898 / PKCS #5 v2.0. - . - A key derivation function is useful when encrypting data based on a password - or any other not-fully-random data. It uses a pseudorandom function to derive - a secure encryption key based on the password. - . - While v2.0 of the standard defines only one pseudorandom function to use, - HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved - Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To - choose, you can pass the `New` functions from the different SHA packages to - pbkdf2.Key. - -Files: vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build arm && freebsd - +build arm,freebsd - -Files: vendor/golang.org/x/sys/unix/cap_freebsd.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build freebsd - +build freebsd - -Files: vendor/github.com/google/uuid/doc.go -Copyright: 2016 Google Inc. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package uuid generates and inspects UUIDs. - . - UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security - Services. - . - A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to - maps or compared directly. - -Files: vendor/golang.org/x/sys/unix/dev_dragonfly.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Functions to access/create device major and minor numbers matching the - encoding used in Dragonfly's sys/types.h header. - . - The information below is extracted and adapted from sys/types.h: - . - Minor gives a cookie instead of an index since in order to avoid changing the - meanings of bits 0-15 or wasting time and space shifting bits 16-31 for - devices that don't use them. - -Files: vendor/golang.org/x/net/http2/not_go111.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build !go1.11 - +build !go1.11 - -Files: vendor/golang.org/x/sys/unix/dev_freebsd.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Functions to access/create device major and minor numbers matching the - encoding used in FreeBSD's sys/types.h header. - . - The information below is extracted and adapted from sys/types.h: - . - Minor gives a cookie instead of an index since in order to avoid changing the - meanings of bits 0-15 or wasting time and space shifting bits 16-31 for - devices that don't use them. - -Files: vendor/golang.org/x/crypto/ed25519/ed25519.go -Copyright: 2016 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - In Go 1.13, the ed25519 package was promoted to the standard library as - crypto/ed25519, and this package became a wrapper for the standard library one. - . - go:build !go1.13 - +build !go1.13 - . - Package ed25519 implements the Ed25519 signature algorithm. See - https://ed25519.cr.yp.to/. - . - These functions are also compatible with the “Ed25519” function defined in - RFC 8032. However, unlike RFC 8032's formulation, this package's private key - representation includes a public key suffix to make multiple signing - operations with the same key more efficient. This package refers to the RFC - 8032 private key as the “seed”. - -Files: vendor/golang.org/x/term/term_unix.go -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build aix darwin dragonfly freebsd linux netbsd openbsd zos - -Files: vendor/golang.org/x/sys/unix/syscall_linux.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Linux system calls. - This file is compiled as ordinary Go code, - but it is also input to mksyscall, - which parses the //sys lines and generates system call stubs. - Note that sometimes we use a lowercase //sys name and - wrap it in our own nicer implementation. - -Files: vendor/github.com/emicklei/go-restful/filter.go -Copyright: 2013 Ernest Micklei. -License: __UNKNOWN__ - Use of this source code is governed by a license - that can be found in the LICENSE file. - . - FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. - -Files: vendor/golang.org/x/net/internal/timeseries/timeseries.go -Copyright: 2015 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package timeseries implements a time series structure for stats collection. - -Files: vendor/golang.org/x/sys/unix/syscall_openbsd.go -Copyright: 2009-2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - OpenBSD system calls. - This file is compiled as ordinary Go code, - but it is also input to mksyscall, - which parses the //sys lines and generates system call stubs. - Note that sometimes we use a lowercase //sys name and wrap - it in our own nicer implementation, either here or in - -Files: vendor/golang.org/x/sys/unix/race0.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos - +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos - -Files: vendor/golang.org/x/sys/unix/syscall_linux_ppc.go -Copyright: 2021 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && ppc - +build linux - +build ppc - -Files: vendor/github.com/opencontainers/go-digest/README.md -Copyright: 2016 Docker, Inc. except as follows. - 2019-2020 OCI Contributors -License: __UNKNOWN__ - Code is released under the [Apache 2.0 license](LICENSE). - This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). 
- You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. - . - [security]: https://github.com/opencontainers/org/blob/master/security - -Files: vendor/golang.org/x/sys/unix/asm_linux_s390x.s -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && s390x && gc - +build linux - +build s390x - +build gc - -Files: vendor/github.com/urfave/cli/app.go -Copyright: __NO_COPYRIGHT__ in: vendor/github.com/urfave/cli/app.go -License: __UNKNOWN__ - // Name of Author (Note: Use App.Authors, this is deprecated) - Author string - // Email of Author (Note: Use App.Authors, this is deprecated) - Email string - // Writer writer to write output to - Writer io.Writer - // ErrWriter writes error output - ErrWriter io.Writer - // Execute this function to handle ExitErrors. If not provided, HandleExitCoder is provided to - // function as a default, so this is optional. - ExitErrHandler ExitErrHandlerFunc - // Other custom info - Metadata map[string]interface{} - // Carries a function which returns app specific info. - ExtraInfo func() map[string]string - // CustomAppHelpTemplate the text template for app help topic. - // cli.go uses text/template to render templates. You can - // render custom help text by setting this variable. - CustomAppHelpTemplate string - // Boolean to enable short-option handling so user can combine several - // single-character bool arguements into one - // i.e. foobar -o -v -> foobar -ov - UseShortOptionHandling bool - . - didSetup bool - } - . - // Tries to find out when this binary was compiled. - // Returns the current time if it fails to find it. - func compileTime() time.Time { - -Files: vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go -Copyright: 2009 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build amd64 && darwin - +build amd64,darwin - -Files: vendor/golang.org/x/sys/unix/sockcmsg_linux.go -Copyright: 2011 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Socket control messages - -Files: vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build darwin && go1.12 && !go1.13 - +build darwin,go1.12,!go1.13 - -Files: vendor/gopkg.in/square/go-jose.v2/json/encode.go -Copyright: 2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package json implements encoding and decoding of JSON objects as defined in - RFC 4627. The mapping between JSON objects and Go values is described - in the documentation for the Marshal and Unmarshal functions. - . - See "JSON and Go" for an introduction to this package: - -Files: vendor/golang.org/x/net/context/context.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package context defines the Context type, which carries deadlines, - cancelation signals, and other request-scoped values across API boundaries - and between processes. - As of Go 1.7 this package is available in the standard library under the - name context. https://golang.org/pkg/context. - . - Incoming requests to a server should create a Context, and outgoing calls to - servers should accept a Context. 
The chain of function calls between must - propagate the Context, optionally replacing it with a modified copy created - using WithDeadline, WithTimeout, WithCancel, or WithValue. - . - Programs that use Contexts should follow these rules to keep interfaces - consistent across packages and enable static analysis tools to check context - propagation: - . - Do not store Contexts inside a struct type; instead, pass a Context - explicitly to each function that needs it. The Context should be the first - parameter, typically named ctx: - . - func DoSomething(ctx context.Context, arg Arg) error { - // ... use ctx ... - } - . - Do not pass a nil Context, even if a function permits it. Pass context.TODO - if you are unsure about which Context to use. - . - Use context Values only for request-scoped data that transits processes and - APIs, not for passing optional parameters to functions. - . - The same Context may be passed to functions running in different goroutines; - Contexts are safe for simultaneous use by multiple goroutines. - . - See http://blog.golang.org/context for example code for a server that uses - Contexts. - -Files: vendor/golang.org/x/net/context/pre_go17.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build !go1.7 - +build !go1.7 - -Files: vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) - +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc - -Files: vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go -Copyright: 2016 Michal Witkowski. See LICENSE for licensing terms. -License: __UNKNOWN__ - gRPC Prometheus monitoring interceptors for server-side gRPC. 
- -Files: vendor/golang.org/x/sys/unix/syscall_linux_gc.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && gc - +build linux,gc - -Files: vendor/golang.org/x/sys/unix/gccgo.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build gccgo && !aix - +build gccgo,!aix - -Files: vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt -Copyright: 2011 Open Knowledge Foundation Ltd. -License: __UNKNOWN__ - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - . - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - . - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - FUNCTIONS - -Files: vendor/github.com/emicklei/go-restful/path_processor.go -Copyright: 2018 Ernest Micklei. -License: __UNKNOWN__ - Use of this source code is governed by a license - that can be found in the LICENSE file. - . - PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path. - If a Router does not implement this interface then the default behaviour will be used. - -Files: vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build amd64 && dragonfly - +build amd64,dragonfly - -Files: vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build arm64 && darwin - +build arm64,darwin - -Files: vendor/google.golang.org/appengine/internal/identity_flex.go -Copyright: 2018 Google LLC. -License: __UNKNOWN__ - Use of this source code is governed by the Apache 2.0 - license that can be found in the LICENSE file. - . - +build appenginevm - -Files: vendor/golang.org/x/sys/unix/aliases.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 - +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos - +build go1.9 - -Files: vendor/golang.org/x/sys/windows/race.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows,race - -Files: vendor/github.com/moby/sys/symlink/LICENSE.APACHE -Copyright: 2014-2018 Docker, Inc. - license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. -License: __UNKNOWN__ - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - . - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - . 
- (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - . - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - . - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - . - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - . - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. 
- Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - . - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - . - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - . - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - . - 9. Accepting Warranty or Additional Liability. 
While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - . - END OF TERMS AND CONDITIONS - . - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -Files: vendor/golang.org/x/sys/unix/syscall_linux_s390x.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build s390x && linux - +build s390x,linux - -Files: vendor/golang.org/x/sys/unix/asm_linux_riscv64.s -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build riscv64 && gc - +build riscv64 - +build gc - -Files: vendor/golang.org/x/crypto/ssh/terminal/terminal.go -Copyright: 2011 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- Package terminal provides support functions for dealing with terminals, as - commonly found on UNIX systems. - . - Deprecated: this package moved to golang.org/x/term. - -Files: vendor/golang.org/x/sys/unix/syscall_freebsd_386.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build 386 && freebsd - +build 386,freebsd - -Files: vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build darwin && go1.13 - +build darwin,go1.13 - -Files: vendor/golang.org/x/sync/semaphore/semaphore.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package semaphore provides a weighted semaphore implementation. - -Files: vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build arm && openbsd - +build arm,openbsd - -Files: vendor/golang.org/x/net/http2/transport.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Transport code. - -Files: vendor/gopkg.in/square/go-jose.v2/json/decode.go -Copyright: 2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Represents JSON data structure using native Go types: booleans, floats, - strings, arrays, and maps. - -Files: vendor/golang.org/x/sys/windows/exec_windows.go -Copyright: 2009 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Fork, exec, wait, etc. - -Files: vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go -Copyright: 2016 Michal Witkowski. See LICENSE for licensing terms. -License: __UNKNOWN__ - gRPC Prometheus monitoring interceptors for client-side gRPC. - -Files: vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && (mips64 || mips64le) - +build linux - +build mips64 mips64le - -Files: vendor/golang.org/x/net/context/pre_go19.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build !go1.9 - +build !go1.9 - -Files: vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && gccgo && 386 - +build linux,gccgo,386 - -Files: vendor/github.com/google/go-cmp/cmp/compare.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package cmp determines equality of values. - . - This package is intended to be a more powerful and safer alternative to - reflect.DeepEqual for comparing whether two values are semantically equal. - It is intended to only be used in tests, as performance is not a goal and - it may panic if it cannot compare the values. Its propensity towards - panicking means that its unsuitable for production environments where a - spurious panic may be fatal. - . - The primary features of cmp are: - . 
- • When the default behavior of equality does not suit the needs of the test, - custom equality functions can override the equality operation. - For example, an equality function may report floats as equal so long as they - are within some tolerance of each other. - . - • Types that have an Equal method may use that method to determine equality. - This allows package authors to determine the equality operation for the types - that they define. - . - • If no custom equality functions are used and no Equal method is defined, - equality is determined by recursively comparing the primitive kinds on both - values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported - fields are not compared by default; they result in panics unless suppressed - by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly - compared using the Exporter option. - -Files: vendor/github.com/fsnotify/fsnotify/kqueue.go -Copyright: 2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build freebsd openbsd netbsd dragonfly darwin - -Files: vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package diff implements an algorithm for producing edit-scripts. - The edit-script is a sequence of operations needed to transform one list - of symbols into another (or vice-versa). The edits allowed are insertions, - deletions, and modifications. The summation of all edits is called the - Levenshtein distance as this problem is well-known in computer science. - . - This package prioritizes performance over accuracy. That is, the run time - is more important than obtaining a minimal Levenshtein distance. - -Files: vendor/golang.org/x/sys/unix/syscall.go -Copyright: 2009 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos - +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos - . - Package unix contains an interface to the low-level operating system - primitives. OS details vary depending on the underlying system, and - by default, godoc will display OS-specific documentation for the current - system. If you want godoc to display OS documentation for another - system, set $GOOS and $GOARCH to the desired system. For example, if - you want to view documentation for freebsd/arm on linux/amd64, set $GOOS - to freebsd and $GOARCH to arm. - . - The primary use of this package is inside other packages that provide a more - portable interface to the system, such as "os", "time" and "net". Use - those packages rather than this one if you can. - . - For details of the functions and data types in this package consult - the manuals for the appropriate operating system. - -Files: vendor/golang.org/x/sys/unix/dev_linux.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Functions to access/create device major and minor numbers matching the - encoding used by the Linux kernel and glibc. - . - The information below is extracted and adapted from bits/sysmacros.h in the - glibc sources: - -Files: vendor/github.com/moby/sys/symlink/fs.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE.BSD file. - . - This code is a modified version of path/filepath/symlink.go from the Go standard library. - -Files: vendor/golang.org/x/sys/unix/asm_linux_mips64x.s -Copyright: 2015 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && (mips64 || mips64le) && gc - +build linux - +build mips64 mips64le - +build gc - -Files: vendor/github.com/emicklei/go-restful/web_service.go -Copyright: 2013 Ernest Micklei. -License: __UNKNOWN__ - Use of this source code is governed by a license - that can be found in the LICENSE file. - . - WebService holds a collection of Route values that bind a Http Method + URL Path to a function. - -Files: vendor/github.com/russross/blackfriday/v2/smartypants.go -Copyright: 2011 Russ Ross . Distributed under the Simplified BSD License. See README.md for details. -License: __UNKNOWN__ - SmartyPants rendering - -Files: vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build arm64 && freebsd - +build arm64,freebsd - -Files: vendor/golang.org/x/net/http2/gotrack.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Defensive debug-only utility to track that functions run on the - goroutine that they're supposed to. - -Files: vendor/golang.org/x/sys/unix/dev_openbsd.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Functions to access/create device major and minor numbers matching the - encoding used in OpenBSD's sys/types.h header. - -Files: vendor/golang.org/x/sys/unix/mkall.sh -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- This script runs or (given -n) prints suggested commands to generate files for - the Architecture/OS specified by the GOARCH and GOOS environment variables. - See README.md for more information about how the build system works. - -Files: vendor/golang.org/x/text/secure/bidirule/bidirule.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package bidirule implements the Bidi Rule defined by RFC 5893. - . - This package is under development. The API may change without notice and - without preserving backward compatibility. - -Files: vendor/golang.org/x/crypto/openpgp/packet/ocfb.go -Copyright: 2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 - -Files: vendor/golang.org/x/sys/unix/syscall_linux_arm64.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build arm64 && linux - +build arm64,linux - -Files: vendor/github.com/russross/blackfriday/v2/block.go -Copyright: 2011 Russ Ross . Distributed under the Simplified BSD License. See README.md for details. -License: __UNKNOWN__ - Functions to parse block-level elements. - -Files: vendor/golang.org/x/sys/windows/env_windows.go -Copyright: 2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Windows environment variables. - -Files: vendor/golang.org/x/sys/windows/syscall_windows.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Windows system calls. 
- -Files: vendor/golang.org/x/crypto/cast5/cast5.go -Copyright: 2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package cast5 implements CAST5, as defined in RFC 2144. - . - CAST5 is a legacy cipher and its short block size makes it vulnerable to - birthday bound attacks (see https://sweet32.info). It should only be used - where compatibility with legacy systems, not security, is the goal. - . - Deprecated: any new system should use AES (from crypto/aes, if necessary in - an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from - golang.org/x/crypto/chacha20poly1305). - -Files: vendor/golang.org/x/sys/unix/dev_aix_ppc64.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build aix && ppc64 - +build aix,ppc64 - . - Functions to access/create device major and minor numbers matching the - encoding used AIX. - -Files: vendor/golang.org/x/sys/unix/syscall_solaris.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Solaris system calls. - This file is compiled as ordinary Go code, - but it is also input to mksyscall, - which parses the //sys lines and generates system call stubs. - Note that sometimes we use a lowercase //sys name and wrap - it in our own nicer implementation, either here or in - -Files: vendor/golang.org/x/sys/plan9/str.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build plan9 - -Files: vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. 
- . - go:build amd64 && linux && gc - +build amd64,linux,gc - -Files: vendor/golang.org/x/sys/windows/svc/go13.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows - +build go1.3 - -Files: vendor/golang.org/x/oauth2/internal/client_appengine.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build appengine - -Files: vendor/github.com/prometheus/client_golang/NOTICE -Copyright: 2012-2015 The Prometheus Authors -License: __UNKNOWN__ - This product includes software developed at - SoundCloud Ltd. (http://soundcloud.com/). - . - The following components are included in this product: - . - perks - a fork of https://github.com/bmizerany/perks - https://github.com/beorn7/perks - Copyright 2013-2015 Blake Mizerany, Björn Rabenstein - See https://github.com/beorn7/perks/blob/master/README.md for license details. - . - Go support for Protocol Buffers - Google's data interchange format - http://github.com/golang/protobuf/ - Copyright 2010 The Go Authors - See source code for license details. - . - Support for streaming Protocol Buffer messages for the Go language (golang). - -Files: vendor/golang.org/x/sys/unix/dev_zos.go -Copyright: 2020 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build zos && s390x - +build zos,s390x - . - Functions to access/create device major and minor numbers matching the - encoding used by z/OS. - . - The information below is extracted and adapted from macros. - -Files: vendor/golang.org/x/sys/unix/syscall_darwin.go -Copyright: 2009-2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Darwin system calls. 
- This file is compiled as ordinary Go code, - but it is also input to mksyscall, - which parses the //sys lines and generates system call stubs. - Note that sometimes we use a lowercase //sys name and wrap - it in our own nicer implementation, either here or in - -Files: vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go -Copyright: 2021 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build !go1.13 - . - TODO(≥go1.13): For support on <go1.13, we use the xerrors package. - -Files: vendor/github.com/russross/blackfriday/v2/html.go -Copyright: 2011 Russ Ross <russ@russross.com>. Distributed under the Simplified BSD License. See README.md for details. -License: __UNKNOWN__ - HTML rendering backend - -Files: vendor/golang.org/x/sys/unix/dev_darwin.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Functions to access/create device major and minor numbers matching the - encoding used in Darwin's sys/types.h header. - -Files: vendor/golang.org/x/sys/windows/syscall.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows - . - Package windows contains an interface to the low-level operating system - primitives. OS details vary depending on the underlying system, and - by default, godoc will display the OS-specific documentation for the current - system. If you want godoc to display syscall documentation for another - system, set $GOOS and $GOARCH to the desired system. For example, if - you want to view documentation for freebsd/arm on linux/amd64, set $GOOS - to freebsd and $GOARCH to arm. - . - The primary use of this package is inside other packages that provide a more - portable interface to the system, such as "os", "time" and "net". Use - those packages rather than this one if you can. - .
- For details of the functions and data types in this package consult - the manuals for the appropriate operating system. - -Files: vendor/github.com/fsnotify/fsnotify/fen.go -Copyright: 2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build solaris - -Files: vendor/golang.org/x/sys/unix/syscall_bsd.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build darwin || dragonfly || freebsd || netbsd || openbsd - +build darwin dragonfly freebsd netbsd openbsd - . - BSD system call wrappers shared by *BSD based systems - including OS X (Darwin) and FreeBSD. Like the other - -Files: vendor/golang.org/x/sys/unix/syscall_unix_gc.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && gc && !ppc64le && !ppc64 - +build darwin dragonfly freebsd linux netbsd openbsd solaris - +build gc - +build !ppc64le - +build !ppc64 - -Files: vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build riscv64 && linux - +build riscv64,linux - -Files: vendor/golang.org/x/sys/unix/xattr_bsd.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build freebsd || netbsd - +build freebsd netbsd - -Files: vendor/golang.org/x/sync/errgroup/errgroup.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- Package errgroup provides synchronization, error propagation, and Context - cancelation for groups of goroutines working on subtasks of a common task. - -Files: vendor/golang.org/x/sys/windows/empty.s -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build !go1.12 - . - This file is here to allow bodyless functions with go:linkname for Go 1.11 - and earlier (see https://golang.org/issue/23311). - -Files: vendor/github.com/klauspost/compress/fse/fse.go -Copyright: 2018 Klaus Post. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - . - Package fse provides Finite State Entropy encoding and decoding. - . - Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding - for byte blocks as implemented in zstd. - . - See https://github.com/klauspost/compress/tree/master/fse for more information. - -Files: vendor/github.com/miekg/pkcs11/pkcs11t.h -Copyright: OASIS Open 2016. / /Distributed under the terms of the OASIS IPR Policy, [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY -License: __UNKNOWN__ - IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A - PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. - . - Latest version of the specification: - http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html - . - See top of pkcs11.h for information about the macros that - must be defined and the structure-packing conventions that - must be set before including this file. - -Files: vendor/golang.org/x/sys/unix/ptrace_ios.go -Copyright: 2020 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- go:build ios - +build ios - -Files: vendor/golang.org/x/sys/plan9/race.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build plan9,race - -Files: vendor/golang.org/x/sys/plan9/pwd_plan9.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build !go1.5 - -Files: vendor/golang.org/x/sys/unix/race.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build (darwin && race) || (linux && race) || (freebsd && race) - +build darwin,race linux,race freebsd,race - -Files: vendor/golang.org/x/crypto/openpgp/packet/packet.go -Copyright: 2011 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package packet implements parsing and serialization of OpenPGP packets, as - specified in RFC 4880. - -Files: vendor/golang.org/x/sys/unix/syscall_aix.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build aix - +build aix - . - Aix system calls. - This file is compiled as ordinary Go code, - but it is also input to mksyscall, - which parses the //sys lines and generates system call stubs. - Note that sometimes we use a lowercase //sys name and - wrap it in our own nicer implementation. - -Files: vendor/golang.org/x/term/term_unix_bsd.go -Copyright: 2013 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build darwin dragonfly freebsd netbsd openbsd - -Files: vendor/golang.org/x/sys/unix/syscall_dragonfly.go -Copyright: 2009 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - DragonFly BSD system calls. - This file is compiled as ordinary Go code, - but it is also input to mksyscall, - which parses the //sys lines and generates system call stubs. - Note that sometimes we use a lowercase //sys name and wrap - it in our own nicer implementation, either here or in - -Files: vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go -Copyright: 2013 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build arm && netbsd - +build arm,netbsd - -Files: vendor/golang.org/x/crypto/openpgp/s2k/s2k.go -Copyright: 2011 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package s2k implements the various OpenPGP string-to-key transforms as - specified in RFC 4800 section 3.7.1. - -Files: vendor/golang.org/x/oauth2/oauth2.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package oauth2 provides support for making - OAuth2 authorized and authenticated HTTP requests, - as specified in RFC 6749. - It can additionally grant authorization with Bearer JWT. - -Files: vendor/golang.org/x/sys/plan9/syscall.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build plan9 - . - Package plan9 contains an interface to the low-level operating system - primitives. OS details vary depending on the underlying system, and - by default, godoc will display the OS-specific documentation for the current - system. If you want godoc to display documentation for another - system, set $GOOS and $GOARCH to the desired system. 
For example, if - you want to view documentation for freebsd/arm on linux/amd64, set $GOOS - to freebsd and $GOARCH to arm. - . - The primary use of this package is inside other packages that provide a more - portable interface to the system, such as "os", "time" and "net". Use - those packages rather than this one if you can. - . - For details of the functions and data types in this package consult - the manuals for the appropriate operating system. - -Files: vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go -Copyright: 2013 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build freebsd openbsd netbsd dragonfly - -Files: vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build amd64 && netbsd - +build amd64,netbsd - -Files: vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build amd64 && openbsd - +build amd64,openbsd - -Files: vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package reflect is a fork of go's standard library reflection package, which - allows for deep equal with equality functions defined. - -Files: vendor/golang.org/x/crypto/openpgp/armor/armor.go -Copyright: 2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package armor implements OpenPGP ASCII Armor, see RFC 4880. 
OpenPGP Armor is - very similar to PEM except that it has an additional CRC checksum. - -Files: vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build darwin && go1.12 - +build darwin,go1.12 - -Files: vendor/golang.org/x/net/context/go17.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build go1.7 - +build go1.7 - -Files: vendor/golang.org/x/sys/unix/fcntl.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build dragonfly || freebsd || linux || netbsd || openbsd - +build dragonfly freebsd linux netbsd openbsd - -Files: vendor/golang.org/x/sys/unix/syscall_illumos.go -Copyright: 2021 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - illumos system calls not present on Solaris. - . - go:build amd64 && illumos - +build amd64,illumos - -Files: vendor/golang.org/x/net/http2/http2.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package http2 implements the HTTP/2 protocol. - . - This package is low-level and intended to be used directly by very - few people. Most users will use it indirectly through the automatic - use by the net/http package (from Go 1.6 and later). - For use in earlier Go versions see ConfigureServer. (Transport support - requires Go 1.6 or later) - . - See https://http2.github.io/ for more information on HTTP/2. - . - See https://http2.golang.org/ for a test server running this code. 
- -Files: vendor/golang.org/x/sys/unix/ptrace_darwin.go -Copyright: 2020 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build darwin && !ios - +build darwin,!ios - -Files: vendor/golang.org/x/net/idna/idna9.0.0.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build !go1.10 - +build !go1.10 - . - Package idna implements IDNA2008 using the compatibility processing - defined by UTS (Unicode Technical Standard) #46, which defines a standard to - deal with the transition from IDNA2003. - . - IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC - 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. - UTS #46 is defined in https://www.unicode.org/reports/tr46. - See https://unicode.org/cldr/utility/idna.jsp for a visualization of the - differences between these two standards. - -Files: vendor/golang.org/x/sys/unix/syscall_linux_arm.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build arm && linux - +build arm,linux - -Files: docs/cri/proposal.md -Copyright: __NO_COPYRIGHT__ in: docs/cri/proposal.md -License: __UNKNOWN__ - This proposal aims to integrate [containerd](https://github.com/containerd/containerd) with Kubelet against the [container runtime interface (CRI)](https://github.com/kubernetes/kubernetes/blob/v1.6.0/pkg/kubelet/api/v1alpha1/runtime/api.proto). - ## Background - Containerd is a core container runtime, which provides the minimum set of functionalities to manage the complete container lifecycle of its host system, including container execution and supervision, image distribution and storage, etc. - . 
- Containerd was [introduced in Docker 1.11](https://blog.docker.com/2016/04/docker-engine-1-11-runc/), used to manage [runC](https://runc.io/) containers on the node. As shown below, it creates a containerd-shim for each container, and the shim manages the lifecycle of its corresponding container. - ![containerd](./containerd.png) - . - In Dec. 2016, Docker Inc. spun it out into a standalone component, and donated it to [CNCF](https://www.cncf.io/) in Mar. 2017. - . - ## Motivation - Containerd is one potential alternative to Docker as the runtime for Kubernetes clusters. *Compared with Docker*, containerd has pros and cons. - ### Pros - * **Stability**: Containerd has limited scope and slower feature velocity, which is expected to be more stable. - * **Compatibility**: The scope of containerd aligns with Kubernetes' requirements. It provides the required functionalities and the flexibility for areas like image pulling, networking, volume and logging etc. - * **Performance**: - * Containerd consumes less resource than Docker at least because it's a subset of Docker; - * Containerd CRI integration eliminates an extra hop in the stack (as shown below). ![performance](./performance.png) - * **Neutral Foundation**: Containerd is part of CNCF now. - ### Cons - * **User Adoption**: - * Ideally, Kubernetes users don't interact with the underlying container runtime directly. However, for the lack of debug toolkits, sometimes users still need to login the node to debug with Docker CLI directly. - * Containerd provides barebone CLIs [ctr](https://github.com/containerd/containerd/tree/master/cmd/ctr) and [dist](https://github.com/containerd/containerd/tree/master/cmd/dist) for development and debugging purpose, but they may not be sufficient and necessary. Additionally, presuming these are sufficient and necessary tools, a plan and time would be needed to sufficiently document these CLIs and educate users in their use. 
- * **Maturity**: The rescoped containerd is pretty new, and it's still under heavy development. - ## Goals - * Make sure containerd meets the requirement of Kubernetes, now and into the foreseeable future. - * Implement containerd CRI shim and make sure it provides equivalent functionality, usability and debuggability. - * Improve Kubernetes by taking advantage of the flexibility provided by containerd. - ## Design - The following sections discuss the design aspects of the containerd CRI integration. For the purposes of this doc, the containerd CRI integration will be referred to as `CRI-containerd`. - ### Container Lifecycle - CRI-containerd relies on containerd to manage container lifecycle. - . - Ideally, CRI-containerd only needs to do api translation and information reorganization. However, CRI-containerd needs to maintain some metadata because: - * There is a mismatch between container lifecycle of CRI and containerd - containerd only tracks running processes, once the container and it's corresponding containerd-shim exit, the container is no longer visible in the containerd API. - * Some sandbox/container metadata is not provided by containerd, and we can not leverage OCI runtime annotation to store it because of the container lifecycle mismatch, e.g. labels/annotations, `PodSandboxID` of a container, `FinishedAt` timestamp, `ExitCode`, `Mounts` etc. - . - CRI-containerd should checkpoint these metadata itself or use [containerd metadata service](https://github.com/containerd/containerd/blob/0a5544d8c4dab44dfc682f5ad07f1cd011c0a115/design/plugins.md#core) if available. - ### Container Logging - Containerd doesn't provide persistent container log. It redirects container STDIO into different FIFOs. - . 
- CRI-containerd should start a goroutine (process/container in the future) to: - * Continuously drain the FIFO; - * Decorate the log line into [CRI-defined format](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/kubelet-cri-logging.md#proposed-solution); - * Write the log into [CRI-defined log path](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/kubelet-cri-logging.md#proposed-solution). - ### Container Streaming - Containerd supports creating a process in the container with `Exec`, and the STDIO is also exposed as FIFOs. Containerd also supports resizing console of a specific process with `Pty`. - . - CRI-containerd could reuse the [streaming server](https://github.com/kubernetes/kubernetes/blob/release-1.6/pkg/kubelet/server/streaming/server.go), it should implement the [streaming runtime interface](https://github.com/kubernetes/kubernetes/blob/release-1.6/pkg/kubelet/server/streaming/server.go#L61-L65). - . - For different CRI streaming functions: - * `ExecSync`: CRI-containerd should use `Exec` to create the exec process, collect the stdout/stderr of the process, and wait for the process to terminate. - * `Exec`: CRI-containerd should use `Exec` to create the exec process, create a goroutine (process/container) to redirect streams, and wait for the process to terminate. - * `Attach`: CRI-containerd should create a goroutine (process/container) to read the existing container log to the output, redirect streams of the init process, and wait for any stream to be closed. - -Files: vendor/golang.org/x/sys/unix/asm_linux_arm64.s -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && arm64 && gc - +build linux - +build arm64 - +build gc - -Files: vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go -Copyright: 2013 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build darwin - -Files: vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go -Copyright: 2011 Open Knowledge Foundation Ltd. -License: __UNKNOWN__ - HTTP Content-Type Autonegotiation. - . - The functions in this package implement the behaviour specified in - http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - . - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - . - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - . - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -Files: vendor/github.com/docker/spdystream/spdy/types.go -Copyright: 2011 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package spdy implements the SPDY protocol (currently SPDY/3), described in - http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3. - -Files: vendor/golang.org/x/crypto/openpgp/read.go -Copyright: 2011 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package openpgp implements high level operations on OpenPGP messages. - -Files: vendor/golang.org/x/net/http2/flow.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Flow control - -Files: vendor/golang.org/x/sys/unix/gccgo_c.c -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build gccgo - +build !aix - -Files: vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package ctxhttp provides helper functions for performing context-aware HTTP requests. - -Files: vendor/github.com/miekg/pkcs11/pkcs11f.h -Copyright: OASIS Open 2016. / /Distributed under the terms of the OASIS IPR Policy, [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY -License: __UNKNOWN__ - IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A - PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. - . - Latest version of the specification: - http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html - . 
- This header file contains pretty much everything about all the - Cryptoki function prototypes. Because this information is - used for more than just declaring function prototypes, the - order of the functions appearing herein is important, and - should not be altered. - . - General-purpose - -Files: vendor/golang.org/x/sys/unix/syscall_netbsd.go -Copyright: 2009-2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - NetBSD system calls. - This file is compiled as ordinary Go code, - but it is also input to mksyscall, - which parses the //sys lines and generates system call stubs. - Note that sometimes we use a lowercase //sys name and wrap - it in our own nicer implementation, either here or in - -Files: vendor/golang.org/x/term/term_unsupported.go -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!zos,!windows,!solaris,!plan9 - -Files: vendor/golang.org/x/sys/unix/env_unix.go -Copyright: 2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos - +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos - . - Unix environment variables. - -Files: vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build amd64 && solaris - +build amd64,solaris - -Files: vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go -Copyright: 2019 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos - +build aix darwin freebsd linux netbsd openbsd solaris zos - -Files: vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && (mips || mipsle) - +build linux - +build mips mipsle - -Files: vendor/github.com/russross/blackfriday/v2/inline.go -Copyright: 2011 Russ Ross . Distributed under the Simplified BSD License. See README.md for details. -License: __UNKNOWN__ - Functions to parse inline elements. - -Files: vendor/golang.org/x/sys/windows/race0.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows,!race - -Files: vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build go1.5 - -Files: vendor/golang.org/x/sys/unix/syscall_darwin_386.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build 386 && darwin - +build 386,darwin - -Files: vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build arm64 && netbsd - +build arm64,netbsd - -Files: vendor/golang.org/x/sys/windows/aliases.go -Copyright: 2018 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows - +build go1.9 - -Files: vendor/github.com/coreos/go-systemd/v22/NOTICE -Copyright: 2018 CoreOS, Inc -License: __UNKNOWN__ - This product includes software developed at CoreOS, Inc. - (http://www.coreos.com/). - -Files: vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && (ppc64 || ppc64le) && gc - +build linux - +build ppc64 ppc64le - +build gc - -Files: vendor/github.com/google/go-cmp/cmp/internal/function/func.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package function provides functionality for identifying function types. - -Files: vendor/golang.org/x/xerrors/doc.go -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package xerrors implements functions to manipulate errors. - . - This package is based on the Go 2 proposal for error values: - https://golang.org/design/29934-error-values - . - These functions were incorporated into the standard library's errors package - in Go 1.13: - - Is - - As - - Unwrap - . - Also, Errorf's %w verb was incorporated into fmt.Errorf. - . - Use this package to get equivalent behavior in all supported Go versions. - . - No other features of this package were included in Go 1.13, and at present - there are no plans to include any of them. - -Files: vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- go:build sparc64 && linux - +build sparc64,linux - -Files: vendor/golang.org/x/sys/plan9/syscall_plan9.go -Copyright: 2011 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Plan 9 system calls. - This file is compiled as ordinary Go code, - but it is also input to mksyscall, - which parses the //sys lines and generates system call stubs. - Note that sometimes we use a lowercase //sys name and - wrap it in our own nicer implementation. - -Files: vendor/golang.org/x/crypto/ed25519/ed25519_go113.go -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build go1.13 - +build go1.13 - . - Package ed25519 implements the Ed25519 signature algorithm. See - https://ed25519.cr.yp.to/. - . - These functions are also compatible with the “Ed25519” function defined in - RFC 8032. However, unlike RFC 8032's formulation, this package's private key - representation includes a public key suffix to make multiple signing - operations with the same key more efficient. This package refers to the RFC - 8032 private key as the “seed”. - . - Beginning with Go 1.13, the functionality of this package was moved to the - standard library as crypto/ed25519. This package only acts as a compatibility - wrapper. - -Files: vendor/golang.org/x/sys/unix/syscall_freebsd.go -Copyright: 2009-2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - FreeBSD system calls. - This file is compiled as ordinary Go code, - but it is also input to mksyscall, - which parses the //sys lines and generates system call stubs. 
- Note that sometimes we use a lowercase //sys name and wrap - it in our own nicer implementation, either here or in - -Files: vendor/golang.org/x/sys/plan9/mkall.sh -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - The plan9 package provides access to the raw system call - interface of the underlying operating system. Porting Go to - a new architecture/operating system combination requires - some manual effort, though there are tools that automate - much of the process. The auto-generated files have names - beginning with z. - . - This script runs or (given -n) prints suggested commands to generate z files - for the current system. Running those commands is not automatic. - This script is documentation more than anything else. - -Files: vendor/golang.org/x/sys/unix/readdirent_getdirentries.go -Copyright: 2019 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build darwin - +build darwin - -Files: vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && (ppc64le || ppc64) && gc - +build linux - +build ppc64le ppc64 - +build gc - -Files: vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go -Copyright: 2011 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package elgamal implements ElGamal encryption, suitable for OpenPGP, - as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on - Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, - n. 4, 1985, pp. 469-472. - . 
- This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it - unsuitable for other protocols. RSA should be used in preference in any - case. - -Files: vendor/golang.org/x/sys/unix/syscall_linux_amd64.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build amd64 && linux - +build amd64,linux - -Files: vendor/golang.org/x/net/websocket/websocket.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package websocket implements a client and server for the WebSocket protocol - as specified in RFC 6455. - . - This package currently lacks some features found in alternative - and more actively maintained WebSocket packages: - . - https://godoc.org/github.com/gorilla/websocket - https://godoc.org/nhooyr.io/websocket - -Files: vendor/golang.org/x/sys/unix/asm_linux_mipsx.s -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && (mips || mipsle) && gc - +build linux - +build mips mipsle - +build gc - -Files: vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build linux && gc && 386 - +build linux,gc,386 - -Files: vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- go:build linux && (ppc64 || ppc64le) - +build linux - +build ppc64 ppc64le - -Files: vendor/google.golang.org/grpc/MAINTAINERS.md -Copyright: __NO_COPYRIGHT__ in: vendor/google.golang.org/grpc/MAINTAINERS.md -License: __UNKNOWN__ - See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) - for governance guidelines and how to become a maintainer. - See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) - for general contribution guidelines. - . - ## Maintainers (in alphabetical order) - - [canguler](https://github.com/canguler), Google LLC - - [cesarghali](https://github.com/cesarghali), Google LLC - - [dfawley](https://github.com/dfawley), Google LLC - - [easwars](https://github.com/easwars), Google LLC - - [jadekler](https://github.com/jadekler), Google LLC - - [menghanl](https://github.com/menghanl), Google LLC - - [srini100](https://github.com/srini100), Google LLC - . - ## Emeritus Maintainers (in alphabetical order) - - [adelez](https://github.com/adelez), Google LLC - - [iamqizhao](https://github.com/iamqizhao), Google LLC - - [jtattermusch](https://github.com/jtattermusch), Google LLC - - [lyuxuan](https://github.com/lyuxuan), Google LLC - - [makmukhi](https://github.com/makmukhi), Google LLC - - [matt-kwong](https://github.com/matt-kwong), Google LLC - - [nicolasnoble](https://github.com/nicolasnoble), Google LLC - - [yongni](https://github.com/yongni), Google LLC - -Files: vendor/golang.org/x/net/http2/server.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - TODO: turn off the serve goroutine when idle, so - an idle conn only has the readFrames goroutine active. (which could - also be optimized probably to pin less memory in crypto/tls). 
This - would involve tracking when the serve goroutine is active (atomic - -Files: vendor/github.com/miekg/pkcs11/pkcs11.go -Copyright: 2013 Miek Gieben. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package pkcs11 is a wrapper around the PKCS#11 cryptographic library. - -Files: vendor/github.com/imdario/mergo/doc.go -Copyright: 2009 The Go Authors. - 2013 Dario Castañé. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - . - Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - . - Status - . - It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - . - Important note - . - Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. - . - Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - . - If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - . - Install - . - Do your usual installation procedure: - . - go get github.com/imdario/mergo - . 
- // use in your .go code - import ( - "github.com/imdario/mergo" - ) - . - Usage - . - You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -Files: vendor/golang.org/x/sys/unix/endian_big.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 - +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 - -Files: vendor/golang.org/x/sys/windows/svc/debug/service.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build windows - . - Package debug provides facilities to execute svc.Handler on console. - -Files: vendor/golang.org/x/net/http2/hpack/hpack.go -Copyright: 2014 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package hpack implements HPACK, a compression format for - efficiently representing HTTP header fields in the context of HTTP/2. - . - See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 - -Files: vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- go:build amd64 && freebsd - +build amd64,freebsd - -Files: vendor/golang.org/x/net/http2/go111.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build go1.11 - +build go1.11 - -Files: vendor/golang.org/x/sys/unix/affinity_linux.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - CPU affinity functions - -Files: vendor/golang.org/x/time/rate/rate.go -Copyright: 2015 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package rate provides a rate limiter. - -Files: vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build aix && ppc64 - +build aix,ppc64 - -Files: vendor/golang.org/x/net/idna/idna10.0.0.go -Copyright: 2016 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build go1.10 - +build go1.10 - . - Package idna implements IDNA2008 using the compatibility processing - defined by UTS (Unicode Technical Standard) #46, which defines a standard to - deal with the transition from IDNA2003. - . - IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC - 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. - UTS #46 is defined in https://www.unicode.org/reports/tr46. - See https://unicode.org/cldr/utility/idna.jsp for a visualization of the - differences between these two standards. - -Files: vendor/golang.org/x/text/transform/transform.go -Copyright: 2013 The Go Authors. 
-License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package transform provides reader and writer wrappers that transform the - bytes passing through as well as various transformations. Example - transformations provided by other packages include normalization and - conversion between character sets. - -Files: vendor/golang.org/x/sys/unix/asm_bsd_amd64.s -Copyright: 2021 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc - +build darwin dragonfly freebsd netbsd openbsd - +build gc - -Files: vendor/golang.org/x/net/context/go19.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build go1.9 - +build go1.9 - -Files: vendor/golang.org/x/sys/unix/syscall_aix_ppc.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build aix && ppc - +build aix,ppc - -Files: vendor/golang.org/x/sys/plan9/race0.go -Copyright: 2012 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build plan9,!race - -Files: vendor/golang.org/x/crypto/openpgp/errors/errors.go -Copyright: 2010 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package errors contains common error types for the OpenPGP packages. - -Files: vendor/golang.org/x/sys/unix/dev_aix_ppc.go -Copyright: 2018 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- go:build aix && ppc - +build aix,ppc - . - Functions to access/create device major and minor numbers matching the - encoding used by AIX. - -Files: vendor/github.com/docker/go-units/README.md -Copyright: 2015 Docker, Inc. -License: __UNKNOWN__ - go-units is licensed under the Apache License, Version 2.0. - See [LICENSE](LICENSE) for the full text of the license. - -Files: vendor/github.com/miekg/pkcs11/pkcs11.h -Copyright: OASIS Open 2016. / /Distributed under the terms of the OASIS IPR Policy, [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY -License: __UNKNOWN__ - IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A - PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. - . - Latest version of the specification: - http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html - -Files: vendor/golang.org/x/sys/unix/syscall_netbsd_386.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - go:build 386 && netbsd - +build 386,netbsd - -Files: vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go -Copyright: 2021 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - +build go1.13 - -Files: vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go -Copyright: 2017 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package cmpopts provides common options for the cmp package. - -Files: vendor/golang.org/x/sys/unix/syscall_openbsd_386.go -Copyright: 2009 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . 
- go:build 386 && openbsd - +build 386,openbsd - -Files: vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go -Copyright: 2020 The Go Authors. -License: __UNKNOWN__ - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. - . - Package unsafeheader contains header declarations for the Go runtime's - slice and string implementations. - . - This package allows x/sys to use types equivalent to - reflect.SliceHeader and reflect.StringHeader without introducing - a dependency on the (relatively heavy) "reflect" package. - -Files: vendor/github.com/gogo/protobuf/plugin/size/size.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The size plugin generates a Size or ProtoSize method for each message. - This is useful with the MarshalTo method generated by the marshalto plugin and the - -Files: vendor/github.com/gogo/protobuf/plugin/marshalto/marshalto.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The marshalto plugin generates a Marshal and MarshalTo method for each message. - The `Marshal() ([]byte, error)` method results in the fact that the message - implements the Marshaler interface. - This allows proto.Marshal to be faster by calling the generated Marshal method rather than using reflect to Marshal the struct. - . - If is enabled by the following extensions: - . - - marshaler - -Files: vendor/github.com/gogo/protobuf/plugin/stringer/stringer.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The stringer plugin generates a String method for each message. - . - It is enabled by the following extensions: - . - - stringer - -Files: vendor/github.com/gogo/protobuf/plugin/equal/equal.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The equal plugin generates an Equal and a VerboseEqual method for each message. - These equal methods are quite obvious. - The only difference is that VerboseEqual returns a non nil error if it is not equal. - This error contains more detail on exactly which part of the message was not equal to the other message. - The idea is that this is useful for debugging. - . - Equal is enabled using the following extensions: - . - - equal - -Files: vendor/github.com/gogo/protobuf/plugin/description/description.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The description (experimental) plugin generates a Description method for each message. - -Files: vendor/github.com/gogo/protobuf/plugin/union/union.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The onlyone plugin generates code for the onlyone extension. - All fields must be nullable and only one of the fields may be set, like a union. - Two methods are generated - . - GetValue() interface{} - . - and - . - SetValue(v interface{}) (set bool) - . - These provide easier interaction with a onlyone. - . - The onlyone extension is not called union as this causes compile errors in the C++ generated code. - There can only be one ;) - . - It is enabled by the following extensions: - . - - onlyone - -Files: vendor/github.com/gogo/protobuf/plugin/testgen/testgen.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The testgen plugin generates Test and Benchmark functions for each message. - . - Tests are enabled using the following extensions: - . - - testgen - -Files: vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go -Copyright: 2018 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - +build purego appengine js - . - This file contains an implementation of proto field accesses using package reflect. - -Files: vendor/github.com/gogo/protobuf/plugin/gostring/gostring.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The gostring plugin generates a GoString method for each message. - The GoString method is called whenever you use a fmt.Printf as such: - . - fmt.Printf("%#v", mymessage) - . - or whenever you actually call GoString() - The output produced by the GoString method can be copied from the output into code and used to set a variable. - It is totally valid Go Code and is populated exactly as the struct that was printed out. - . - It is enabled by the following extensions: - . - - gostring - -Files: vendor/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The defaultcheck plugin is used to check whether nullable is not used incorrectly. - For instance: - An error is caused if a nullable field: - - has a default value, - - is an enum which does not start at zero, - - is used for an extension, - - is used for a native proto3 type, - - is used for a repeated native type. - . - An error is also caused if a field with a default value is used in a message: - - which is a face. - - without getters. - . - It is enabled by the following extensions: - . - - nullable - . - For incorrect usage of nullable with tests see: - . - github.com/gogo/protobuf/test/nullableconflict - -Files: vendor/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . 
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The enumstringer (experimental) plugin generates a String method for each enum. - . - It is enabled by the following extensions: - -Files: vendor/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The oneofcheck plugin is used to check whether oneof is not used incorrectly. - For instance: - An error is caused if a oneof field: - - is used in a face - - is an embedded field - -Files: vendor/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The unmarshal plugin generates a Unmarshal method for each message. - The `Unmarshal([]byte) error` method results in the fact that the message - implements the Unmarshaler interface. - The allows proto.Unmarshal to be faster by calling the generated Unmarshal method rather than using reflect. - . - If is enabled by the following extensions: - . - - unmarshaler - -Files: vendor/github.com/gogo/protobuf/gogoproto/doc.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - Package gogoproto provides extensions for protocol buffers to achieve: - . - - fast marshalling and unmarshalling. - - peace of mind by optionally generating test and benchmark code. - - more canonical Go structures. - - less typing by optionally generating extra helper code. - - goprotobuf compatibility - . - More Canonical Go Structures - . - A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. - You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. - Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. - . - - nullable, if false, a field is generated without a pointer (see warning below). - - embed, if true, the field is generated as an embedded field. - - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 - - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. - - casttype (beta), Changes the generated fieldtype. 
All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. - - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. - - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. - -Files: vendor/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . 
- The embedcheck plugin is used to check whether embed is not used incorrectly. - For instance: - An embedded message has a generated string method, but the is a member of a message which does not. - This causes a warning. - An error is caused by a namespace conflict. - . - It is enabled by the following extensions: - . - - embed - -Files: vendor/github.com/gogo/protobuf/plugin/face/face.go -Copyright: 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The face plugin generates a function will be generated which can convert a structure which satisfies an interface (face) to the specified structure. - This interface contains getters for each of the fields in the struct. 
- The specified struct is also generated with the getters. - This means that getters should be turned off so as not to conflict with face getters. - This allows it to satisfy its own face. - . - It is enabled by the following extensions: - . - - face - -Files: vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go -Copyright: 2018 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - +build !purego,!appengine,!js - . - This file contains the implementation of the proto field accesses using package unsafe. - -Files: vendor/github.com/gogo/protobuf/plugin/populate/populate.go -Copyright: 2013 The GoGo Authors. 
http://github.com/gogo/protobuf -License: BSD-2-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The populate plugin generates a NewPopulated function. - This function returns a newly populated structure. - . - It is enabled by the following extensions: - . - - populate - -Files: vendor/github.com/golang/protobuf/ptypes/doc.go -Copyright: 2016 The Go Authors. https://github.com/golang/protobuf -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - Package ptypes contains code for interacting with well-known types. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. - -Files: vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go -Copyright: 2016 The Go Authors. https://github.com/golang/protobuf -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - Package descriptor provides functions for obtaining protocol buffer - descriptors for generated Go types. - . - These functions cannot go in package proto because they depend on the - generated protobuf descriptor messages, which themselves depend on proto. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. - -Files: vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/internal/remap/remap.go -Copyright: 2017 The Go Authors. https://github.com/golang/protobuf -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . 
- * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - Package remap handles tracking the locations of Go tokens in a source text - across a rewrite by the Go formatter. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. - -Files: vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile -Copyright: 2010 The Go Authors. https://github.com/golang/protobuf -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . 
- * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - Not stored here, but plugin.proto is in https://github.com/google/protobuf/ - at src/google/protobuf/compiler/plugin.proto - Also we need to fix an import. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. - -Files: vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc/grpc.go -Copyright: 2015 The Go Authors. https://github.com/golang/protobuf -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . 
- * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - Package grpc outputs gRPC service descriptions in Go code. - It runs as a plugin for the Go protocol buffer compiler plugin. - It is linked in to protoc-gen-go. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. - -Files: vendor/github.com/gogo/protobuf/types/doc.go -Copyright: 2016 The Go Authors. https://github.com/golang/protobuf -License: BSD-3-Clause - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . 
- * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - Package types contains code for interacting with well-known types. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. - -Files: vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go -Copyright: 2010 The Go Authors. https://github.com/golang/protobuf - 2013 The GoGo Authors. http://github.com/gogo/protobuf -License: BSD-3-Clause - Go support for Protocol Buffers - Google's data interchange format - . - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . 
- * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - . - The code generator for the plugin for the Google protocol buffer compiler. - It generates Go code from the protocol buffer description files read by the - main routine. - . - On Debian systems, the complete text of the BSD 3-clause "New" or "Revised" - License can be found in `/usr/share/common-licenses/BSD'. 
- -#---------------------------------------------------------------------------- -# xml and html files (skipped): -# vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/HookTest.wprp - -#---------------------------------------------------------------------------- -# huge files (skipped): -# vendor/k8s.io/api/core/v1/generated.pb.go - -#---------------------------------------------------------------------------- -# Files marked as NO_LICENSE_TEXT_FOUND may be covered by the following -# license/copyright files. - -#---------------------------------------------------------------------------- -# License file: LICENSE - . - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - . - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - . - 1. Definitions. - . - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - . - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - . - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - . - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - . - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - . 
- "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - . - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - . - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - . - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - . 
- "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - . - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - . - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - . - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - . - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - . 
- (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - . - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - . - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - . - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - . - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - . - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - . - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - . - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - . - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - . - END OF TERMS AND CONDITIONS - . - Copyright The containerd Authors - . - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - https://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/debian/docs b/debian/docs deleted file mode 100644 index 86f744e..0000000 --- a/debian/docs +++ /dev/null @@ -1,2 +0,0 @@ -README.md -docs diff --git a/debian/golang-github-containerd-containerd-dev.install b/debian/golang-github-containerd-containerd-dev.install deleted file mode 100644 index 3e409b1..0000000 --- a/debian/golang-github-containerd-containerd-dev.install +++ /dev/null @@ -1 +0,0 @@ -usr/share/gocode/src diff --git a/debian/rules b/debian/rules deleted file mode 100755 index 26940ce..0000000 --- a/debian/rules +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/make -f - -include /usr/share/dpkg/architecture.mk - -# temporary build path (see http://golang.org/doc/code.html#GOPATH) -OUR_GOPATH := $(CURDIR)/.gopath -export GOPATH := $(OUR_GOPATH) -export GOCACHE := $(CURDIR)/.gocache - -# https://blog.golang.org/go116-module-changes (TODO figure out a new solution for Go 1.17+) -export GO111MODULE := auto - -# riscv64 doesn't support cgo -# https://github.com/golang/go/issues/36641 -ifeq (riscv64, $(DEB_BUILD_ARCH)) -TAGS += no_btrfs -SKIP += github.com/containerd/containerd/snapshots/btrfs -endif - -# build explicitly against Go 1.13 (for now): -# https://github.com/containerd/containerd/issues/4509 -# https://github.com/containerd/containerd/pull/4050 -# https://github.com/golang/go/issues/37942 -# (containerd 1.4+ and Go 1.15+ can go back to "golang-go") -export PATH := /usr/lib/go-1.13/bin:$(PATH) - -override_dh_gencontrol: - # use "dh_golang" to generate "misc:Built-Using" (via "go list") - DH_GOLANG_BUILDPKG=' \ - -tags "$(TAGS)" \ - github.com/containerd/containerd/cmd/containerd \ - github.com/containerd/containerd/cmd/containerd-shim \ - github.com/containerd/containerd/cmd/containerd-shim-runc-v1 \ - github.com/containerd/containerd/cmd/containerd-shim-runc-v2 \ - github.com/containerd/containerd/cmd/containerd-stress \ - github.com/containerd/containerd/cmd/ctr \ - ' dh_golang --builddirectory='$(OUR_GOPATH:$(CURDIR)/%=%)' - 
dh_gencontrol - -override_dh_auto_configure: - # copy pristine source for "/usr/share/gocode" to get into "golang-github-containerd-containerd-dev" before we muddy it with build artifacts, etc - mkdir -p .pristine-source - tar -c --exclude=debian --exclude=.pc --exclude=.pristine-source --exclude=vendor . | tar -xC .pristine-source - # set up GOPATH symlink farm - mkdir -p '$(OUR_GOPATH)/src/github.com/containerd' - ln -sfT '$(CURDIR)' '$(OUR_GOPATH)/src/github.com/containerd/containerd' - -override_dh_auto_build: - cd '$(OUR_GOPATH)/src/github.com/containerd/containerd' \ - && make \ - LDFLAGS='' \ - VERSION='$(shell dpkg-parsechangelog -SVersion)' \ - REVISION='' \ - BUILDTAGS='$(TAGS)' \ - && make man - -override_dh_auto_test: -ifneq (arm, $(DEB_HOST_ARCH_CPU)) # skip the tests on armhf ("--- FAIL: TestParseSelector/linux (0.00s) platforms_test.go:292: arm support not fully implemented: not implemented") - cd '$(OUR_GOPATH)/src/github.com/containerd/containerd' && make test SKIPTESTS='$(SKIP)' -endif - -override_dh_auto_install: - make install DESTDIR='$(CURDIR)/debian/tmp/usr' - make install-man DESTDIR='$(CURDIR)/debian/tmp/usr/share' - mkdir -p debian/tmp/usr/share/gocode/src/github.com/containerd \ - && mv .pristine-source debian/tmp/usr/share/gocode/src/github.com/containerd/containerd - -override_dh_installsystemd: - dh_installsystemd - # replace "/usr/local/bin/containerd" with "/usr/bin/containerd" in our systemd service file - grep '/usr/local/bin/containerd' debian/*/lib/systemd/system/containerd.service \ - && sed -i 's!/usr/local/bin/containerd!/usr/bin/containerd!g' debian/*/lib/systemd/system/containerd.service \ - && ! 
grep '/usr/local/bin/containerd' debian/*/lib/systemd/system/containerd.service - -%: - dh $@ diff --git a/debian/source/format b/debian/source/format deleted file mode 100644 index 89ae9db..0000000 --- a/debian/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (native) diff --git a/debian/tests/basic-smoke b/debian/tests/basic-smoke deleted file mode 100755 index c43fda6..0000000 --- a/debian/tests/basic-smoke +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -Eeuo pipefail -set -x - -# start up containerd -containerd & - -# pull the "busybox" image from Docker Hub -ctr images pull docker.io/library/busybox:latest - -# run it and capture the output -output="$(ctr run --rm docker.io/library/busybox:latest "test-$$-$RANDOM" echo success)" - -# ensure the output was exactly what we expected -[ "$output" = 'success' ] diff --git a/debian/tests/control b/debian/tests/control deleted file mode 100644 index 9ed8924..0000000 --- a/debian/tests/control +++ /dev/null @@ -1,3 +0,0 @@ -Tests: basic-smoke -Depends: ca-certificates, @ -Restrictions: allow-stderr isolation-machine needs-root diff --git a/debian/watch b/debian/watch deleted file mode 100644 index f3e9513..0000000 --- a/debian/watch +++ /dev/null @@ -1,5 +0,0 @@ -version=3 -opts=filenamemangle=s/.+\/v?(\d\S*)\.tar\.gz/containerd-$1\.tar\.gz/,\ -uversionmangle=s/(\d)[_\.\-\+]?((RC|rc|pre|dev|beta|alpha|preview)\d*)$/$1~$2/,\ -dversionmangle=s/[~+]ds\d*$// \ - https://github.com/containerd/containerd/tags .*/v?(\d\S*)\.tar\.gz diff --git a/defaults/defaults_darwin.go b/defaults/defaults_darwin.go new file mode 100644 index 0000000..1391884 --- /dev/null +++ b/defaults/defaults_darwin.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package defaults + +const ( + // DefaultRootDir is the default location used by containerd to store + // persistent data + DefaultRootDir = "/var/lib/containerd" + // DefaultStateDir is the default location used by containerd to store + // transient data + DefaultStateDir = "/var/run/containerd" + // DefaultAddress is the default unix socket address + DefaultAddress = "/var/run/containerd/containerd.sock" + // DefaultDebugAddress is the default unix socket address for pprof data + DefaultDebugAddress = "/var/run/containerd/debug.sock" + // DefaultFIFODir is the default location used by client-side cio library + // to store FIFOs. + DefaultFIFODir = "/var/run/containerd/fifo" + // DefaultRuntime would be a multiple of choices, thus empty + DefaultRuntime = "" + // DefaultConfigDir is the default location for config files. + DefaultConfigDir = "/etc/containerd" +) diff --git a/defaults/defaults_unix.go b/defaults/defaults_unix.go index 6b69cd0..8e2619a 100644 --- a/defaults/defaults_unix.go +++ b/defaults/defaults_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright The containerd Authors. diff --git a/defaults/defaults_windows.go b/defaults/defaults_windows.go index a807000..9f4bed8 100644 --- a/defaults/defaults_windows.go +++ b/defaults/defaults_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
diff --git a/diff.go b/diff.go index 445df01..28012b1 100644 --- a/diff.go +++ b/diff.go @@ -86,6 +86,9 @@ func (r *diffRemote) Compare(ctx context.Context, a, b []mount.Mount, opts ...di } func toDescriptor(d *types.Descriptor) ocispec.Descriptor { + if d == nil { + return ocispec.Descriptor{} + } return ocispec.Descriptor{ MediaType: d.MediaType, Digest: d.Digest, diff --git a/diff/apply/apply.go b/diff/apply/apply.go index 1d0a95e..d4b3423 100644 --- a/diff/apply/apply.go +++ b/diff/apply/apply.go @@ -18,8 +18,8 @@ package apply import ( "context" + "fmt" "io" - "io/ioutil" "time" "github.com/containerd/containerd/content" @@ -28,7 +28,6 @@ import ( "github.com/containerd/containerd/mount" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -65,13 +64,13 @@ func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts [ var config diff.ApplyConfig for _, o := range opts { if err := o(ctx, desc, &config); err != nil { - return emptyDesc, errors.Wrap(err, "failed to apply config opt") + return emptyDesc, fmt.Errorf("failed to apply config opt: %w", err) } } ra, err := s.store.ReaderAt(ctx, desc) if err != nil { - return emptyDesc, errors.Wrap(err, "failed to get reader from content store") + return emptyDesc, fmt.Errorf("failed to get reader from content store: %w", err) } defer ra.Close() @@ -80,7 +79,7 @@ func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts [ processors = append(processors, processor) for { if processor, err = diff.GetProcessor(ctx, processor, config.ProcessorPayloads); err != nil { - return emptyDesc, errors.Wrapf(err, "failed to get stream processor for %s", desc.MediaType) + return emptyDesc, fmt.Errorf("failed to get stream processor for %s: %w", desc.MediaType, err) } processors = append(processors, processor) if processor.MediaType() == ocispec.MediaTypeImageLayer { @@ -99,7 +98,7 @@ 
func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts [ } // Read any trailing data - if _, err := io.Copy(ioutil.Discard, rc); err != nil { + if _, err := io.Copy(io.Discard, rc); err != nil { return emptyDesc, err } diff --git a/diff/apply/apply_darwin.go b/diff/apply/apply_darwin.go new file mode 100644 index 0000000..8865402 --- /dev/null +++ b/diff/apply/apply_darwin.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package apply + +import ( + "context" + "io" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/mount" +) + +func apply(ctx context.Context, mounts []mount.Mount, r io.Reader) error { + // We currently do not support mounts nor bind mounts on MacOS in the containerd daemon. + // Using this as an exception to enable native snapshotter and allow further research. + if len(mounts) == 1 && mounts[0].Type == "bind" { + path := mounts[0].Source + _, err := archive.Apply(ctx, path, r) + return err + } + + return mount.WithTempMount(ctx, mounts, func(root string) error { + _, err := archive.Apply(ctx, root, r) + return err + }) +} diff --git a/diff/apply/apply_linux.go b/diff/apply/apply_linux.go index b3775c7..55602db 100644 --- a/diff/apply/apply_linux.go +++ b/diff/apply/apply_linux.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -20,6 +18,7 @@ package apply import ( "context" + "fmt" "io" "strings" @@ -27,7 +26,6 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/pkg/userns" - "github.com/pkg/errors" ) func apply(ctx context.Context, mounts []mount.Mount, r io.Reader) error { @@ -88,7 +86,7 @@ func getOverlayPath(options []string) (upper string, lower []string, err error) } } if upper == "" { - return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "upperdir not found") + return "", nil, fmt.Errorf("upperdir not found: %w", errdefs.ErrInvalidArgument) } return @@ -113,22 +111,22 @@ func getAufsPath(options []string) (upper string, lower []string, err error) { for _, b := range strings.Split(o, sep) { if strings.HasSuffix(b, rwSuffix) { if upper != "" { - return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "multiple rw branch found") + return "", nil, fmt.Errorf("multiple rw branch found: %w", errdefs.ErrInvalidArgument) } upper = strings.TrimSuffix(b, rwSuffix) } else if strings.HasSuffix(b, roSuffix) { if upper == "" { - return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "rw branch be first") + return "", nil, fmt.Errorf("rw branch be first: %w", errdefs.ErrInvalidArgument) } lower = append(lower, strings.TrimSuffix(b, roSuffix)) } else { - return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "unhandled aufs suffix") + return "", nil, fmt.Errorf("unhandled aufs suffix: %w", errdefs.ErrInvalidArgument) } } } if upper == "" { - return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "rw branch not found") + return "", nil, fmt.Errorf("rw branch not found: %w", errdefs.ErrInvalidArgument) } return } diff --git a/diff/apply/apply_linux_test.go b/diff/apply/apply_linux_test.go index 999d506..731ce0d 100644 --- a/diff/apply/apply_linux_test.go +++ b/diff/apply/apply_linux_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
diff --git a/diff/apply/apply_other.go b/diff/apply/apply_other.go index 01e0f11..7a0d665 100644 --- a/diff/apply/apply_other.go +++ b/diff/apply/apply_other.go @@ -1,4 +1,5 @@ -// +build !linux +//go:build !linux && !darwin +// +build !linux,!darwin /* Copyright The containerd Authors. diff --git a/diff/diff.go b/diff/diff.go index 17aab61..235d637 100644 --- a/diff/diff.go +++ b/diff/diff.go @@ -18,6 +18,7 @@ package diff import ( "context" + "io" "github.com/containerd/containerd/mount" "github.com/gogo/protobuf/types" @@ -37,6 +38,12 @@ type Config struct { // Labels are the labels to apply to the generated content Labels map[string]string + + // Compressor is a function to compress the diff stream + // instead of the default gzip compressor. Differ passes + // the MediaType of the target diff content to the compressor. + // When using this config, MediaType must be specified as well. + Compressor func(dest io.Writer, mediaType string) (io.WriteCloser, error) } // Opt is used to configure a diff operation @@ -71,6 +78,14 @@ type Applier interface { Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount, opts ...ApplyOpt) (ocispec.Descriptor, error) } +// WithCompressor sets the function to be used to compress the diff stream. +func WithCompressor(f func(dest io.Writer, mediaType string) (io.WriteCloser, error)) Opt { + return func(c *Config) error { + c.Compressor = f + return nil + } +} + // WithMediaType sets the media type to use for creating the diff, without // specifying the differ will choose a default. 
func WithMediaType(m string) Opt { diff --git a/diff/lcow/lcow.go b/diff/lcow/lcow.go index 3a3ead4..3fa19fe 100644 --- a/diff/lcow/lcow.go +++ b/diff/lcow/lcow.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* @@ -20,8 +21,8 @@ package lcow import ( "context" + "fmt" "io" - "io/ioutil" "os" "path" "runtime" @@ -38,7 +39,6 @@ import ( "github.com/containerd/containerd/plugin" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -111,7 +111,7 @@ func (s windowsLcowDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mou var config diff.ApplyConfig for _, o := range opts { if err := o(ctx, desc, &config); err != nil { - return emptyDesc, errors.Wrap(err, "failed to apply config opt") + return emptyDesc, fmt.Errorf("failed to apply config opt: %w", err) } } @@ -122,14 +122,14 @@ func (s windowsLcowDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mou ra, err := s.store.ReaderAt(ctx, desc) if err != nil { - return emptyDesc, errors.Wrap(err, "failed to get reader from content store") + return emptyDesc, fmt.Errorf("failed to get reader from content store: %w", err) } defer ra.Close() processor := diff.NewProcessorChain(desc.MediaType, content.NewReader(ra)) for { if processor, err = diff.GetProcessor(ctx, processor, config.ProcessorPayloads); err != nil { - return emptyDesc, errors.Wrapf(err, "failed to get stream processor for %s", desc.MediaType) + return emptyDesc, fmt.Errorf("failed to get stream processor for %s: %w", desc.MediaType, err) } if processor.MediaType() == ocispec.MediaTypeImageLayer { break @@ -157,22 +157,22 @@ func (s windowsLcowDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mou err = tar2ext4.Convert(rc, outFile, tar2ext4.ConvertWhiteout, tar2ext4.AppendVhdFooter, tar2ext4.MaximumDiskSize(maxLcowVhdSizeGB)) if err != nil { - return emptyDesc, errors.Wrapf(err, "failed to convert tar2ext4 vhd") + return 
emptyDesc, fmt.Errorf("failed to convert tar2ext4 vhd: %w", err) } err = outFile.Sync() if err != nil { - return emptyDesc, errors.Wrapf(err, "failed to sync tar2ext4 vhd to disk") + return emptyDesc, fmt.Errorf("failed to sync tar2ext4 vhd to disk: %w", err) } outFile.Close() // Read any trailing data - if _, err := io.Copy(ioutil.Discard, rc); err != nil { + if _, err := io.Copy(io.Discard, rc); err != nil { return emptyDesc, err } err = security.GrantVmGroupAccess(layerPath) if err != nil { - return emptyDesc, errors.Wrapf(err, "failed GrantVmGroupAccess on layer vhd: %v", layerPath) + return emptyDesc, fmt.Errorf("failed GrantVmGroupAccess on layer vhd: %v: %w", layerPath, err) } return ocispec.Descriptor{ @@ -185,7 +185,7 @@ func (s windowsLcowDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mou // Compare creates a diff between the given mounts and uploads the result // to the content store. func (s windowsLcowDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) { - return emptyDesc, errors.Wrap(errdefs.ErrNotImplemented, "windowsLcowDiff does not implement Compare method") + return emptyDesc, fmt.Errorf("windowsLcowDiff does not implement Compare method: %w", errdefs.ErrNotImplemented) } type readCounter struct { @@ -201,11 +201,11 @@ func (rc *readCounter) Read(p []byte) (n int, err error) { func mountsToLayerAndParents(mounts []mount.Mount) (string, []string, error) { if len(mounts) != 1 { - return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "number of mounts should always be 1 for Windows lcow-layers") + return "", nil, fmt.Errorf("number of mounts should always be 1 for Windows lcow-layers: %w", errdefs.ErrInvalidArgument) } mnt := mounts[0] if mnt.Type != "lcow-layer" { - return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "mount layer type must be lcow-layer") + return "", nil, fmt.Errorf("mount layer type must be lcow-layer: %w", errdefs.ErrInvalidArgument) } 
parentLayerPaths, err := mnt.GetParentPaths() diff --git a/diff/stream.go b/diff/stream.go index 655f9ce..44e35fc 100644 --- a/diff/stream.go +++ b/diff/stream.go @@ -18,6 +18,7 @@ package diff import ( "context" + "errors" "io" "os" @@ -25,7 +26,6 @@ import ( "github.com/containerd/containerd/images" "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) var ( diff --git a/diff/stream_unix.go b/diff/stream_unix.go index d79fd71..6622c33 100644 --- a/diff/stream_unix.go +++ b/diff/stream_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -21,15 +22,15 @@ package diff import ( "bytes" "context" + "errors" "fmt" "io" "os" - "os/exec" "sync" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" ) // NewBinaryProcessor returns a binary processor for use with processing content streams diff --git a/diff/stream_windows.go b/diff/stream_windows.go index 19dcbac..c0bf03b 100644 --- a/diff/stream_windows.go +++ b/diff/stream_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -21,19 +19,18 @@ package diff import ( "bytes" "context" + "errors" "fmt" "io" - "io/ioutil" "os" - "os/exec" "path/filepath" "sync" winio "github.com/Microsoft/go-winio" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" + exec "golang.org/x/sys/execabs" ) const processorPipe = "STREAM_PROCESSOR_PIPE" @@ -157,7 +154,7 @@ func (c *binaryProcessor) Close() error { } func getUiqPath() (string, error) { - dir, err := ioutil.TempDir("", "") + dir, err := os.MkdirTemp("", "") if err != nil { return "", err } diff --git a/diff/walking/differ.go b/diff/walking/differ.go index 7fc2691..ad20788 100644 --- a/diff/walking/differ.go +++ b/diff/walking/differ.go @@ -18,10 +18,11 @@ package walking import ( "context" + "crypto/rand" "encoding/base64" + "errors" "fmt" "io" - "math/rand" "time" "github.com/containerd/containerd/archive" @@ -33,7 +34,6 @@ import ( "github.com/containerd/containerd/mount" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) type walkingDiff struct { @@ -65,22 +65,29 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o } } - if config.MediaType == "" { - config.MediaType = ocispec.MediaTypeImageLayerGzip - } - var isCompressed bool - switch config.MediaType { - case ocispec.MediaTypeImageLayer: - case ocispec.MediaTypeImageLayerGzip: + if config.Compressor != nil { + if config.MediaType == "" { + return emptyDesc, errors.New("media type must be explicitly specified when using custom compressor") + } isCompressed = true - default: - return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", config.MediaType) + } else { + if config.MediaType == "" { + config.MediaType = ocispec.MediaTypeImageLayerGzip + } + + switch config.MediaType { + case ocispec.MediaTypeImageLayer: + case ocispec.MediaTypeImageLayerGzip: + isCompressed = true + 
default: + return emptyDesc, fmt.Errorf("unsupported diff media type: %v: %w", config.MediaType, errdefs.ErrNotImplemented) + } } var ocidesc ocispec.Descriptor if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error { - return mount.WithTempMount(ctx, upper, func(upperRoot string) error { + return mount.WithReadonlyTempMount(ctx, upper, func(upperRoot string) error { var newReference bool if config.Reference == "" { newReference = true @@ -93,10 +100,14 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o MediaType: config.MediaType, // most contentstore implementations just ignore this })) if err != nil { - return errors.Wrap(err, "failed to open writer") + return fmt.Errorf("failed to open writer: %w", err) } + + // errOpen is set when an error occurs while the content writer has not been + // committed or closed yet to force a cleanup + var errOpen error defer func() { - if err != nil { + if errOpen != nil { cw.Close() if newReference { if abortErr := s.store.Abort(ctx, config.Reference); abortErr != nil { @@ -106,22 +117,29 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o } }() if !newReference { - if err = cw.Truncate(0); err != nil { - return err + if errOpen = cw.Truncate(0); errOpen != nil { + return errOpen } } if isCompressed { dgstr := digest.SHA256.Digester() var compressed io.WriteCloser - compressed, err = compression.CompressStream(cw, compression.Gzip) - if err != nil { - return errors.Wrap(err, "failed to get compressed stream") + if config.Compressor != nil { + compressed, errOpen = config.Compressor(cw, config.MediaType) + if errOpen != nil { + return fmt.Errorf("failed to get compressed stream: %w", errOpen) + } + } else { + compressed, errOpen = compression.CompressStream(cw, compression.Gzip) + if errOpen != nil { + return fmt.Errorf("failed to get compressed stream: %w", errOpen) + } } - err = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), 
lowerRoot, upperRoot) + errOpen = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), lowerRoot, upperRoot) compressed.Close() - if err != nil { - return errors.Wrap(err, "failed to write compressed diff") + if errOpen != nil { + return fmt.Errorf("failed to write compressed diff: %w", errOpen) } if config.Labels == nil { @@ -129,8 +147,8 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o } config.Labels[uncompressed] = dgstr.Digest().String() } else { - if err = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot); err != nil { - return errors.Wrap(err, "failed to write diff") + if errOpen = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot); errOpen != nil { + return fmt.Errorf("failed to write diff: %w", errOpen) } } @@ -140,15 +158,16 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o } dgst := cw.Digest() - if err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil { - if !errdefs.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to commit") + if errOpen = cw.Commit(ctx, 0, dgst, commitopts...); errOpen != nil { + if !errdefs.IsAlreadyExists(errOpen) { + return fmt.Errorf("failed to commit: %w", errOpen) } + errOpen = nil } info, err := s.store.Info(ctx, dgst) if err != nil { - return errors.Wrap(err, "failed to get info from content store") + return fmt.Errorf("failed to get info from content store: %w", err) } if info.Labels == nil { info.Labels = make(map[string]string) @@ -157,7 +176,7 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o if _, ok := info.Labels[uncompressed]; !ok { info.Labels[uncompressed] = config.Labels[uncompressed] if _, err := s.store.Update(ctx, info, "labels."+uncompressed); err != nil { - return errors.Wrap(err, "error setting uncompressed label") + return fmt.Errorf("error setting uncompressed label: %w", err) } } diff --git a/diff/windows/windows.go b/diff/windows/windows.go index 7285d29..97afb5a 100644 --- 
a/diff/windows/windows.go +++ b/diff/windows/windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* @@ -24,7 +25,6 @@ import ( "encoding/base64" "fmt" "io" - "io/ioutil" "time" winio "github.com/Microsoft/go-winio" @@ -40,7 +40,6 @@ import ( "github.com/containerd/containerd/plugin" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -106,20 +105,20 @@ func (s windowsDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts var config diff.ApplyConfig for _, o := range opts { if err := o(ctx, desc, &config); err != nil { - return emptyDesc, errors.Wrap(err, "failed to apply config opt") + return emptyDesc, fmt.Errorf("failed to apply config opt: %w", err) } } ra, err := s.store.ReaderAt(ctx, desc) if err != nil { - return emptyDesc, errors.Wrap(err, "failed to get reader from content store") + return emptyDesc, fmt.Errorf("failed to get reader from content store: %w", err) } defer ra.Close() processor := diff.NewProcessorChain(desc.MediaType, content.NewReader(ra)) for { if processor, err = diff.GetProcessor(ctx, processor, config.ProcessorPayloads); err != nil { - return emptyDesc, errors.Wrapf(err, "failed to get stream processor for %s", desc.MediaType) + return emptyDesc, fmt.Errorf("failed to get stream processor for %s: %w", desc.MediaType, err) } if processor.MediaType() == ocispec.MediaTypeImageLayer { break @@ -150,7 +149,7 @@ func (s windowsDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts } // Read any trailing data - if _, err := io.Copy(ioutil.Discard, rc); err != nil { + if _, err := io.Copy(io.Discard, rc); err != nil { return emptyDesc, err } @@ -188,7 +187,7 @@ func (s windowsDiff) Compare(ctx context.Context, lower, upper []mount.Mount, op case ocispec.MediaTypeImageLayerGzip: isCompressed = true default: - return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", 
config.MediaType) + return emptyDesc, fmt.Errorf("unsupported diff media type: %v: %w", config.MediaType, errdefs.ErrNotImplemented) } newReference := false @@ -202,7 +201,7 @@ func (s windowsDiff) Compare(ctx context.Context, lower, upper []mount.Mount, op })) if err != nil { - return emptyDesc, errors.Wrap(err, "failed to open writer") + return emptyDesc, fmt.Errorf("failed to open writer: %w", err) } defer func() { @@ -235,12 +234,12 @@ func (s windowsDiff) Compare(ctx context.Context, lower, upper []mount.Mount, op var compressed io.WriteCloser compressed, err = compression.CompressStream(cw, compression.Gzip) if err != nil { - return emptyDesc, errors.Wrap(err, "failed to get compressed stream") + return emptyDesc, fmt.Errorf("failed to get compressed stream: %w", err) } err = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), "", layers[0], archive.AsWindowsContainerLayerPair(), archive.WithParentLayers(layers[1:])) compressed.Close() if err != nil { - return emptyDesc, errors.Wrap(err, "failed to write compressed diff") + return emptyDesc, fmt.Errorf("failed to write compressed diff: %w", err) } if config.Labels == nil { @@ -249,7 +248,7 @@ func (s windowsDiff) Compare(ctx context.Context, lower, upper []mount.Mount, op config.Labels[uncompressed] = dgstr.Digest().String() } else { if err = archive.WriteDiff(ctx, cw, "", layers[0], archive.AsWindowsContainerLayerPair(), archive.WithParentLayers(layers[1:])); err != nil { - return emptyDesc, errors.Wrap(err, "failed to write diff") + return emptyDesc, fmt.Errorf("failed to write diff: %w", err) } } @@ -261,13 +260,13 @@ func (s windowsDiff) Compare(ctx context.Context, lower, upper []mount.Mount, op dgst := cw.Digest() if err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil { if !errdefs.IsAlreadyExists(err) { - return emptyDesc, errors.Wrap(err, "failed to commit") + return emptyDesc, fmt.Errorf("failed to commit: %w", err) } } info, err := s.store.Info(ctx, dgst) if err != nil { - return 
emptyDesc, errors.Wrap(err, "failed to get info from content store") + return emptyDesc, fmt.Errorf("failed to get info from content store: %w", err) } if info.Labels == nil { info.Labels = make(map[string]string) @@ -276,7 +275,7 @@ func (s windowsDiff) Compare(ctx context.Context, lower, upper []mount.Mount, op if _, ok := info.Labels[uncompressed]; !ok { info.Labels[uncompressed] = config.Labels[uncompressed] if _, err := s.store.Update(ctx, info, "labels."+uncompressed); err != nil { - return emptyDesc, errors.Wrap(err, "error setting uncompressed label") + return emptyDesc, fmt.Errorf("error setting uncompressed label: %w", err) } } @@ -309,14 +308,14 @@ func (rc *readCounter) Read(p []byte) (n int, err error) { func mountsToLayerAndParents(mounts []mount.Mount) (string, []string, error) { if len(mounts) != 1 { - return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "number of mounts should always be 1 for Windows layers") + return "", nil, fmt.Errorf("number of mounts should always be 1 for Windows layers: %w", errdefs.ErrInvalidArgument) } mnt := mounts[0] if mnt.Type != "windows-layer" { // This is a special case error. When this is received the diff service // will attempt the next differ in the chain which for Windows is the // lcow differ that we want. 
- return "", nil, errors.Wrapf(errdefs.ErrNotImplemented, "windowsDiff does not support layer type %s", mnt.Type) + return "", nil, fmt.Errorf("windowsDiff does not support layer type %s: %w", mnt.Type, errdefs.ErrNotImplemented) } parentLayerPaths, err := mnt.GetParentPaths() @@ -335,39 +334,39 @@ func mountPairToLayerStack(lower, upper []mount.Mount) ([]string, error) { // May return an ErrNotImplemented, which will fall back to LCOW upperLayer, upperParentLayerPaths, err := mountsToLayerAndParents(upper) if err != nil { - return nil, errors.Wrapf(err, "Upper mount invalid") + return nil, fmt.Errorf("Upper mount invalid: %w", err) } // Trivial case, diff-against-nothing if len(lower) == 0 { if len(upperParentLayerPaths) != 0 { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "windowsDiff cannot diff a layer with parents against a null layer") + return nil, fmt.Errorf("windowsDiff cannot diff a layer with parents against a null layer: %w", errdefs.ErrInvalidArgument) } return []string{upperLayer}, nil } if len(upperParentLayerPaths) < 1 { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "windowsDiff cannot diff a layer with no parents against another layer") + return nil, fmt.Errorf("windowsDiff cannot diff a layer with no parents against another layer: %w", errdefs.ErrInvalidArgument) } lowerLayer, lowerParentLayerPaths, err := mountsToLayerAndParents(lower) if errdefs.IsNotImplemented(err) { // Upper was a windows-layer, lower is not. We can't handle that. 
- return nil, errors.Wrap(errdefs.ErrInvalidArgument, "windowsDiff cannot diff a windows-layer against a non-windows-layer") + return nil, fmt.Errorf("windowsDiff cannot diff a windows-layer against a non-windows-layer: %w", errdefs.ErrInvalidArgument) } else if err != nil { - return nil, errors.Wrapf(err, "Lower mount invalid") + return nil, fmt.Errorf("Lower mount invalid: %w", err) } if upperParentLayerPaths[0] != lowerLayer { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "windowsDiff cannot diff a layer against a layer other than its own parent") + return nil, fmt.Errorf("windowsDiff cannot diff a layer against a layer other than its own parent: %w", errdefs.ErrInvalidArgument) } if len(upperParentLayerPaths) != len(lowerParentLayerPaths)+1 { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "windowsDiff cannot diff a layer against a layer with different parents") + return nil, fmt.Errorf("windowsDiff cannot diff a layer against a layer with different parents: %w", errdefs.ErrInvalidArgument) } for i, upperParent := range upperParentLayerPaths[1:] { if upperParent != lowerParentLayerPaths[i] { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "windowsDiff cannot diff a layer against a layer with different parents") + return nil, fmt.Errorf("windowsDiff cannot diff a layer against a layer with different parents: %w", errdefs.ErrInvalidArgument) } } diff --git a/docs/PLUGINS.md b/docs/PLUGINS.md index f3a16ca..b739e68 100644 --- a/docs/PLUGINS.md +++ b/docs/PLUGINS.md @@ -50,7 +50,9 @@ section for your given plugin `[proxy_plugins.myplugin]`. The `address` must refer to a local socket file which the containerd process has access to. The currently supported types are `snapshot` and `content`. -``` +```toml +version = 2 + [proxy_plugins] [proxy_plugins.customsnapshot] type = "snapshot" @@ -233,35 +235,39 @@ to load. #### Configuration Plugins are configured using the `[plugins]` section of containerd's config. 
-Every plugin can have its own section using the pattern `[plugins.<plugin id>]`. +Every plugin can have its own section using the pattern `[plugins."<plugin id>"]`. example configuration +```toml +version = 2 + +[plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false ``` + +To see full configuration example run `containerd config default`. +If you want to get the configuration combined with your configuration, run `containerd config dump`. + +##### Version header + +containerd has two configuration versions: +- Version 2 (Recommended): Introduced in containerd 1.3. +- Version 1 (Default): Introduced in containerd 1.0. Deprecated and will be removed in containerd 2.0. + +A configuration with Version 2 must have `version = 2` header, and must have +fully qualified plugin IDs in the `[plugins]` section: +```toml +version = 2 + +[plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false +``` + +A configuration with Version 1 may not have `version` header, and does not need fully qualified plugin IDs. 
+```toml [plugins] [plugins.cgroups] no_prometheus = false - [plugins.cri] - stream_server_address = "" - stream_server_port = "10010" - enable_selinux = false - sandbox_image = "k8s.gcr.io/pause:3.5" - stats_collect_period = 10 - systemd_cgroup = false - [plugins.cri.containerd] - snapshotter = "overlayfs" - [plugins.cri.containerd.default_runtime] - runtime_type = "io.containerd.runtime.v1.linux" - runtime_engine = "" - runtime_root = "" - [plugins.cri.containerd.untrusted_workload_runtime] - runtime_type = "" - runtime_engine = "" - runtime_root = "" - [plugins.cri.cni] - bin_dir = "/opt/cni/bin" - conf_dir = "/etc/cni/net.d" - [plugins.cri.registry] - [plugins.cri.registry.mirrors] - [plugins.cri.registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] -``` +``` \ No newline at end of file diff --git a/docs/RUNC.md b/docs/RUNC.md index 281c4b3..1b4a99d 100644 --- a/docs/RUNC.md +++ b/docs/RUNC.md @@ -1,5 +1,7 @@ +# Runc version requirements for containerd + containerd is built with OCI support and with support for advanced features -provided by [runc](https://github.com/opencontainers/runc). +provided by the [runc container runtime](https://github.com/opencontainers/runc). Development (`-dev`) and pre-releases of containerd may depend features in `runc` that have not yet been released, and may require a specific runc build. The version @@ -14,35 +16,25 @@ or higher than the version of runc described in [`script/setup/runc-version`](.. If you encounter any runtime errors, make sure your runc is in sync with the commit or tag provided in that file. -## building +If you do not have the correct version of `runc` installed, you can refer to the +["building" section in the runc documentation](https://github.com/opencontainers/runc#building) +to learn how to build `runc` from source. -> For more information on how to clone and build runc also refer to the runc -> building [documentation](https://github.com/opencontainers/runc#building). 
+runc builds have [SELinux](https://en.wikipedia.org/wiki/Security-Enhanced_Linux), +[AppArmor](https://en.wikipedia.org/wiki/AppArmor), and [seccomp](https://en.wikipedia.org/wiki/seccomp) +support enabled by default. -Before building runc you may need to install additional build dependencies, which -will vary by platform. For example, you may need to install `libseccomp` e.g. -`libseccomp-dev` for Ubuntu. +Note that "seccomp" can be disabled by passing an empty `BUILDTAGS` make +variable, but is highly recommended to keep enabled. -From within your `opencontainers/runc` repository run: - -```bash -make && sudo make install -``` - -Starting with runc 1.0.0-rc93, the "selinux" and "apparmor" buildtags have been -removed, and runc builds have SELinux, AppArmor, and seccomp support enabled -by default. Note that "seccomp" can be disabled by passing an empty `BUILDTAGS` -make variable, but is highly recommended to keep enabled. - -By default, runc is compiled with kernel-memory limiting support enabled. This -functionality is deprecated in kernel 5.4 and up, and is known to be broken on -RHEL7 and CentOS 7 3.10 kernels. For these kernels, we recommend disabling kmem -support using the `nokmem` build-tag. When doing so, be sure to set the `seccomp` -build-tag to enable seccomp support, for example: +Use the output of the `runc --version` output to verify if your version of runc +has seccomp enabled. For example: ```sh -make BUILDTAGS='nokmem seccomp' && make install +$ runc --version +runc version 1.0.1 +commit: v1.0.1-0-g4144b638 +spec: 1.0.2-dev +go: go1.16.6 +libseccomp: 2.4.4 ``` - -For details about the `nokmem` build-tag, refer to the discussion on [opencontainers/runc#2594](https://github.com/opencontainers/runc/pull/2594). -For further details on building runc, refer to the [build instructions in the runc README](https://github.com/opencontainers/runc#building). 
diff --git a/docs/cri/architecture.md b/docs/cri/architecture.md index 823e728..2cff5f2 100644 --- a/docs/cri/architecture.md +++ b/docs/cri/architecture.md @@ -1,7 +1,7 @@ # Architecture of The CRI Plugin This document describes the architecture of the `cri` plugin for `containerd`. -This plugin is an implementation of Kubernetes [container runtime interface (CRI)](https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto). Containerd operates on the same node as the [Kubelet](https://kubernetes.io/docs/reference/generated/kubelet/). The `cri` plugin inside containerd handles all CRI service requests from the Kubelet and uses containerd internals to manage containers and container images. +This plugin is an implementation of Kubernetes [container runtime interface (CRI)](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.proto). Containerd operates on the same node as the [Kubelet](https://kubernetes.io/docs/reference/generated/kubelet/). The `cri` plugin inside containerd handles all CRI service requests from the Kubelet and uses containerd internals to manage containers and container images. The `cri` plugin uses containerd to manage the full container lifecycle and all container images. As also shown below, `cri` manages pod networking via [CNI](https://github.com/containernetworking/cni) (another CNCF project). 
@@ -9,7 +9,7 @@ The `cri` plugin uses containerd to manage the full container lifecycle and all Let's use an example to demonstrate how the `cri` plugin works for the case when Kubelet creates a single-container pod: * Kubelet calls the `cri` plugin, via the CRI runtime service API, to create a pod; -* `cri` creates and configures the pod’s network namespace using CNI; +* `cri` creates the pod’s network namespace, and then configures it using CNI; * `cri` uses containerd internal to create and start a special [pause container](https://www.ianlewis.org/en/almighty-pause-container) (the sandbox container) and put that container inside the pod’s cgroups and namespace (steps omitted for brevity); * Kubelet subsequently calls the `cri` plugin, via the CRI image service API, to pull the application container image; * `cri` further uses containerd to pull the image if the image is not present on the node; diff --git a/docs/cri/config.md b/docs/cri/config.md index 388a334..4527f93 100644 --- a/docs/cri/config.md +++ b/docs/cri/config.md @@ -3,7 +3,7 @@ This document provides the description of the CRI plugin configuration. The CRI plugin config is part of the containerd config (default path: `/etc/containerd/config.toml`). -See [here](https://github.com/containerd/containerd/blob/master/docs/ops.md) +See [here](https://github.com/containerd/containerd/blob/main/docs/ops.md) for more information about containerd config. The explanation and default value of each configuration item are as follows: @@ -40,7 +40,7 @@ version = 2 selinux_category_range = 1024 # sandbox_image is the image used by sandbox container. - sandbox_image = "k8s.gcr.io/pause:3.5" + sandbox_image = "registry.k8s.io/pause:3.6" # stats_collect_period is the period (in seconds) of snapshots stats collection. stats_collect_period = 10 @@ -97,10 +97,29 @@ version = 2 # when using containerd with Kubernetes <=1.11. 
 disable_proc_mount = false - # unsetSeccompProfile is the profile containerd/cri will use if the provided seccomp profile is - # unset (`""`) for a container (default is `unconfined`) + # unset_seccomp_profile is the seccomp profile containerd/cri will use if the seccomp + # profile requested over CRI is unset (or nil) for a pod/container (otherwise if this field is not set the + # default unset profile will map to `unconfined`) + # Note: The default unset seccomp profile should not be confused with the seccomp profile + # used in CRI when the runtime default seccomp profile is requested. In the latter case, the + # default is set by the following code (https://github.com/containerd/containerd/blob/main/contrib/seccomp/seccomp_default.go). + # To summarize, there are two different seccomp defaults, the unset default used when the CRI request is + # set to nil or `unconfined`, and the default used when the runtime default seccomp profile is requested. unset_seccomp_profile = "" + # enable_unprivileged_ports configures net.ipv4.ip_unprivileged_port_start=0 + # for all containers which are not using host network + # and if it is not overwritten by PodSandboxConfig + # Note that currently default is set to disabled but target change it in future, see: + # [k8s discussion](https://github.com/kubernetes/kubernetes/issues/102612) + enable_unprivileged_ports = false + + # enable_unprivileged_icmp configures net.ipv4.ping_group_range="0 2147483647" + # for all containers which are not using host network, are not running in user namespace + # and if it is not overwritten by PodSandboxConfig + # Note that currently default is set to disabled but target change it in future together with enable_unprivileged_ports + enable_unprivileged_icmp = false + # 'plugins."io.containerd.grpc.v1.cri".containerd' contains config related to containerd [plugins."io.containerd.grpc.v1.cri".containerd] @@ -124,14 +143,20 @@ version = 2 # default_runtime_name is the default runtime name to use. 
default_runtime_name = "runc" + # ignore_rdt_not_enabled_errors disables RDT related errors when RDT + # support has not been enabled. Intel RDT is a technology for cache and + # memory bandwidth management. By default, trying to set the RDT class of + # a container via annotations produces an error if RDT hasn't been enabled. + # This config option practically enables a "soft" mode for RDT where these + # errors are ignored and the container gets no RDT class. + ignore_rdt_not_enabled_errors = false + # 'plugins."io.containerd.grpc.v1.cri".containerd.default_runtime' is the runtime to use in containerd. - # DEPRECATED: use `default_runtime_name` and `plugins."io.containerd.grpc.v1.cri".runtimes` instead. - # Remove in containerd 1.4. + # DEPRECATED: use `default_runtime_name` and `plugins."io.containerd.grpc.v1.cri".containerd.runtimes` instead. [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] # 'plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime' is a runtime to run untrusted workloads on it. - # DEPRECATED: use `untrusted` runtime in `plugins."io.containerd.grpc.v1.cri".runtimes` instead. - # Remove in containerd 1.4. + # DEPRECATED: use `untrusted` runtime in `plugins."io.containerd.grpc.v1.cri".containerd.runtimes` instead. [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] # 'plugins."io.containerd.grpc.v1.cri".containerd.runtimes' is a map from CRI RuntimeHandler strings, which specify types @@ -167,10 +192,22 @@ version = 2 # base_runtime_spec is a file path to a JSON file with the OCI spec that will be used as the base spec that all # container's are created from. # Use containerd's `ctr oci spec > /etc/containerd/cri-base.json` to output initial spec file. - # Spec files are loaded at launch, so containerd daemon must be restared on any changes to refresh default specs. + # Spec files are loaded at launch, so containerd daemon must be restarted on any changes to refresh default specs. 
# Still running containers and restarted containers will still be using the original spec from which that container was created. base_runtime_spec = "" + # conf_dir is the directory in which the admin places a CNI conf. + # this allows a different CNI conf for the network stack when a different runtime is being used. + cni_conf_dir = "/etc/cni/net.d" + + # cni_max_conf_num specifies the maximum number of CNI plugin config files to + # load from the CNI config directory. By default, only 1 CNI plugin config + # file will be loaded. If you want to load multiple CNI plugin config files + # set max_conf_num to the number desired. Setting cni_max_config_num to 0 is + # interpreted as no limit is desired and will result in all CNI plugin + # config files being loaded from the CNI config directory. + cni_max_conf_num = 1 + # 'plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options' is options specific to # "io.containerd.runc.v1" and "io.containerd.runc.v2". Its corresponding options type is: # https://github.com/containerd/containerd/blob/v1.3.2/runtime/v2/runc/options/oci.pb.go#L26 . @@ -234,13 +271,19 @@ version = 2 # This will be deprecated when kubenet is deprecated. # See the "CNI Config Template" section for more details. conf_template = "" + # ip_pref specifies the strategy to use when selecting the main IP address for a pod. + # options include: + # * ipv4, "" - (default) select the first ipv4 address + # * ipv6 - select the first ipv6 address + # * cni - use the order returned by the CNI plugins, returning the first IP address from the results + ip_pref = "ipv4" # 'plugins."io.containerd.grpc.v1.cri".image_decryption' contains config related # to handling decryption of encrypted container images. [plugins."io.containerd.grpc.v1.cri".image_decryption] # key_model defines the name of the key model used for how the cri obtains # keys used for decryption of encrypted container images. 
- # The [decryption document](https://github.com/containerd/cri/blob/master/docs/decryption.md) + # The [decryption document](https://github.com/containerd/containerd/blob/main/docs/cri/decryption.md) # contains additional information about the key models available. # # Set of available string options: {"", "node"} @@ -248,12 +291,12 @@ version = 2 # disabling image decryption. # # In order to use the decryption feature, additional configurations must be made. - # The [decryption document](https://github.com/containerd/cri/blob/master/docs/decryption.md) + # The [decryption document](https://github.com/containerd/containerd/blob/main/docs/cri/decryption.md) # provides information of how to set up stream processors and the containerd imgcrypt decoder # with the appropriate key models. # # Additional information: - # * Stream processors: https://github.com/containerd/containerd/blob/master/docs/stream_processors.md + # * Stream processors: https://github.com/containerd/containerd/blob/main/docs/stream_processors.md # * Containerd imgcrypt: https://github.com/containerd/imgcrypt key_model = "node" diff --git a/docs/cri/crictl.md b/docs/cri/crictl.md index 15aca39..8becc47 100644 --- a/docs/cri/crictl.md +++ b/docs/cri/crictl.md @@ -12,10 +12,10 @@ or `crictl` please make sure the issue has not already been submitted. If you have not already installed crictl please install the version compatible with the `cri` plugin you are using. If you are a user, your deployment should have installed crictl for you. If not, get it from your release tarball. -If you are a developer the current version of crictl is specified [here](../hack/utils.sh). +If you are a developer the current version of crictl is specified [here](/script/setup/critools-version). 
A helper command has been included to install the dependencies at the right version: ```console -$ make install.deps +$ make install-deps ``` * Note: The file named `/etc/crictl.yaml` is used to configure crictl so you don't have to repeatedly specify the runtime sock used to connect crictl @@ -59,29 +59,29 @@ command. With the load command you inject a container image into the container runtime from a file. First you need to create a container image tarball. For example to create an image tarball for a pause container using Docker: ```console -$ docker pull k8s.gcr.io/pause:3.5 - 3.5: Pulling from pause - 019d8da33d91: Pull complete - Digest: sha256:1ff6c18fbef2045af6b9c16bf034cc421a29027b800e4f9b68ae9b1cb3e9ae07 - Status: Downloaded newer image for k8s.gcr.io/pause:3.5 - k8s.gcr.io/pause:3.5 -$ docker save k8s.gcr.io/pause:3.5 -o pause.tar +$ docker pull registry.k8s.io/pause:3.6 + 3.6: Pulling from pause + fbe1a72f5dcd: Pull complete + Digest: sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + Status: Downloaded newer image for registry.k8s.io/pause:3.6 + registry.k8s.io/pause:3.6 +$ docker save registry.k8s.io/pause:3.6 -o pause.tar ``` Then use `ctr` to load the container image into the container runtime: ```console # The cri plugin uses the "k8s.io" containerd namespace. $ sudo ctr -n=k8s.io images import pause.tar - Loaded image: k8s.gcr.io/pause:3.5 + Loaded image: registry.k8s.io/pause:3.6 ``` List images and inspect the pause image: ```console $ sudo crictl images IMAGE TAG IMAGE ID SIZE docker.io/library/busybox latest f6e427c148a76 728kB -k8s.gcr.io/pause 3.5 ed210e3e4a5ba 683kB +registry.k8s.io/pause 3.6 ed210e3e4a5ba 683kB $ sudo crictl inspecti ed210e3e4a5ba ... displays information about the pause image. -$ sudo crictl inspecti k8s.gcr.io/pause:3.5 +$ sudo crictl inspecti registry.k8s.io/pause:3.6 ... displays information about the pause image. 
``` @@ -201,7 +201,7 @@ $ crictl info } }, "streamServerPort": "10010", - "sandboxImage": "k8s.gcr.io/pause:3.5", + "sandboxImage": "registry.k8s.io/pause:3.6", "statsCollectPeriod": 10, "containerdRootDir": "/var/lib/containerd", "containerdEndpoint": "unix:///run/containerd/containerd.sock", diff --git a/docs/cri/decryption.md b/docs/cri/decryption.md index 32e2cd2..70fd898 100644 --- a/docs/cri/decryption.md +++ b/docs/cri/decryption.md @@ -40,6 +40,6 @@ version = 2 ``` In this example, container image decryption is set to use the "node" key model. -In addition, the decryption [`stream_processors`](https://github.com/containerd/containerd/blob/master/docs/stream_processors.md) are configured as specified in [containerd/imgcrypt project](https://github.com/containerd/imgcrypt), with the additional field `--decryption-keys-path` configured to specify where decryption keys are located locally in the node. +In addition, the decryption [`stream_processors`](https://github.com/containerd/containerd/blob/main/docs/stream_processors.md) are configured as specified in [containerd/imgcrypt project](https://github.com/containerd/imgcrypt), with the additional field `--decryption-keys-path` configured to specify where decryption keys are located locally in the node. -The `$OCICRYPT_KEYPROVIDER_CONFIG` environment variable is used for [ocicrypt keyprovider protocol](https://github.com/containers/ocicrypt/blob/master/docs/keyprovider.md). +The `$OCICRYPT_KEYPROVIDER_CONFIG` environment variable is used for [ocicrypt keyprovider protocol](https://github.com/containers/ocicrypt/blob/main/docs/keyprovider.md). diff --git a/docs/cri/proposal.md b/docs/cri/proposal.md index 64db560..ba6f4ca 100644 --- a/docs/cri/proposal.md +++ b/docs/cri/proposal.md @@ -23,7 +23,7 @@ Containerd is one potential alternative to Docker as the runtime for Kubernetes ### Cons * **User Adoption**: * Ideally, Kubernetes users don't interact with the underlying container runtime directly. 
However, for the lack of debug toolkits, sometimes users still need to login the node to debug with Docker CLI directly. - * Containerd provides barebone CLIs [ctr](https://github.com/containerd/containerd/tree/master/cmd/ctr) and [dist](https://github.com/containerd/containerd/tree/master/cmd/dist) for development and debugging purpose, but they may not be sufficient and necessary. Additionally, presuming these are sufficient and necessary tools, a plan and time would be needed to sufficiently document these CLIs and educate users in their use. + * Containerd provides barebone CLIs [ctr](https://github.com/containerd/containerd/tree/main/cmd/ctr) and [dist](https://github.com/containerd/containerd/tree/main/cmd/dist) for development and debugging purpose, but they may not be sufficient and necessary. Additionally, presuming these are sufficient and necessary tools, a plan and time would be needed to sufficiently document these CLIs and educate users in their use. * **Maturity**: The rescoped containerd is pretty new, and it's still under heavy development. ## Goals * Make sure containerd meets the requirement of Kubernetes, now and into the foreseeable future. @@ -64,7 +64,7 @@ CRI-containerd should: * Call [network plugin](https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/network/plugins.go) to update the options of the network namespace; * Let the user containers in the same sandbox share the network namespace. ### Container Metrics -Containerd provides [container cgroup metrics](https://github.com/containerd/containerd/blob/master/reports/2017-03-17.md#metrics), and plans to provide [container writable layer disk usage](https://github.com/containerd/containerd/issues/678). +Containerd provides [container cgroup metrics](https://github.com/containerd/containerd/blob/main/reports/2017-03-17.md#metrics), and plans to provide [container writable layer disk usage](https://github.com/containerd/containerd/issues/678). 
CRI container metrics api needs to be defined ([#27097](https://github.com/kubernetes/kubernetes/issues/27097)). After that, CRI-containerd should translate containerd container metrics into CRI container metrics. ### Image Management @@ -77,7 +77,7 @@ CRI image filesystem metrics needs to be defined ([#33048](https://github.com/ku ### Out of Scope Following items are out of the scope of this design, we may address them in future version as enhancement or optimization. * **Debuggability**: One of the biggest concern of CRI-containerd is debuggability. We should provide equivalent debuggability with Docker CLI through `kubectl`, [`cri-tools`](https://github.com/kubernetes-sigs/cri-tools) or containerd CLI. -* **Built-in CRI support**: The [plugin model](https://github.com/containerd/containerd/blob/master/design/plugins.md) provided by containerd makes it possible to directly build CRI support into containerd as a plugin, which will eliminate one more hop from the stack. But because of the [limitation of golang plugin](https://github.com/containerd/containerd/issues/563), we have to either maintain our own branch or push CRI plugin upstream. +* **Built-in CRI support**: The [plugin model](https://github.com/containerd/containerd/blob/main/design/plugins.md) provided by containerd makes it possible to directly build CRI support into containerd as a plugin, which will eliminate one more hop from the stack. But because of the [limitation of golang plugin](https://github.com/containerd/containerd/issues/563), we have to either maintain our own branch or push CRI plugin upstream. * **Seccomp**: ([#36997](https://github.com/kubernetes/kubernetes/issues/36997)) Seccomp is supported in OCI runtime spec. However, current seccomp implementation in Kubernetes is experimental and docker specific, the api needs to be defined in CRI first before CRI-containerd implements it. 
* **Streaming server authentication**: ([#36666](https://github.com/kubernetes/kubernetes/issues/36666)) CRI-containerd will be out-of-process with Kubelet, so it could not reuse Kubelet authentication. Its streaming server should implement its own authentication mechanism. * **Move container facilities into pod cgroup**: Container facilities including container image puller, container streaming handler, log handler and containerd-shim serve a specific container. They should be moved to the corresponding pod cgroup, and the overhead introduced by them should be charged to the pod. diff --git a/docs/garbage-collection.md b/docs/garbage-collection.md index 2395e58..809d67b 100644 --- a/docs/garbage-collection.md +++ b/docs/garbage-collection.md @@ -140,8 +140,9 @@ configuration is under the `scheduler` plugin. The default configuration is represented as... ```.toml +version = 2 [plugins] - [plugins.scheduler] + [plugins."io.containerd.gc.v1.scheduler"] pause_threshold = 0.02 deletion_threshold = 0 mutation_threshold = 100 diff --git a/docs/getting-started.md b/docs/getting-started.md index 08570e6..6de99c6 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -16,21 +16,7 @@ You can download one of the latest builds for containerd on the [github releases If you are using systemd, we have a `containerd.service` file at the root of the repository that you can use. The daemon also uses a configuration file located in `/etc/containerd/config.toml` for specifying daemon level options. -A sample configuration file looks like this: - -```toml -oom_score = -999 - -[debug] - level = "debug" - -[metrics] - address = "127.0.0.1:1338" - -[plugins.linux] - runtime = "runc" - shim_debug = true -``` +A sample configuration file can be found [here](containerd/docs/man/containerd-config.toml.5.md) The default configuration can be generated via `containerd config default > /etc/containerd/config.toml`. 
diff --git a/docs/hosts.md b/docs/hosts.md index 8df0e98..0298c93 100644 --- a/docs/hosts.md +++ b/docs/hosts.md @@ -1,17 +1,30 @@ # Registry Configuration - Introduction -Configuring registries will be done by specifying (optionally) a `hosts.toml` file for -each desired registry host in a configuration directory. **Note**: Updates under this directory -do not require restarting the containerd daemon. +New and additional registry hosts config support has been implemented in containerd v1.5 for the `ctr` +client (the containerd tool for admins/developers), containerd image service clients, and CRI clients +such as `kubectl` and `crictl`. + +Configuring registries, for these clients, will be done by specifying (optionally) a `hosts.toml` file for +each desired registry host in a configuration directory. **Note**: Updates under this directory do not +require restarting the containerd daemon. + +## Registry API Support + +All configured registry hosts are expected to comply with the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec). +Registries which are non-compliant or implement non-standard behavior are not guaranteed +to be supported and may break unexpectedly between releases. 
+ +Currently supported OCI Distribution version: **[v1.0.0](https://github.com/opencontainers/distribution-spec/tree/v1.0.0)** ## Specifying the Configuration Directory ### Using Host Namespace Configs with CTR -When pulling via `ctr` use the `--hosts-dir` option: +When pulling a container image via `ctr` using the `--hosts-dir` option tells `ctr` +to find and use the host configuration files located in the specified path: ``` -ctr images pull --hosts-dir "/etc/containerd/certs.d" +ctr images pull --hosts-dir "/etc/containerd/certs.d" myregistry.io:5000/image_name:tag ``` ### CRI @@ -21,6 +34,8 @@ been **DEPRECATED**._ You should now point your registry `config_path` to the pa Modify your `config.toml` (default location: `/etc/containerd/config.toml`) as follows: ```toml +version = 2 + [plugins."io.containerd.grpc.v1.cri".registry] config_path = "/etc/containerd/certs.d" ``` @@ -37,7 +52,7 @@ A registry host is the location where container images and artifacts are sourced registry hosts may be local or remote and are typically accessed via http/https using the [OCI distribution specification](https://github.com/opencontainers/distribution-spec/blob/main/spec.md). A registry mirror is not a registry host but these mirrors can also be used to pull content. -Registry hosts are typically refered to by their internet domain names, aka. registry +Registry hosts are typically referred to by their internet domain names, aka. registry host names. For example, docker.io, quay.io, gcr.io, and ghcr.io. A registry host namespace is, for the purpose of containerd registry configuration, a @@ -58,6 +73,9 @@ $ tree /etc/containerd/certs.d └── hosts.toml ``` +Optionally the `_default` registry host namespace can be used as a fallback, if no +other namespace matches. + The `/v2` portion of the pull request format shown above refers to the version of the distribution api. 
If not included in the pull request, `/v2` is added by default for all clients compliant to the distribution specification linked above. @@ -101,7 +119,7 @@ OPTIONS: Although we have deprecated the old CRI config pattern for specifying registry.mirrors and registry.configs you can still specify your credentials via -[CRI config](https://github.com/containerd/containerd/blob/master/docs/cri/registry.md#configure-registry-credentials). +[CRI config](https://github.com/containerd/containerd/blob/main/docs/cri/registry.md#configure-registry-credentials). Additionally, the containerd CRI plugin implements/supports the authentication parameters passed in through CRI pull image service requests. For example, when containerd is the container runtime implementation for `Kubernetes`, the containerd CRI plugin receives @@ -142,6 +160,21 @@ server = "https://registry-1.docker.io" # Exclude this to not use upstream ca = "docker-mirror.crt" # Or absolute path /etc/containerd/certs.d/docker.io/docker-mirror.crt ``` +### Setup Default Mirror for All Registries + +``` +$ tree /etc/containerd/certs.d +/etc/containerd/certs.d +└── _default + └── hosts.toml + +$ cat /etc/containerd/certs.d/_default/hosts.toml +server = "https://registry.example.com" + +[host."https://registry.example.com"] + capabilities = ["pull", "resolve"] +``` + ### Bypass TLS Verification Example To bypass the TLS verification for a private registry at `192.168.31.250:5000` @@ -235,8 +268,10 @@ client = [["/etc/certs/client.cert", "/etc/certs/client.key"],["/etc/certs/clien ## skip_verify field -`skip_verify` set this flag to `true` to skip the registry certificate -verification for this registry host namespace. (Defaults to `false`) +`skip_verify` skips verifications of the registry's certificate chain and +host name when set to `true`. This should only be used for testing or in +combination with other method of verifying connections. 
(Defaults to `false`) + ``` skip_verify = false ``` @@ -264,6 +299,17 @@ or x-custom-1-2 = "another custom header" ``` +## override_path field + +`override_path` is used to indicate the host's API root endpoint is defined +in the URL path rather than by the API specification. This may be used with +non-compliant OCI registries which are missing the `/v2` prefix. +(Defaults to `false`) + +``` +override_path = true +``` + ## host field(s) (in the toml table format) `[host]."https://namespace"` and `[host].http://namespace` entries in the @@ -300,6 +346,10 @@ for this registry host namespace: [host."https://test-3.registry"] client = ["/etc/certs/client-1.pem", "/etc/certs/client-2.pem"] + +[host."https://non-compliant-mirror.registry/v2/upstream"] + capabilities = ["pull"] + override_path = true ``` **Note**: Recursion is not supported in the specification of host mirror diff --git a/docs/man/containerd-config.toml.5.md b/docs/man/containerd-config.toml.5.md index ccee9e8..121e5aa 100644 --- a/docs/man/containerd-config.toml.5.md +++ b/docs/man/containerd-config.toml.5.md @@ -1,4 +1,4 @@ -# /etc/containerd/config.toml 5 08/08/2018 +# /etc/containerd/config.toml 5 04/05/2022 ## NAME @@ -23,18 +23,120 @@ settings. ## FORMAT +**version** +: The version field in the config file specifies the config’s version. If no +version number is specified inside the config file then it is assumed to be a +version 1 config and parsed as such. Please use version = 2 to enable version 2 +config as version 1 has been deprecated. + **root** : The root directory for containerd metadata. (Default: "/var/lib/containerd") **state** : The state directory for containerd (Default: "/run/containerd") +**plugin_dir** +: The directory for dynamic plugins to be stored + +**[grpc]** +: Section for gRPC socket listener settings. 
Contains the following properties: + +- **address** (Default: "/run/containerd/containerd.sock") +- **tcp_address** +- **tcp_tls_cert** +- **tcp_tls_key** +- **uid** (Default: 0) +- **gid** (Default: 0) +- **max_recv_message_size** +- **max_send_message_size** + +**[ttrpc]** +: Section for TTRPC settings. Contains properties: + +- **address** (Default: "") +- **uid** (Default: 0) +- **gid** (Default: 0) + +**[debug]** +: Section to enable and configure a debug socket listener. Contains four properties: + +- **address** (Default: "/run/containerd/debug.sock") +- **uid** (Default: 0) +- **gid** (Default: 0) +- **level** (Default: "info") sets the debug log level + +**[metrics]** +: Section to enable and configure a metrics listener. Contains two properties: + +- **address** (Default: "") Metrics endpoint does not listen by default +- **grpc_histogram** (Default: false) Turn on or off gRPC histogram metrics + +**disabled_plugins** +: Disabled plugins are IDs of plugins to disable. Disabled plugins won't be +initialized and started. + +**required_plugins** +: Required plugins are IDs of required plugins. Containerd exits if any +required plugin doesn't exist or fails to be initialized or started. + +**[plugins]** +: The plugins section contains configuration options exposed from installed plugins. +The following plugins are enabled by default and their settings are shown below. +Plugins that are not enabled by default will provide their own configuration values +documentation. 
+ +- **[plugins."io.containerd.monitor.v1.cgroups"]** has one option __no_prometheus__ (Default: **false**) +- **[plugins."io.containerd.service.v1.diff-service"]** has one option __default__, a list by default set to **["walking"]** +- **[plugins."io.containerd.runtime.v1.linux"]** has several options for configuring the runtime, shim, and related options: + - **shim** specifies the shim binary (Default: **"containerd-shim"**), + - **runtime** is the OCI compliant runtime binary (Default: **"runc"**), + - **runtime_root** is the root directory used by the runtime (Default: **""**), + - **no_shim** specifies whether to use a shim or not (Default: **false**), + - **shim_debug** turns on debugging for the shim (Default: **false**) +- **[plugins."io.containerd.gc.v1.scheduler"]** has several options that perform advanced tuning for the scheduler: + - **pause_threshold** is the maximum amount of time GC should be scheduled (Default: **0.02**), + - **deletion_threshold** guarantees GC is scheduled after n number of deletions (Default: **0** [not triggered]), + - **mutation_threshold** guarantees GC is scheduled after n number of database mutations (Default: **100**), + - **schedule_delay** defines the delay after trigger event before scheduling a GC (Default **"0ms"** [immediate]), + - **startup_delay** defines the delay after startup before scheduling a GC (Default **"100ms"**) +- **[plugins."io.containerd.runtime.v2.task"]** specifies options for configuring the runtime shim: + - **platforms** specifies the list of supported platforms + - **sched_core** Core scheduling is a feature that allows only trusted tasks + to run concurrently on cpus sharing compute resources (eg: hyperthreads on + a core). (Default: **false**) +- **[plugins."io.containerd.service.v1.tasks-service"]** has one option: + - **rdt_config_file** (Linux only) specifies path to a configuration used for + configuring RDT (Default: **""**). 
Enables support for Intel RDT, a + technology for cache and memory bandwidth management. + See https://github.com/intel/goresctrl/blob/v0.2.0/doc/rdt.md#configuration + for details of the configuration file format. + **oom_score** : The out of memory (OOM) score applied to the containerd daemon process (Default: 0) +**[cgroup]** +: Section for Linux cgroup specific settings + +- **path** (Default: "") Specify a custom cgroup path for created containers + +**[proxy_plugins]** +: Proxy plugins configures plugins which are communicated to over gRPC + +- **type** (Default: "") +- **address** (Default: "") + +**timeouts** +: Timeouts specified as a duration + + + **imports** : Imports is a list of additional configuration files to include. -This allows one to split the main configuration file and keep some sections +This allows to split the main configuration file and keep some sections separately (for example vendors may keep a custom runtime configuration in a separate file without modifying the main `config.toml`). Imported files will overwrite simple fields like `int` or @@ -42,53 +144,20 @@ Imported files will overwrite simple fields like `int` or Imported files are also versioned, and the version can't be higher than the main config. -**[grpc]** -: Section for gRPC socket listener settings. Contains three properties: - - **address** (Default: "/run/containerd/containerd.sock") - - **uid** (Default: 0) - - **gid** (Default: 0) +**stream_processors** -**[debug]** -: Section to enable and configure a debug socket listener. Contains four properties: - - **address** (Default: "/run/containerd/debug.sock") - - **uid** (Default: 0) - - **gid** (Default: 0) - - **level** (Default: "info") sets the debug log level - -**[metrics]** -: Section to enable and configure a metrics listener. 
Contains two properties: - - **address** (Default: "") Metrics endpoint does not listen by default - - **grpc_histogram** (Default: false) Turn on or off gRPC histogram metrics - -**[cgroup]** -: Section for Linux cgroup specific settings - - **path** (Default: "") Specify a custom cgroup path for created containers - -**[plugins]** -: The plugins section contains configuration options exposed from installed plugins. -The following plugins are enabled by default and their settings are shown below. -Plugins that are not enabled by default will provide their own configuration values -documentation. - - **[plugins.cgroup]** has one option __no_prometheus__ (Default: **false**) - - **[plugins.diff]** has one option __default__, a list by default set to **["walking"]** - - **[plugins.linux]** has several options for configuring the runtime, shim, and related options: - **shim** specifies the shim binary (Default: **"containerd-shim"**), - **runtime** is the OCI compliant runtime binary (Default: **"runc"**), - **runtime_root** is the root directory used by the runtime (Default: **""**), - **no_shim** specifies whether to use a shim or not (Default: **false**), - **shim_debug** turns on debugging for the shim (Default: **false**) - - **[plugins.scheduler]** has several options that perform advanced tuning for the scheduler: - **pause_threshold** is the maximum amount of time GC should be scheduled (Default: **0.02**), - **deletion_threshold** guarantees GC is scheduled after n number of deletions (Default: **0** [not triggered]), - **mutation_threshold** guarantees GC is scheduled after n number of database mutations (Default: **100**), - **schedule_delay** defines the delay after trigger event before scheduling a GC (Default **"0ms"** [immediate]), - **startup_delay** defines the delay after startup before scheduling a GC (Default **"100ms"**) +- **accepts** (Default: "[]") Accepts specific media-types +- **returns** (Default: "") Returns the media-type +- **path** 
(Default: "") Path or name of the binary +- **args** (Default: "[]") Args to the binary ## EXAMPLE The following is a complete **config.toml** default configuration example: -``` +```toml +version = 2 + root = "/var/lib/containerd" state = "/run/containerd" oom_score = 0 @@ -113,22 +182,27 @@ imports = ["/etc/containerd/runtime_*.toml", "./debug.toml"] path = "" [plugins] - [plugins.cgroups] + [plugins."io.containerd.monitor.v1.cgroups"] no_prometheus = false - [plugins.diff] + [plugins."io.containerd.service.v1.diff-service"] default = ["walking"] - [plugins.linux] + [plugins."io.containerd.runtime.v1.linux"] shim = "containerd-shim" runtime = "runc" runtime_root = "" no_shim = false shim_debug = false - [plugins.scheduler] + [plugins."io.containerd.gc.v1.scheduler"] pause_threshold = 0.02 deletion_threshold = 0 mutation_threshold = 100 schedule_delay = 0 startup_delay = "100ms" + [plugins."io.containerd.runtime.v2.task"] + platforms = ["linux/amd64"] + sched_core = true + [plugins."io.containerd.service.v1.tasks-service"] + rdt_config_file = "/etc/rdt-config.yaml" ``` ## BUGS diff --git a/docs/managed-opt.md b/docs/managed-opt.md index 73955d6..47085ce 100644 --- a/docs/managed-opt.md +++ b/docs/managed-opt.md @@ -11,7 +11,9 @@ Configuration: *containerd config:* ```toml -[plugins.opt] +version = 2 + +[plugins."io.containerd.internal.v1.opt"] path = "/opt/mypath" ``` diff --git a/docs/ops.md b/docs/ops.md index 320b77d..397b40e 100644 --- a/docs/ops.md +++ b/docs/ops.md @@ -82,8 +82,8 @@ There are a few settings that are important for ops. The first setting is the `oom_score`. Because containerd will be managing multiple containers, we need to ensure that containers are killed before the containerd daemon gets into an out of memory condition. We also do not want to make containerd unkillable, but we want to lower its score to the level of other system daemons. 
-containerd also exports its own metrics as well as container level metrics via the prometheus metrics format. -Currently, prometheus only supports TCP endpoints, therefore, the metrics address should be a TCP address that your prometheus infrastructure can scrape metrics from. +containerd also exports its own metrics as well as container level metrics via the Prometheus metrics format under `/v1/metrics`. +Currently, Prometheus only supports TCP endpoints, therefore, the metrics address should be a TCP address that your Prometheus infrastructure can scrape metrics from. containerd also has two different storage locations on a host system. One is for persistent data and the other is for runtime state. @@ -154,6 +154,8 @@ They should not be tampered with as corruption and bugs can and will happen. External apps reading or watching changes in these directories have been known to cause `EBUSY` and stale file handles when containerd and/or its plugins try to cleanup resources. ```toml +version = 2 + # persistent data location root = "/var/lib/containerd" # runtime state information @@ -203,7 +205,9 @@ See [containerd's Plugin documentation](./PLUGINS.md) The linux runtime allows a few options to be set to configure the shim and the runtime that you are using. ```toml -[plugins.linux] +version = 2 + +[plugins."io.containerd.runtime.v1.linux"] # shim binary name/path shim = "" # runtime binary name/path @@ -229,6 +233,8 @@ Both modes share backing data, while "shared" will reduce total bandwidth across The default is "shared". 
While this is largely the most desired policy, one can change to "isolated" mode with the following configuration: ```toml -[plugins.bolt] +version = 2 + +[plugins."io.containerd.metadata.v1.bolt"] content_sharing_policy = "isolated" ``` diff --git a/docs/stream_processors.md b/docs/stream_processors.md index ba227a5..0486b24 100644 --- a/docs/stream_processors.md +++ b/docs/stream_processors.md @@ -33,6 +33,8 @@ Processor Fields: * `args` - Arguments passed to the processor binary. ```toml +version = 2 + [stream_processors] [stream_processors."io.containerd.processor.v1.pigz"] accepts = ["application/vnd.docker.image.rootfs.diff.tar.gzip"] diff --git a/docs/tracing.md b/docs/tracing.md new file mode 100644 index 0000000..3cc8469 --- /dev/null +++ b/docs/tracing.md @@ -0,0 +1,88 @@ +# Tracing + +containerd supports OpenTelemetry tracing since v1.6.0. +Tracing currently targets only gRPC calls. + +## Sending traces from containerd deamon + +By configuring `io.containerd.tracing.processor.v1.otlp` plugin. +containerd daemon can send traces to the specified OpenTelemetry endpoint. + +```toml +version = 2 + +[plugins."io.containerd.tracing.processor.v1.otlp"] + endpoint = "http://localhost:4318" +``` + +The following options are supported. + +- `endpoint`: The address of a server that receives [OpenTelemetry Protocol](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/otlp.md). +- `protocol`: OpenTelemetry supports multiple protocols. + The default value is "http/protobuf". "grpc" is also supported. +- `insecure`: Disable transport security when the protocol is "grpc". The default is false. + "http/protobuf" always uses the schema provided by the endpoint and + the value of this setting being ignored. + +The sampling ratio and the service name on the traces could be configured by +`io.containerd.internal.v1.tracing` plugin. 
+ +```toml +version = 2 + +[plugins."io.containerd.internal.v1.tracing"] + sampling_ratio = 1.0 + service_name = "containerd" +``` + +## Sending traces from containerd client + +By configuring its underlying gRPC client, containerd's Go client can send +traces to an OpenTelemetry endpoint. + +Note that the Go client's methods and gRPC calls are not 1:1. Single method +call would issue multiple gRPC calls. + +```go +func clientWithTrace() error { + exp, err := otlptracehttp.New(ctx, + otlptracehttp.WithEndpoint("localhost:4318"), + otlptracehttp.WithInsecure(), + ) + if err != nil { + return err + } + + res, err := resource.New(ctx, resource.WithAttributes( + semconv.ServiceNameKey.String("CLIENT NAME"), + )) + if err != nil { + return err + } + + provider := trace.NewTracerProvider( + trace.WithSampler(trace.AlwaysSample()), + trace.WithSpanProcessor(trace.NewSimpleSpanProcessor(exp)), + trace.WithResource(res), + ) + otel.SetTracerProvider(provider) + otel.SetTextMapPropagator(propagation.TraceContext{}) + + ... + + dialOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()), + grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), + } + client, ctx, cancel, err := commands.NewClient(context, containerd.WithDialOpts(dialOpts)) + if err != nil { + return err + } + defer cancel() + + ctx, span := tracing.StartSpan(ctx, "OPERATION NAME") + defer span.End() + ... +} +``` diff --git a/errdefs/errors.go b/errdefs/errors.go index 05a3522..8762255 100644 --- a/errdefs/errors.go +++ b/errdefs/errors.go @@ -17,7 +17,7 @@ // Package errdefs defines the common errors used throughout containerd // packages. // -// Use with errors.Wrap and error.Wrapf to add context to an error. +// Use with fmt.Errorf to add context to an error. // // To detect an error class, use the IsXXX functions to tell whether an error // is of a certain type. 
@@ -28,8 +28,7 @@ package errdefs import ( "context" - - "github.com/pkg/errors" + "errors" ) // Definitions of common error types used throughout containerd. All containerd diff --git a/errdefs/grpc.go b/errdefs/grpc.go index 209f63b..7a9b33e 100644 --- a/errdefs/grpc.go +++ b/errdefs/grpc.go @@ -18,9 +18,9 @@ package errdefs import ( "context" + "fmt" "strings" - "github.com/pkg/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -68,9 +68,9 @@ func ToGRPC(err error) error { // ToGRPCf maps the error to grpc error codes, assembling the formatting string // and combining it with the target error string. // -// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...)) +// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(errors.Wrapf(err, format, args...)) + return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) } // FromGRPC returns the underlying error from a grpc service based on the grpc error code @@ -104,9 +104,9 @@ func FromGRPC(err error) error { msg := rebaseMessage(cls, err) if msg != "" { - err = errors.Wrap(cls, msg) + err = fmt.Errorf("%s: %w", msg, cls) } else { - err = errors.WithStack(cls) + err = cls } return err diff --git a/errdefs/grpc_test.go b/errdefs/grpc_test.go index 982cebd..8c69a40 100644 --- a/errdefs/grpc_test.go +++ b/errdefs/grpc_test.go @@ -18,12 +18,12 @@ package errdefs import ( "context" + "errors" + "fmt" "testing" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - - "github.com/pkg/errors" ) func TestGRPCRoundTrip(t *testing.T) { @@ -42,8 +42,9 @@ func TestGRPCRoundTrip(t *testing.T) { input: ErrNotFound, cause: ErrNotFound, }, + //nolint:dupword { - input: errors.Wrapf(ErrFailedPrecondition, "test test test"), + input: fmt.Errorf("test test test: %w", ErrFailedPrecondition), cause: ErrFailedPrecondition, str: "test test test: failed 
precondition", }, @@ -63,7 +64,7 @@ func TestGRPCRoundTrip(t *testing.T) { str: "context canceled", }, { - input: errors.Wrapf(context.Canceled, "this is a test cancel"), + input: fmt.Errorf("this is a test cancel: %w", context.Canceled), cause: context.Canceled, str: "this is a test cancel: context canceled", }, @@ -73,7 +74,7 @@ func TestGRPCRoundTrip(t *testing.T) { str: "context deadline exceeded", }, { - input: errors.Wrapf(context.DeadlineExceeded, "this is a test deadline exceeded"), + input: fmt.Errorf("this is a test deadline exceeded: %w", context.DeadlineExceeded), cause: context.DeadlineExceeded, str: "this is a test deadline exceeded: context deadline exceeded", }, @@ -85,9 +86,6 @@ func TestGRPCRoundTrip(t *testing.T) { ferr := FromGRPC(gerr) t.Logf("recovered: %v", ferr) - if errors.Cause(ferr) != testcase.cause { - t.Fatalf("unexpected cause: %v != %v", errors.Cause(ferr), testcase.cause) - } if !errors.Is(ferr, testcase.cause) { t.Fatalf("unexpected cause: !errors.Is(%v, %v)", ferr, testcase.cause) } diff --git a/events/exchange/exchange.go b/events/exchange/exchange.go index eb27bf2..a1f385d 100644 --- a/events/exchange/exchange.go +++ b/events/exchange/exchange.go @@ -18,6 +18,7 @@ package exchange import ( "context" + "fmt" "strings" "time" @@ -30,7 +31,6 @@ import ( "github.com/containerd/typeurl" goevents "github.com/docker/go-events" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -88,10 +88,10 @@ func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event namespace, err = namespaces.NamespaceRequired(ctx) if err != nil { - return errors.Wrapf(err, "failed publishing event") + return fmt.Errorf("failed publishing event: %w", err) } if err := validateTopic(topic); err != nil { - return errors.Wrapf(err, "envelope topic %q", topic) + return fmt.Errorf("envelope topic %q: %w", topic, err) } encoded, err = typeurl.MarshalAny(event) @@ -150,7 +150,7 @@ func (e *Exchange) 
Subscribe(ctx context.Context, fs ...string) (ch <-chan *even if len(fs) > 0 { filter, err := filters.ParseAll(fs...) if err != nil { - errq <- errors.Wrapf(err, "failed parsing subscription filters") + errq <- fmt.Errorf("failed parsing subscription filters: %w", err) closeAll() return } @@ -175,7 +175,7 @@ func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *even // TODO(stevvooe): For the most part, we are well protected // from this condition. Both Forward and Publish protect // from this. - err = errors.Errorf("invalid envelope encountered %#v; please file a bug", ev) + err = fmt.Errorf("invalid envelope encountered %#v; please file a bug", ev) break } @@ -203,21 +203,21 @@ func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *even func validateTopic(topic string) error { if topic == "" { - return errors.Wrap(errdefs.ErrInvalidArgument, "must not be empty") + return fmt.Errorf("must not be empty: %w", errdefs.ErrInvalidArgument) } if topic[0] != '/' { - return errors.Wrapf(errdefs.ErrInvalidArgument, "must start with '/'") + return fmt.Errorf("must start with '/': %w", errdefs.ErrInvalidArgument) } if len(topic) == 1 { - return errors.Wrapf(errdefs.ErrInvalidArgument, "must have at least one component") + return fmt.Errorf("must have at least one component: %w", errdefs.ErrInvalidArgument) } components := strings.Split(topic[1:], "/") for _, component := range components { if err := identifiers.Validate(component); err != nil { - return errors.Wrapf(err, "failed validation on component %q", component) + return fmt.Errorf("failed validation on component %q: %w", component, err) } } @@ -226,15 +226,15 @@ func validateTopic(topic string) error { func validateEnvelope(envelope *events.Envelope) error { if err := identifiers.Validate(envelope.Namespace); err != nil { - return errors.Wrapf(err, "event envelope has invalid namespace") + return fmt.Errorf("event envelope has invalid namespace: %w", err) } if err := 
validateTopic(envelope.Topic); err != nil { - return errors.Wrapf(err, "envelope topic %q", envelope.Topic) + return fmt.Errorf("envelope topic %q: %w", envelope.Topic, err) } if envelope.Timestamp.IsZero() { - return errors.Wrapf(errdefs.ErrInvalidArgument, "timestamp must be set on forwarded event") + return fmt.Errorf("timestamp must be set on forwarded event: %w", errdefs.ErrInvalidArgument) } return nil diff --git a/events/exchange/exchange_test.go b/events/exchange/exchange_test.go index 97d2879..8f87cbb 100644 --- a/events/exchange/exchange_test.go +++ b/events/exchange/exchange_test.go @@ -18,6 +18,7 @@ package exchange import ( "context" + "errors" "reflect" "testing" "time" @@ -27,7 +28,6 @@ import ( "github.com/containerd/containerd/events" "github.com/containerd/containerd/namespaces" "github.com/containerd/typeurl" - "github.com/pkg/errors" ) func TestExchangeBasic(t *testing.T) { diff --git a/events/plugin/plugin.go b/events/plugin/plugin.go new file mode 100644 index 0000000..eab0a3b --- /dev/null +++ b/events/plugin/plugin.go @@ -0,0 +1,32 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package plugin + +import ( + "github.com/containerd/containerd/plugin" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.EventPlugin, + ID: "exchange", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + // TODO: In 2.0, create exchange since ic.Events will be removed + return ic.Events, nil + }, + }) +} diff --git a/filters/filter.go b/filters/filter.go index cf09d8d..e13f262 100644 --- a/filters/filter.go +++ b/filters/filter.go @@ -65,7 +65,6 @@ // ``` // name==foo,labels.bar // ``` -// package filters import ( diff --git a/filters/parser.go b/filters/parser.go index 0825d66..3276790 100644 --- a/filters/parser.go +++ b/filters/parser.go @@ -21,7 +21,6 @@ import ( "io" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" ) /* @@ -46,7 +45,6 @@ field := quoted | [A-Za-z] [A-Za-z0-9_]+ operator := "==" | "!=" | "~=" value := quoted | [^\s,]+ quoted := - */ func Parse(s string) (Filter, error) { // special case empty to match all @@ -71,7 +69,7 @@ func ParseAll(ss ...string) (Filter, error) { for _, s := range ss { f, err := Parse(s) if err != nil { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) + return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument) } fs = append(fs, f) @@ -90,7 +88,7 @@ func (p *parser) parse() (Filter, error) { ss, err := p.selectors() if err != nil { - return nil, errors.Wrap(err, "filters") + return nil, fmt.Errorf("filters: %w", err) } return ss, nil @@ -284,9 +282,9 @@ func (pe parseError) Error() string { } func (p *parser) mkerr(pos int, format string, args ...interface{}) error { - return errors.Wrap(parseError{ + return fmt.Errorf("parse error: %w", parseError{ input: p.input, pos: pos, msg: fmt.Sprintf(format, args...), - }, "parse error") + }) } diff --git a/filters/quote.go b/filters/quote.go index 2d64e23..5c800ef 100644 --- a/filters/quote.go +++ b/filters/quote.go @@ -17,9 +17,8 @@ package filters import ( + "errors" "unicode/utf8" - 
- "github.com/pkg/errors" ) // NOTE(stevvooe): Most of this code in this file is copied from the stdlib @@ -32,10 +31,10 @@ var errQuoteSyntax = errors.New("quote syntax error") // or character literal represented by the string s. // It returns four values: // -// 1) value, the decoded Unicode code point or byte value; -// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; -// 3) tail, the remainder of the string after the character; and -// 4) an error that will be nil if the character is syntactically valid. +// 1. value, the decoded Unicode code point or byte value; +// 2. multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3. tail, the remainder of the string after the character; and +// 4. an error that will be nil if the character is syntactically valid. // // The second argument, quote, specifies the type of literal being parsed // and therefore which escaped quote character is permitted. diff --git a/gc/gc.go b/gc/gc.go index 4f71cb3..079a6ea 100644 --- a/gc/gc.go +++ b/gc/gc.go @@ -59,6 +59,8 @@ type Stats interface { // // We can probably use this to inform a design for incremental GC by injecting // callbacks to the set modification algorithms. +// +// https://en.wikipedia.org/wiki/Tracing_garbage_collection#Tri-color_marking func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struct{}, error) { var ( grays []Node // maintain a gray "stack" diff --git a/gc/scheduler/scheduler.go b/gc/scheduler/scheduler.go index f5890fb..f699924 100644 --- a/gc/scheduler/scheduler.go +++ b/gc/scheduler/scheduler.go @@ -18,6 +18,7 @@ package scheduler import ( "context" + "errors" "fmt" "sync" "time" @@ -25,7 +26,6 @@ import ( "github.com/containerd/containerd/gc" "github.com/containerd/containerd/log" "github.com/containerd/containerd/plugin" - "github.com/pkg/errors" ) // config configures the garbage collection policies. 
@@ -117,7 +117,7 @@ func init() { mdCollector, ok := md.(collector) if !ok { - return nil, errors.Errorf("%s %T must implement collector", plugin.MetadataPlugin, md) + return nil, fmt.Errorf("%s %T must implement collector", plugin.MetadataPlugin, md) } m := newScheduler(mdCollector, ic.Config.(*config)) diff --git a/go.mod b/go.mod index b176e94..4f75217 100644 --- a/go.mod +++ b/go.mod @@ -1,123 +1,141 @@ module github.com/containerd/containerd -go 1.17 +go 1.19 require ( - github.com/Microsoft/go-winio v0.4.17 - github.com/Microsoft/hcsshim v0.8.23 + github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8 + github.com/Microsoft/go-winio v0.5.2 + github.com/Microsoft/hcsshim v0.9.10 github.com/containerd/aufs v1.0.0 github.com/containerd/btrfs v1.0.0 - github.com/containerd/cgroups v1.0.1 - github.com/containerd/console v1.0.2 - github.com/containerd/continuity v0.1.0 + github.com/containerd/cgroups v1.0.4 + github.com/containerd/console v1.0.3 + github.com/containerd/continuity v0.3.0 github.com/containerd/fifo v1.0.0 - github.com/containerd/go-cni v1.0.2 + github.com/containerd/go-cni v1.1.6 github.com/containerd/go-runc v1.0.0 - github.com/containerd/imgcrypt v1.1.1 + github.com/containerd/imgcrypt v1.1.4 github.com/containerd/nri v0.1.0 - github.com/containerd/ttrpc v1.1.0 + github.com/containerd/ttrpc v1.1.2 github.com/containerd/typeurl v1.0.2 - github.com/containerd/zfs v1.0.0 - github.com/containernetworking/plugins v0.9.1 + github.com/containerd/zfs v1.1.0 + github.com/containernetworking/cni v1.1.1 + github.com/containernetworking/plugins v1.1.1 github.com/coreos/go-systemd/v22 v22.3.2 github.com/davecgh/go-spew v1.1.1 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c github.com/docker/go-metrics v0.0.1 github.com/docker/go-units v0.4.0 - github.com/emicklei/go-restful v2.9.5+incompatible + github.com/emicklei/go-restful/v3 v3.10.1 github.com/fsnotify/fsnotify v1.4.9 github.com/gogo/googleapis v1.4.0 github.com/gogo/protobuf 
v1.3.2 - github.com/golang/protobuf v1.5.0 - github.com/google/go-cmp v0.5.5 - github.com/google/uuid v1.2.0 + github.com/google/go-cmp v0.5.9 + github.com/google/uuid v1.3.0 + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/go-multierror v1.0.0 + github.com/hashicorp/go-multierror v1.1.1 github.com/imdario/mergo v0.3.12 + github.com/intel/goresctrl v0.2.0 github.com/klauspost/compress v1.11.13 github.com/moby/locker v1.0.1 - github.com/moby/sys/mountinfo v0.4.1 - github.com/moby/sys/symlink v0.1.0 + github.com/moby/sys/mountinfo v0.6.2 + github.com/moby/sys/signal v0.6.0 + github.com/moby/sys/symlink v0.2.0 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/image-spec v1.0.2 - github.com/opencontainers/runc v1.0.2 + github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b + github.com/opencontainers/runc v1.1.5 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 - github.com/opencontainers/selinux v1.8.2 - github.com/pelletier/go-toml v1.8.1 - github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.7.1 - github.com/sirupsen/logrus v1.8.1 - github.com/stretchr/testify v1.6.1 + github.com/opencontainers/selinux v1.10.1 + github.com/pelletier/go-toml v1.9.5 + github.com/prometheus/client_golang v1.11.1 + github.com/sirupsen/logrus v1.9.0 + github.com/stretchr/testify v1.8.1 github.com/tchap/go-patricia v2.2.6+incompatible github.com/urfave/cli v1.22.2 - go.etcd.io/bbolt v1.3.5 - golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 - google.golang.org/grpc v1.33.2 - gotest.tools/v3 v3.0.3 - k8s.io/api v0.20.6 - k8s.io/apimachinery v0.20.6 - k8s.io/apiserver v0.20.6 - k8s.io/client-go v0.20.6 - k8s.io/component-base v0.20.6 - k8s.io/cri-api v0.20.6 - k8s.io/klog/v2 v2.4.0 - k8s.io/utils 
v0.0.0-20201110183641-67b214c5f920 + github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 + go.etcd.io/bbolt v1.3.7 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0 + go.opentelemetry.io/otel v1.3.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0 + go.opentelemetry.io/otel/sdk v1.3.0 + go.opentelemetry.io/otel/trace v1.3.0 + golang.org/x/net v0.8.0 + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.6.0 + google.golang.org/grpc v1.50.1 + google.golang.org/protobuf v1.28.1 + gotest.tools/v3 v3.5.0 + k8s.io/api v0.22.5 + k8s.io/apimachinery v0.22.5 + k8s.io/apiserver v0.22.5 + k8s.io/client-go v0.22.5 + k8s.io/component-base v0.22.5 + k8s.io/cri-api v0.25.0 + k8s.io/klog/v2 v2.30.0 + k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b ) require ( github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.2.0 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect - github.com/cilium/ebpf v0.6.2 // indirect - github.com/containernetworking/cni v0.8.1 // indirect - github.com/containers/ocicrypt v1.1.1 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cilium/ebpf v0.7.0 // indirect + github.com/containers/ocicrypt v1.1.3 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect - github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 // indirect - github.com/go-logr/logr v0.2.0 // indirect - github.com/godbus/dbus/v5 v5.0.4 // indirect - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect - github.com/google/gofuzz v1.1.0 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect - github.com/json-iterator/go v1.1.10 // indirect - 
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/miekg/pkcs11 v1.0.3 // indirect - github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect + github.com/emicklei/go-restful v2.9.5+incompatible // indirect + github.com/go-logr/logr v1.2.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/godbus/dbus/v5 v5.0.6 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.10.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect + github.com/prometheus/common v0.30.0 // indirect + github.com/prometheus/procfs v0.7.3 // indirect github.com/russross/blackfriday/v2 v2.0.1 // indirect - github.com/satori/go.uuid v1.2.0 // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect + github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect - go.opencensus.io v0.22.3 // indirect - 
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 // indirect - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect - golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect - golang.org/x/text v0.3.4 // indirect - golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/appengine v1.6.5 // indirect - google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect + go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 // indirect + go.opentelemetry.io/proto/otlp v0.11.0 // indirect + golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect + golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect + golang.org/x/term v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/square/go-jose.v2 v2.5.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.0.3 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect sigs.k8s.io/yaml v1.2.0 // indirect ) -// When updating replace rules, make sure to also update the rules in integration/client/go.mod +// When updating replace rules, make sure to also update the rules in integration/client/go.mod and api/go.mod replace ( github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2 - github.com/golang/protobuf => github.com/golang/protobuf v1.3.5 + // urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092 github.com/urfave/cli => github.com/urfave/cli v1.22.1 google.golang.org/genproto => 
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 - google.golang.org/grpc => google.golang.org/grpc v1.27.1 ) diff --git a/go.sum b/go.sum index 0bc0c69..2236fae 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,6 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -9,122 +11,294 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod 
h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8 h1:V8krnnfGj4pV65YLUm3C0/8bl7V5Nry2Pwvy3ru/wLc= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= +github.com/Azure/azure-sdk-for-go 
v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= 
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.23 h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M= 
+github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.10 h1:TxXGNmcbQxBKVWvjvTocNb6jrPyeHlk5EiDhhgHgggs= +github.com/Microsoft/hcsshim v0.9.10/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.2.0 
h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod 
h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2 h1:iHsfF/t4aW4heW2YKfeHrVPGdtYTL4C4KocpM8KTSnI= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/client9/misspell 
v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= github.com/containerd/aufs v1.0.0 h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY= github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs 
v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= github.com/containerd/btrfs v1.0.0 h1:osn1exbzdub9L5SouXO5swW4ea/xVdJZ3wokxN5GrnA= github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod 
h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= 
+github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= +github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= 
github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU= github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-cni v1.0.2 h1:YbJAhpTevL2v6u8JC1NhCYRwf+3Vzxcc5vGnYoJ7VeE= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.6 h1:el5WPymG5nRRLQF1EfB97FWob4Tdc8INg8RZMaXWZlo= +github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0= github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.1.1 h1:LBwiTfoUsdiEGAR1TpvxE+Gzt7469oVu87iR3mv3Byc= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= 
+github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= +github.com/containerd/imgcrypt v1.1.4 h1:iKTstFebwy3Ak5UF0RHSeuCTahC5OIrPJa6vjMAM81s= +github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0 h1:6QioHRlThlKh2RkRTR4kIT3PKAcrLo3gIWnjkM4dQmQ= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/ttrpc v1.1.2 h1:4jH6OQDQqjfVD2b5TJS5TxmGuLGmp5WW7KtW2TWOP7c= +github.com/containerd/ttrpc v1.1.2/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod 
h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/zfs v1.0.0 h1:cXLJbx+4Jj7rNsTiqVfm6i+RNLx6FFA2fMmDlEf+Wm8= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.1.0 h1:n7OZ7jZumLIqNJqXrEc/paBM840mORnmGdJDmAmJZHM= +github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/plugins v0.9.1 h1:FD1tADPls2EEi3flPc2OegIY1M9pUa9r2Quag7HMLV8= +github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= +github.com/containernetworking/cni v1.1.1 h1:ky20T7c0MvKvbMOwS/FrlbNwjEoqJEUUYfsL4b0mc4k= +github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.1/go.mod 
h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containers/ocicrypt v1.1.1 h1:prL8l9w3ntVqXvNH1CiNn5ENjcCnr38JqpSyvKKB4GI= +github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= +github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE= +github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.3 h1:uMxn2wTb4nDR7GqG3rnZSfpJXqWURfzZ7nKydzIeKpA= +github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd 
v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -134,7 +308,10 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= @@ -142,15 +319,27 @@ github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjI github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 
h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -160,128 +349,250 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= +github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane 
v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml 
v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c h1:RBUpb2b14UnmRHNd2uHz20ZHLDK+SW5Us/vWF5IHRaY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus 
v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.3.2 h1:kX1es4djPJrsDhY7aZKJy7aZasdcB5oSOEphMjSB53c= github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod 
h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof 
v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic 
v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 
h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/intel/goresctrl v0.2.0 h1:JyZjdMQu9Kl/wLXe9xA6s1X+tF6BWsQPFGJMEeCfWzE= +github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= 
+github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod 
h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -294,127 +605,232 @@ github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible 
h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= +github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod 
h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/symlink v0.1.0 h1:MTFZ74KtNI6qQQpuBxU+uKCim4WtOMokr03hCfJcazE= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/signal v0.6.0 h1:aDpY94H8VlhTGa9sNYUFCFsMZIUh5wm0B6XkIoJj/iY= +github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc= +github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= 
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/opencontainers/go-digest 
v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc 
v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= +github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= 
+github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod 
h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid 
v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= 
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -422,6 +838,7 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= 
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -430,59 +847,148 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod 
h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod 
h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0 h1:Ky1MObd188aGbgb5OgNnwGuEEwI9MVIcc7rBW6zk5Ak= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 h1:R/OBkMoGgfy2fLhs2QhkCI1w4HLEQX92GCcJB6SSdNk= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 h1:giGm8w67Ja7amYNfYMdme7xSp2pIxThWopw8+QP51Yk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0 h1:VQbUHoJqytHHSJ1OZodPH9tvZZSVzUHjPHpkO85sT6k= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0 h1:Ydage/P0fRrSPpZeCVxzjqGcI6iVmG2xb43+IR8cjqM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.3.0 h1:3278edCoH89MEJ0Ky8WQXVmDQv3FX4ZJ3Pp+9fJreAI= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod 
h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -495,6 +1001,7 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -503,6 +1010,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -511,9 +1019,16 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -523,10 +1038,13 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -534,29 +1052,65 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= 
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -565,20 +1119,28 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -591,41 +1153,94 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e 
h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -634,9 +1249,12 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw 
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -651,14 +1269,36 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools 
v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -668,35 +1308,92 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api 
v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc 
v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod 
h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1 
h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -704,51 +1401,93 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 
v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.20.6 h1:bgdZrW++LqgrLikWYNruIKAtltXbSCX2l5mJu11hrVE= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/apimachinery v0.20.6 h1:R5p3SlhaABYShQSO6LpPsYHjV05Q+79eBUR0Ut/f4tk= +k8s.io/api v0.22.5 h1:xk7C+rMjF/EGELiD560jdmwzrB788mfcHiNbMQLIVI8= +k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apiserver v0.20.6 h1:NnVriMMOpqQX+dshbDoZixqmBhfgrPk2uOh2fzp9vHE= +k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.5 h1:cIPwldOYm1Slq9VLBRPtEYpyhjIm1C6aAMAoENuvN9s= 
+k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/client-go v0.20.6 h1:nJZOfolnsVtDtbGJNCxzOtKUAu7zvXjB8+pMo9UNxZo= +k8s.io/apiserver v0.22.5 h1:71krQxCUz218ecb+nPhfDsNB6QgP1/4EMvi1a2uYBlg= +k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/component-base v0.20.6 h1:G0inASS5vAqCpzs7M4Sp9dv9d0aElpz39zDHbSB4f4g= +k8s.io/client-go v0.22.5 h1:I8Zn/UqIdi2r02aZmhaJ1hqMxcpfJ3t5VqvHtctHYFo= +k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/cri-api v0.20.6 h1:iXX0K2pRrbR8yXbZtDK/bSnmg/uSqIFiVJK1x4LUOMc= +k8s.io/component-base v0.22.5 h1:U0eHqZm7mAFE42hFwYhY6ze/MmVaW00JpMrzVsQmzYE= +k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/cri-api v0.23.1/go.mod 
h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= +k8s.io/cri-api v0.25.0 h1:INwdXsCDSA/0hGNdPxdE2dQD6ft/5K1EaKXZixvSQxg= +k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= +k8s.io/utils 
v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3 h1:4oyYo8NREp49LBBhKxEqCulFjg26rawYKrnCmg+Sr6c= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/identifiers/validate.go b/identifiers/validate.go index f52317b..cbd3a52 100644 --- a/identifiers/validate.go +++ b/identifiers/validate.go @@ -25,10 +25,10 @@ package identifiers import ( + "fmt" "regexp" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" ) const ( @@ -51,15 +51,15 @@ var ( // In general identifiers that pass this validation should be safe for use as filesystem path components. 
func Validate(s string) error { if len(s) == 0 { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty") + return fmt.Errorf("identifier must not be empty: %w", errdefs.ErrInvalidArgument) } if len(s) > maxLength { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q greater than maximum length (%d characters)", s, maxLength) + return fmt.Errorf("identifier %q greater than maximum length (%d characters): %w", s, maxLength, errdefs.ErrInvalidArgument) } if !identifierRe.MatchString(s) { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q must match %v", s, identifierRe) + return fmt.Errorf("identifier %q must match %v: %w", s, identifierRe, errdefs.ErrInvalidArgument) } return nil } diff --git a/image.go b/image.go index f35f0ed..784df5d 100644 --- a/image.go +++ b/image.go @@ -19,6 +19,7 @@ package containerd import ( "context" "encoding/json" + "errors" "fmt" "strings" "sync/atomic" @@ -27,13 +28,13 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/pkg/kmutex" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" "github.com/containerd/containerd/snapshots" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "golang.org/x/sync/semaphore" ) @@ -61,6 +62,8 @@ type Image interface { ContentStore() content.Store // Metadata returns the underlying image metadata Metadata() images.Image + // Platform returns the platform match comparer. Can be nil. 
+ Platform() platforms.MatchComparer } type usageOptions struct { @@ -285,6 +288,10 @@ type UnpackConfig struct { // CheckPlatformSupported is whether to validate that a snapshotter // supports an image's platform before unpacking CheckPlatformSupported bool + // DuplicationSuppressor is used to make sure that there is only one + // in-flight fetch request or unpack handler for a given descriptor's + // digest or chain ID. + DuplicationSuppressor kmutex.KeyedLocker } // UnpackOpt provides configuration for unpack @@ -298,6 +305,14 @@ func WithSnapshotterPlatformCheck() UnpackOpt { } } +// WithUnpackDuplicationSuppressor sets `DuplicationSuppressor` on the UnpackConfig. +func WithUnpackDuplicationSuppressor(suppressor kmutex.KeyedLocker) UnpackOpt { + return func(ctx context.Context, uc *UnpackConfig) error { + uc.DuplicationSuppressor = suppressor + return nil + } +} + func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...UnpackOpt) error { ctx, done, err := i.client.WithLease(ctx) if err != nil { @@ -397,10 +412,10 @@ func (i *image) getLayers(ctx context.Context, platform platforms.MatchComparer, cs := i.ContentStore() diffIDs, err := i.i.RootFS(ctx, cs, platform) if err != nil { - return nil, errors.Wrap(err, "failed to resolve rootfs") + return nil, fmt.Errorf("failed to resolve rootfs: %w", err) } if len(diffIDs) != len(manifest.Layers) { - return nil, errors.Errorf("mismatched image rootfs and manifest layers") + return nil, errors.New("mismatched image rootfs and manifest layers") } layers := make([]rootfs.Layer, len(diffIDs)) for i := range diffIDs { @@ -448,3 +463,7 @@ func (i *image) checkSnapshotterSupport(ctx context.Context, snapshotterName str func (i *image) ContentStore() content.Store { return i.client.ContentStore() } + +func (i *image) Platform() platforms.MatchComparer { + return i.platform +} diff --git a/image_store.go b/image_store.go index fd79e89..a970282 100644 --- a/image_store.go +++ b/image_store.go @@ -129,6 
+129,7 @@ func imagesFromProto(imagespb []imagesapi.Image) []images.Image { var images []images.Image for _, image := range imagespb { + image := image images = append(images, imageFromProto(&image)) } diff --git a/images/archive/exporter.go b/images/archive/exporter.go index 7801b25..6943a7f 100644 --- a/images/archive/exporter.go +++ b/images/archive/exporter.go @@ -20,6 +20,7 @@ import ( "archive/tar" "context" "encoding/json" + "fmt" "io" "path" "sort" @@ -31,7 +32,6 @@ import ( digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) type exportOptions struct { @@ -176,12 +176,15 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts } name := desc.Annotations[images.AnnotationImageName] - if name != "" && !eo.skipDockerManifest { + if name != "" { mt.names = append(mt.names, name) } case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: d, ok := resolvedIndex[desc.Digest] if !ok { + if err := desc.Digest.Validate(); err != nil { + return err + } records = append(records, blobRecord(store, desc, &eo.blobRecordOptions)) p, err := content.ReadBlob(ctx, store, desc) @@ -212,26 +215,24 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts records = append(records, r...) 
} - if !eo.skipDockerManifest { - if len(manifests) >= 1 { - if len(manifests) > 1 { - sort.SliceStable(manifests, func(i, j int) bool { - if manifests[i].Platform == nil { - return false - } - if manifests[j].Platform == nil { - return true - } - return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform) - }) - } - d = manifests[0].Digest - dManifests[d] = &exportManifest{ - manifest: manifests[0], - } - } else if eo.platform != nil { - return errors.Wrap(errdefs.ErrNotFound, "no manifest found for platform") + if len(manifests) >= 1 { + if len(manifests) > 1 { + sort.SliceStable(manifests, func(i, j int) bool { + if manifests[i].Platform == nil { + return false + } + if manifests[j].Platform == nil { + return true + } + return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform) + }) } + d = manifests[0].Digest + dManifests[d] = &exportManifest{ + manifest: manifests[0], + } + } else if eo.platform != nil { + return fmt.Errorf("no manifest found for platform: %w", errdefs.ErrNotFound) } resolvedIndex[desc.Digest] = d } @@ -243,14 +244,14 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts } default: - return errors.Wrap(errdefs.ErrInvalidArgument, "only manifests may be exported") + return fmt.Errorf("only manifests may be exported: %w", errdefs.ErrInvalidArgument) } } - if len(dManifests) > 0 { + if !eo.skipDockerManifest && len(dManifests) > 0 { tr, err := manifestsRecord(ctx, store, dManifests) if err != nil { - return errors.Wrap(err, "unable to create manifests file") + return fmt.Errorf("unable to create manifests file: %w", err) } records = append(records, tr) @@ -271,6 +272,9 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descriptor, algorithms map[string]struct{}, brOpts *blobRecordOptions) ([]tarRecord, error) { var records []tarRecord exportHandler := func(ctx context.Context, desc 
ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if err := desc.Digest.Validate(); err != nil { + return nil, err + } records = append(records, blobRecord(store, desc, brOpts)) algorithms[desc.Digest.Algorithm().String()] = struct{}{} return nil, nil @@ -316,7 +320,7 @@ func blobRecord(cs content.Provider, desc ocispec.Descriptor, opts *blobRecordOp CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { r, err := cs.ReaderAt(ctx, desc) if err != nil { - return 0, errors.Wrap(err, "failed to get reader") + return 0, fmt.Errorf("failed to get reader: %w", err) } defer r.Close() @@ -325,10 +329,10 @@ func blobRecord(cs content.Provider, desc ocispec.Descriptor, opts *blobRecordOp n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r)) if err != nil { - return 0, errors.Wrap(err, "failed to copy to tar") + return 0, fmt.Errorf("failed to copy to tar: %w", err) } if dgstr.Digest() != desc.Digest { - return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest()) + return 0, fmt.Errorf("unexpected digest %s copied", dgstr.Digest()) } return n, nil }, @@ -424,10 +428,13 @@ func manifestsRecord(ctx context.Context, store content.Provider, manifests map[ return tarRecord{}, err } if err := manifest.Config.Digest.Validate(); err != nil { - return tarRecord{}, errors.Wrapf(err, "invalid manifest %q", m.manifest.Digest) + return tarRecord{}, fmt.Errorf("invalid manifest %q: %w", m.manifest.Digest, err) } dgst := manifest.Config.Digest + if err := dgst.Validate(); err != nil { + return tarRecord{}, err + } mfsts[i].Config = path.Join("blobs", dgst.Algorithm().String(), dgst.Encoded()) for _, l := range manifest.Layers { path := path.Join("blobs", l.Digest.Algorithm().String(), l.Digest.Encoded()) @@ -491,10 +498,10 @@ func writeTar(ctx context.Context, tw *tar.Writer, recordsWithEmpty []tarRecord) return err } if n != record.Header.Size { - return errors.Errorf("unexpected copy size for %s", record.Header.Name) + return 
fmt.Errorf("unexpected copy size for %s", record.Header.Name) } } else if record.Header.Size > 0 { - return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name) + return fmt.Errorf("no content to write to record with non-zero size for %s", record.Header.Name) } } return nil diff --git a/images/archive/importer.go b/images/archive/importer.go index 2d04658..ea7891f 100644 --- a/images/archive/importer.go +++ b/images/archive/importer.go @@ -22,9 +22,9 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "path" "github.com/containerd/containerd/archive/compression" @@ -32,10 +32,10 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/platforms" digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) type importOpts struct { @@ -55,12 +55,12 @@ func WithImportCompression() ImportOpt { } // ImportIndex imports an index from a tar archive image bundle -// - implements Docker v1.1, v1.2 and OCI v1. -// - prefers OCI v1 when provided -// - creates OCI index for Docker formats -// - normalizes Docker references and adds as OCI ref name -// e.g. alpine:latest -> docker.io/library/alpine:latest -// - existing OCI reference names are untouched +// - implements Docker v1.1, v1.2 and OCI v1. +// - prefers OCI v1 when provided +// - creates OCI index for Docker formats +// - normalizes Docker references and adds as OCI ref name +// e.g. 
alpine:latest -> docker.io/library/alpine:latest +// - existing OCI reference names are untouched func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) { var ( tr = tar.NewReader(reader) @@ -94,6 +94,7 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt symlinks[hdr.Name] = path.Join(path.Dir(hdr.Name), hdr.Linkname) } + //nolint:staticcheck // TypeRegA is deprecated but we may still receive an external tar with TypeRegA if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA { if hdr.Typeflag != tar.TypeDir { log.G(ctx).WithField("file", hdr.Name).Debug("file type ignored") @@ -104,16 +105,16 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt hdrName := path.Clean(hdr.Name) if hdrName == ocispec.ImageLayoutFile { if err = onUntarJSON(tr, &ociLayout); err != nil { - return ocispec.Descriptor{}, errors.Wrapf(err, "untar oci layout %q", hdr.Name) + return ocispec.Descriptor{}, fmt.Errorf("untar oci layout %q: %w", hdr.Name, err) } } else if hdrName == "manifest.json" { if err = onUntarJSON(tr, &mfsts); err != nil { - return ocispec.Descriptor{}, errors.Wrapf(err, "untar manifest %q", hdr.Name) + return ocispec.Descriptor{}, fmt.Errorf("untar manifest %q: %w", hdr.Name, err) } } else { dgst, err := onUntarBlob(ctx, tr, store, hdr.Size, "tar-"+hdrName) if err != nil { - return ocispec.Descriptor{}, errors.Wrapf(err, "failed to ingest %q", hdr.Name) + return ocispec.Descriptor{}, fmt.Errorf("failed to ingest %q: %w", hdr.Name, err) } blobs[hdrName] = ocispec.Descriptor{ @@ -128,12 +129,12 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt // as Docker v1.1 or v1.2. 
if ociLayout.Version != "" { if ociLayout.Version != ocispec.ImageLayoutVersion { - return ocispec.Descriptor{}, errors.Errorf("unsupported OCI version %s", ociLayout.Version) + return ocispec.Descriptor{}, fmt.Errorf("unsupported OCI version %s", ociLayout.Version) } idx, ok := blobs["index.json"] if !ok { - return ocispec.Descriptor{}, errors.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion) + return ocispec.Descriptor{}, fmt.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion) } idx.MediaType = ocispec.MediaTypeImageIndex @@ -141,13 +142,13 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt } if mfsts == nil { - return ocispec.Descriptor{}, errors.Errorf("unrecognized image format") + return ocispec.Descriptor{}, errors.New("unrecognized image format") } for name, linkname := range symlinks { desc, ok := blobs[linkname] if !ok { - return ocispec.Descriptor{}, errors.Errorf("no target for symlink layer from %q to %q", name, linkname) + return ocispec.Descriptor{}, fmt.Errorf("no target for symlink layer from %q to %q", name, linkname) } blobs[name] = desc } @@ -160,13 +161,13 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt for _, mfst := range mfsts { config, ok := blobs[mfst.Config] if !ok { - return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config) + return ocispec.Descriptor{}, fmt.Errorf("image config %q not found", mfst.Config) } config.MediaType = images.MediaTypeDockerSchema2Config layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress) if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers") + return ocispec.Descriptor{}, fmt.Errorf("failed to resolve layers: %w", err) } manifest := struct { @@ -183,18 +184,28 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt desc, err := writeManifest(ctx, store, manifest, manifest.MediaType) 
if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "write docker manifest") + return ocispec.Descriptor{}, fmt.Errorf("write docker manifest: %w", err) } - platforms, err := images.Platforms(ctx, store, desc) + imgPlatforms, err := images.Platforms(ctx, store, desc) if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "unable to resolve platform") + return ocispec.Descriptor{}, fmt.Errorf("unable to resolve platform: %w", err) } - if len(platforms) > 0 { + if len(imgPlatforms) > 0 { // Only one platform can be resolved from non-index manifest, // The platform can only come from the config included above, // if the config has no platform it can be safely omitted. - desc.Platform = &platforms[0] + desc.Platform = &imgPlatforms[0] + + // If the image we've just imported is a Windows image without the OSVersion set, + // we could just assume it matches this host's OS Version. Without this, the + // children labels might not be set on the image content, leading to it being + // garbage collected, breaking the image. 
+ // See: https://github.com/containerd/containerd/issues/5690 + if desc.Platform.OS == "windows" && desc.Platform.OSVersion == "" { + platform := platforms.DefaultSpec() + desc.Platform.OSVersion = platform.OSVersion + } } if len(mfst.RepoTags) == 0 { @@ -222,12 +233,14 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt return writeManifest(ctx, store, idx, ocispec.MediaTypeImageIndex) } +const ( + kib = 1024 + mib = 1024 * kib + jsonLimit = 20 * mib +) + func onUntarJSON(r io.Reader, j interface{}) error { - b, err := ioutil.ReadAll(r) - if err != nil { - return err - } - return json.Unmarshal(b, j) + return json.NewDecoder(io.LimitReader(r, jsonLimit)).Decode(j) } func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size int64, ref string) (digest.Digest, error) { @@ -247,7 +260,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string for i, f := range layerFiles { desc, ok := blobs[f] if !ok { - return nil, errors.Errorf("layer %q not found", f) + return nil, fmt.Errorf("layer %q not found", f) } layers[i] = desc descs[desc.Digest] = &layers[i] @@ -259,15 +272,19 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string if ok { desc := descs[digest.Digest(dgst)] if desc != nil { - desc.MediaType = images.MediaTypeDockerSchema2LayerGzip desc.Digest = info.Digest desc.Size = info.Size + mediaType, err := detectLayerMediaType(ctx, store, *desc) + if err != nil { + return fmt.Errorf("failed to detect media type of layer: %w", err) + } + desc.MediaType = mediaType } } return nil }, filters...) 
if err != nil { - return nil, errors.Wrap(err, "failure checking for compressed blobs") + return nil, fmt.Errorf("failure checking for compressed blobs: %w", err) } for i, desc := range layers { @@ -277,14 +294,18 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string // Open blob, resolve media type ra, err := store.ReaderAt(ctx, desc) if err != nil { - return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest) + return nil, fmt.Errorf("failed to open %q (%s): %w", layerFiles[i], desc.Digest, err) } s, err := compression.DecompressStream(content.NewReader(ra)) if err != nil { - return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i]) + ra.Close() + return nil, fmt.Errorf("failed to detect compression for %q: %w", layerFiles[i], err) } if s.GetCompression() == compression.Uncompressed { if compress { + if err := desc.Digest.Validate(); err != nil { + return nil, err + } ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) labels := map[string]string{ "containerd.io/uncompressed": desc.Digest.String(), @@ -292,6 +313,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels)) if err != nil { s.Close() + ra.Close() return nil, err } layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip @@ -302,7 +324,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip } s.Close() - + ra.Close() } return layers, nil } @@ -310,7 +332,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) { w, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) if err != nil { - return 
ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer") + return ocispec.Descriptor{}, fmt.Errorf("failed to open writer: %w", err) } defer func() { @@ -320,7 +342,7 @@ func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string } }() if err := w.Truncate(0); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer") + return ocispec.Descriptor{}, fmt.Errorf("failed to truncate writer: %w", err) } cw, err := compression.CompressStream(w, compression.Gzip) @@ -337,7 +359,7 @@ func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string cst, err := w.Status() if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status") + return ocispec.Descriptor{}, fmt.Errorf("failed to get writer status: %w", err) } desc.Digest = w.Digest() @@ -345,7 +367,7 @@ func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil { if !errdefs.IsAlreadyExists(err) { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit") + return ocispec.Descriptor{}, fmt.Errorf("failed to commit: %w", err) } } @@ -369,3 +391,29 @@ func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{ return desc, nil } + +func detectLayerMediaType(ctx context.Context, store content.Store, desc ocispec.Descriptor) (string, error) { + var mediaType string + // need to parse existing blob to use the proper media type + bytes := make([]byte, 10) + ra, err := store.ReaderAt(ctx, desc) + if err != nil { + return "", fmt.Errorf("failed to read content store to detect layer media type: %w", err) + } + defer ra.Close() + _, err = ra.ReadAt(bytes, 0) + if err != nil && err != io.EOF { + return "", fmt.Errorf("failed to read header bytes from layer to detect media type: %w", err) + } + if err == io.EOF { + // in the case of an empty layer then the media type should be uncompressed + return 
images.MediaTypeDockerSchema2Layer, nil + } + switch c := compression.DetectCompression(bytes); c { + case compression.Uncompressed: + mediaType = images.MediaTypeDockerSchema2Layer + default: + mediaType = images.MediaTypeDockerSchema2LayerGzip + } + return mediaType, nil +} diff --git a/images/archive/reference.go b/images/archive/reference.go index ce9fe98..ba19b11 100644 --- a/images/archive/reference.go +++ b/images/archive/reference.go @@ -17,12 +17,12 @@ package archive import ( + "fmt" "strings" "github.com/containerd/containerd/reference" distref "github.com/containerd/containerd/reference/docker" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // FilterRefPrefix restricts references to having the given image @@ -72,7 +72,7 @@ func normalizeReference(ref string) (string, error) { // TODO: Replace this function to not depend on reference package normalized, err := distref.ParseDockerRef(ref) if err != nil { - return "", errors.Wrapf(err, "normalize image ref %q", ref) + return "", fmt.Errorf("normalize image ref %q: %w", ref, err) } return normalized.String(), nil @@ -81,7 +81,7 @@ func normalizeReference(ref string) (string, error) { func familiarizeReference(ref string) (string, error) { named, err := distref.ParseNormalizedNamed(ref) if err != nil { - return "", errors.Wrapf(err, "failed to parse %q", ref) + return "", fmt.Errorf("failed to parse %q: %w", ref, err) } named = distref.TagNameOnly(named) diff --git a/images/converter/default.go b/images/converter/default.go index 9f6d3fe..65224bd 100644 --- a/images/converter/default.go +++ b/images/converter/default.go @@ -48,12 +48,35 @@ func DefaultIndexConvertFunc(layerConvertFunc ConvertFunc, docker2oci bool, plat return c.convert } +// ConvertHookFunc is a callback function called during conversion of a blob. +// orgDesc is the target descriptor to convert. newDesc is passed if conversion happens. 
+type ConvertHookFunc func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) + +// ConvertHooks is a configuration for hook callbacks called during blob conversion. +type ConvertHooks struct { + // PostConvertHook is a callback function called for each blob after conversion is done. + PostConvertHook ConvertHookFunc +} + +// IndexConvertFuncWithHook is the convert func used by Convert with hook functions support. +func IndexConvertFuncWithHook(layerConvertFunc ConvertFunc, docker2oci bool, platformMC platforms.MatchComparer, hooks ConvertHooks) ConvertFunc { + c := &defaultConverter{ + layerConvertFunc: layerConvertFunc, + docker2oci: docker2oci, + platformMC: platformMC, + diffIDMap: make(map[digest.Digest]digest.Digest), + hooks: hooks, + } + return c.convert +} + type defaultConverter struct { layerConvertFunc ConvertFunc docker2oci bool platformMC platforms.MatchComparer diffIDMap map[digest.Digest]digest.Digest // key: old diffID, value: new diffID diffIDMapMu sync.RWMutex + hooks ConvertHooks } // convert dispatches desc.MediaType and calls c.convert{Layer,Manifest,Index,Config}. @@ -76,6 +99,15 @@ func (c *defaultConverter) convert(ctx context.Context, cs content.Store, desc o if err != nil { return nil, err } + + if c.hooks.PostConvertHook != nil { + if newDescPost, err := c.hooks.PostConvertHook(ctx, cs, desc, newDesc); err != nil { + return nil, err + } else if newDescPost != nil { + newDesc = newDescPost + } + } + if images.IsDockerType(desc.MediaType) { if c.docker2oci { if newDesc == nil { @@ -100,7 +132,7 @@ func copyDesc(desc ocispec.Descriptor) *ocispec.Descriptor { return &descCopy } -// convertLayer converts image image layers if c.layerConvertFunc is set. +// convertLayer converts image layers if c.layerConvertFunc is set. // // c.layerConvertFunc can be nil, e.g., for converting Docker media types to OCI ones. 
func (c *defaultConverter) convertLayer(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { @@ -346,25 +378,6 @@ func clearDockerV1DummyID(cfg DualConfig) (bool, error) { return modified, nil } -// ObjectWithMediaType represents an object with a MediaType field -// Deprecated -type ObjectWithMediaType struct { - // MediaType appears on Docker manifests and manifest lists. - MediaType string `json:"mediaType,omitempty"` -} - -// DualManifest covers Docker manifest and OCI manifest -// Deprecated: use github.com/opencontainers/image-spec/specs-go/v1.Manifest -type DualManifest struct { - ocispec.Manifest -} - -// DualIndex covers Docker manifest list and OCI index -// Deprecated: use github.com/opencontainers/image-spec/specs-go/v1.Index -type DualIndex struct { - ocispec.Index -} - // DualConfig covers Docker config (v1.0, v1.1, v1.2) and OCI config. // Unmarshalled as map[string]*json.RawMessage to retain unknown fields on remarshalling. type DualConfig map[string]*json.RawMessage diff --git a/images/converter/uncompress/uncompress.go b/images/converter/uncompress/uncompress.go index aca0035..30ae02c 100644 --- a/images/converter/uncompress/uncompress.go +++ b/images/converter/uncompress/uncompress.go @@ -17,11 +17,11 @@ package uncompress import ( - "compress/gzip" "context" "fmt" "io" + "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" @@ -49,7 +49,7 @@ func LayerConvertFunc(ctx context.Context, cs content.Store, desc ocispec.Descri } defer readerAt.Close() sr := io.NewSectionReader(readerAt, 0, desc.Size) - newR, err := gzip.NewReader(sr) + newR, err := compression.DecompressStream(sr) if err != nil { return nil, err } @@ -112,9 +112,9 @@ func convertMediaType(mt string) string { return images.MediaTypeDockerSchema2Layer case images.MediaTypeDockerSchema2LayerForeignGzip: return 
images.MediaTypeDockerSchema2LayerForeign - case ocispec.MediaTypeImageLayerGzip: + case ocispec.MediaTypeImageLayerGzip, ocispec.MediaTypeImageLayerZstd: return ocispec.MediaTypeImageLayer - case ocispec.MediaTypeImageLayerNonDistributableGzip: + case ocispec.MediaTypeImageLayerNonDistributableGzip, ocispec.MediaTypeImageLayerNonDistributableZstd: return ocispec.MediaTypeImageLayerNonDistributable default: return mt diff --git a/images/handlers.go b/images/handlers.go index 05a9017..077d88e 100644 --- a/images/handlers.go +++ b/images/handlers.go @@ -18,6 +18,7 @@ package images import ( "context" + "errors" "fmt" "sort" @@ -25,7 +26,6 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/platforms" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" ) @@ -33,13 +33,17 @@ import ( var ( // ErrSkipDesc is used to skip processing of a descriptor and // its descendants. - ErrSkipDesc = fmt.Errorf("skip descriptor") + ErrSkipDesc = errors.New("skip descriptor") // ErrStopHandler is used to signify that the descriptor // has been handled and should not be handled further. // This applies only to a single descriptor in a handler // chain and does not apply to descendant descriptors. - ErrStopHandler = fmt.Errorf("stop handler") + ErrStopHandler = errors.New("stop handler") + + // ErrEmptyWalk is used when the WalkNotEmpty handlers return no + // children (e.g.: they were filtered out). + ErrEmptyWalk = errors.New("image might be filtered out") ) // Handler handles image manifests @@ -99,6 +103,36 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err } } } + return nil +} + +// WalkNotEmpty works the same way Walk does, with the exception that it ensures that +// some children are still found by Walking the descriptors (for example, not all of +// them have been filtered out by one of the handlers). 
If there are no children, +// then an ErrEmptyWalk error is returned. +func WalkNotEmpty(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error { + isEmpty := true + var notEmptyHandler HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := handler.Handle(ctx, desc) + if err != nil { + return children, err + } + + if len(children) > 0 { + isEmpty = false + } + + return children, nil + } + + err := Walk(ctx, notEmptyHandler, descs...) + if err != nil { + return err + } + + if isEmpty { + return ErrEmptyWalk + } return nil } @@ -274,7 +308,7 @@ func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc if n > 0 { if len(children) == 0 { - return children, errors.Wrap(errdefs.ErrNotFound, "no match for platform in manifest") + return children, fmt.Errorf("no match for platform in manifest: %w", errdefs.ErrNotFound) } if len(children) > n { children = children[:n] diff --git a/images/image.go b/images/image.go index 2e5cd61..d45afe4 100644 --- a/images/image.go +++ b/images/image.go @@ -29,7 +29,6 @@ import ( "github.com/containerd/containerd/platforms" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // Image provides the model for how containerd views container images. 
@@ -115,7 +114,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor var size int64 return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { if desc.Size < 0 { - return nil, errors.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType) + return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType) } size += desc.Size return nil, nil @@ -156,7 +155,7 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc } if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest) + return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err) } var manifest ocispec.Manifest @@ -200,7 +199,7 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc } if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest) + return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err) } var idx ocispec.Index @@ -236,15 +235,15 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc } return descs, nil } - return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest) + return nil, fmt.Errorf("unexpected media type %v for %v: %w", desc.MediaType, desc.Digest, errdefs.ErrNotFound) }), image); err != nil { return ocispec.Manifest{}, err } if len(m) == 0 { - err := errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest) + err := fmt.Errorf("manifest %v: %w", image.Digest, errdefs.ErrNotFound) if wasIndex { - err = errors.Wrapf(errdefs.ErrNotFound, "no match for platform in manifest %v", image.Digest) + err = fmt.Errorf("no match for platform in manifest %v: %w", image.Digest, errdefs.ErrNotFound) } return ocispec.Manifest{}, err } @@ -309,7 
+308,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil } - return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest) + return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err) } // TODO(stevvooe): It is possible that referenced conponents could have @@ -324,7 +323,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip missing = append(missing, desc) continue } else { - return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest) + return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", desc.Digest, err) } } ra.Close() @@ -346,7 +345,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr } if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest) + return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err) } // TODO(stevvooe): We just assume oci manifest, for now. There may be @@ -365,7 +364,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr } if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest) + return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err) } var index ocispec.Index diff --git a/images/mediatypes.go b/images/mediatypes.go index 785d712..671e160 100644 --- a/images/mediatypes.go +++ b/images/mediatypes.go @@ -18,12 +18,12 @@ package images import ( "context" + "fmt" "sort" "strings" "github.com/containerd/containerd/errdefs" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // mediatype definitions for image components handled in containerd. 
@@ -87,7 +87,7 @@ func DiffCompression(ctx context.Context, mediaType string) (string, error) { } return "", nil default: - return "", errors.Wrapf(errdefs.ErrNotImplemented, "unrecognised mediatype %s", mediaType) + return "", fmt.Errorf("unrecognised mediatype %s: %w", mediaType, errdefs.ErrNotImplemented) } } diff --git a/import.go b/import.go index 6080161..8936d88 100644 --- a/import.go +++ b/import.go @@ -31,11 +31,13 @@ import ( ) type importOpts struct { - indexName string - imageRefT func(string) string - dgstRefT func(digest.Digest) string - allPlatforms bool - compress bool + indexName string + imageRefT func(string) string + dgstRefT func(digest.Digest) string + skipDgstRef func(string) bool + allPlatforms bool + platformMatcher platforms.MatchComparer + compress bool } // ImportOpt allows the caller to specify import specific options @@ -59,6 +61,17 @@ func WithDigestRef(f func(digest.Digest) string) ImportOpt { } } +// WithSkipDigestRef is used to specify when to skip applying +// WithDigestRef. The callback receives an image reference (or an empty +// string if not specified in the image). When the callback returns true, +// the skip occurs. +func WithSkipDigestRef(f func(string) bool) ImportOpt { + return func(c *importOpts) error { + c.skipDgstRef = f + return nil + } +} + // WithIndexName creates a tag pointing to the imported index func WithIndexName(name string) ImportOpt { return func(c *importOpts) error { @@ -75,6 +88,14 @@ func WithAllPlatforms(allPlatforms bool) ImportOpt { } } +// WithImportPlatform is used to import content for specific platform. +func WithImportPlatform(platformMacher platforms.MatchComparer) ImportOpt { + return func(c *importOpts) error { + c.platformMatcher = platformMacher + return nil + } +} + // WithImportCompression compresses uncompressed layers on import. // This is used for import formats which do not include the manifest. 
func WithImportCompression() ImportOpt { @@ -123,9 +144,11 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt Target: index, }) } - var platformMatcher = platforms.All - if !iopts.allPlatforms { - platformMatcher = c.platform + var platformMatcher = c.platform + if iopts.allPlatforms { + platformMatcher = platforms.All + } else if iopts.platformMatcher != nil { + platformMatcher = iopts.platformMatcher } var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { @@ -152,6 +175,11 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt Target: m, }) } + if iopts.skipDgstRef != nil { + if iopts.skipDgstRef(name) { + continue + } + } if iopts.dgstRefT != nil { ref := iopts.dgstRefT(m.Digest) if ref != "" { @@ -168,7 +196,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt handler = images.FilterPlatforms(handler, platformMatcher) handler = images.SetChildrenLabels(cs, handler) - if err := images.Walk(ctx, handler, index); err != nil { + if err := images.WalkNotEmpty(ctx, handler, index); err != nil { return nil, err } diff --git a/install.go b/install.go index 7a8311c..16cff08 100644 --- a/install.go +++ b/install.go @@ -19,6 +19,8 @@ package containerd import ( "archive/tar" "context" + "errors" + "fmt" "os" "path/filepath" "runtime" @@ -28,7 +30,6 @@ import ( "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/pkg/errors" ) // Install a binary image into the opt service @@ -66,6 +67,7 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts) cr := content.NewReader(ra) r, err := compression.DecompressStream(cr) if err != nil { + ra.Close() return err } if _, err := archive.Apply(ctx, path, r, archive.WithFilter(func(hdr *tar.Header) (bool, error) { @@ -81,15 +83,17 @@ func (c *Client) 
Install(ctx context.Context, image Image, opts ...InstallOpts) } if result && !config.Replace { if _, err := os.Lstat(filepath.Join(path, hdr.Name)); err == nil { - return false, errors.Errorf("cannot replace %s in %s", hdr.Name, path) + return false, fmt.Errorf("cannot replace %s in %s", hdr.Name, path) } } return result, nil })); err != nil { r.Close() + ra.Close() return err } r.Close() + ra.Close() } return nil } diff --git a/integration/addition_gids_test.go b/integration/addition_gids_test.go index 7674fe0..238d7ca 100644 --- a/integration/addition_gids_test.go +++ b/integration/addition_gids_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -19,7 +20,7 @@ package integration import ( - "io/ioutil" + "fmt" "os" "path/filepath" "testing" @@ -27,63 +28,102 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestAdditionalGids(t *testing.T) { - testPodLogDir, err := ioutil.TempDir("/tmp", "additional-gids") - require.NoError(t, err) - defer os.RemoveAll(testPodLogDir) + testImage := GetImage(BusyBox) + EnsureImageExists(t, testImage) + type testCase struct { + description string + opts []ContainerOpts + expected string + } - t.Log("Create a sandbox with log directory") - sbConfig := PodSandboxConfig("sandbox", "additional-gids", - WithPodLogDirectory(testPodLogDir)) - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + testCases := []testCase{ + { + description: "Equivalent of `docker run` (no option)", + opts: nil, + expected: "groups=0(root),10(wheel)", + }, + { + description: "Equivalent of `docker run --group-add 1 --group-add 1234`", + opts: []ContainerOpts{WithSupplementalGroups([]int64{1 /*daemon*/, 1234 /*new group*/})}, + 
expected: "groups=0(root),1(daemon),10(wheel),1234", + }, + { + description: "Equivalent of `docker run --user 1234`", + opts: []ContainerOpts{WithRunAsUser(1234)}, + expected: "groups=0(root)", + }, + { + description: "Equivalent of `docker run --user 1234:1234`", + opts: []ContainerOpts{WithRunAsUser(1234), WithRunAsGroup(1234)}, + expected: "groups=1234", + }, + { + description: "Equivalent of `docker run --user 1234 --group-add 1234`", + opts: []ContainerOpts{WithRunAsUser(1234), WithSupplementalGroups([]int64{1234})}, + expected: "groups=0(root),1234", + }, + { + description: "Equivalent of `docker run --user daemon` (Supported by CRI, although unsupported by kube-apiserver)", + opts: []ContainerOpts{WithRunAsUsername("daemon")}, + expected: "groups=1(daemon)", + }, + { + description: "Equivalent of `docker run --user daemon --group-add 1234` (Supported by CRI, although unsupported by kube-apiserver)", + opts: []ContainerOpts{WithRunAsUsername("daemon"), WithSupplementalGroups([]int64{1234})}, + expected: "groups=1(daemon),1234", + }, + } - var ( - testImage = GetImage(BusyBox) - containerName = "test-container" - ) - t.Logf("Pull test image %q", testImage) - img, err := imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) - defer func() { - assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: img})) - }() + for i, tc := range testCases { + i, tc := i, tc + tBasename := fmt.Sprintf("case-%d", i) + t.Run(tBasename, func(t *testing.T) { + t.Log(tc.description) + t.Logf("Expected=%q", tc.expected) - t.Log("Create a container to print id") - cnConfig := ContainerConfig( - containerName, - testImage, - WithCommand("id"), - WithLogPath(containerName), - WithSupplementalGroups([]int64{1 /*daemon*/, 1234 /*new group*/}), - ) - cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig) - require.NoError(t, err) + testPodLogDir := t.TempDir() - t.Log("Start the container") - require.NoError(t, 
runtimeService.StartContainer(cn)) + t.Log("Create a sandbox with log directory") + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", tBasename, + WithPodLogDirectory(testPodLogDir)) - t.Log("Wait for container to finish running") - require.NoError(t, Eventually(func() (bool, error) { - s, err := runtimeService.ContainerStatus(cn) - if err != nil { - return false, err - } - if s.GetState() == runtime.ContainerState_CONTAINER_EXITED { - return true, nil - } - return false, nil - }, time.Second, 30*time.Second)) + t.Log("Create a container to print id") + containerName := tBasename + cnConfig := ContainerConfig( + containerName, + testImage, + append( + []ContainerOpts{ + WithCommand("id"), + WithLogPath(containerName), + }, tc.opts...)..., + ) + cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig) + require.NoError(t, err) - t.Log("Search additional groups in container log") - content, err := ioutil.ReadFile(filepath.Join(testPodLogDir, containerName)) - assert.NoError(t, err) - assert.Contains(t, string(content), "groups=1(daemon),10(wheel),1234") + t.Log("Start the container") + require.NoError(t, runtimeService.StartContainer(cn)) + + t.Log("Wait for container to finish running") + require.NoError(t, Eventually(func() (bool, error) { + s, err := runtimeService.ContainerStatus(cn) + if err != nil { + return false, err + } + if s.GetState() == runtime.ContainerState_CONTAINER_EXITED { + return true, nil + } + return false, nil + }, time.Second, 30*time.Second)) + + t.Log("Search additional groups in container log") + content, err := os.ReadFile(filepath.Join(testPodLogDir, containerName)) + assert.NoError(t, err) + assert.Contains(t, string(content), tc.expected+"\n") + }) + } } diff --git a/integration/client/client_test.go b/integration/client/client_test.go index 022669a..963df45 100644 --- a/integration/client/client_test.go +++ b/integration/client/client_test.go @@ -22,9 +22,7 @@ import ( "flag" "fmt" "io" - "io/ioutil" "os" - "os/exec" 
"testing" "time" @@ -42,6 +40,7 @@ import ( "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" "github.com/sirupsen/logrus" + exec "golang.org/x/sys/execabs" ) var ( @@ -90,7 +89,7 @@ func TestMain(m *testing.M) { if !noDaemon { sys.ForceRemoveAll(defaultRoot) - stdioFile, err := ioutil.TempFile("", "") + stdioFile, err := os.CreateTemp("", "") if err != nil { fmt.Fprintf(os.Stderr, "could not create a new stdio temp file: %s\n", err) os.Exit(1) @@ -112,6 +111,10 @@ func TestMain(m *testing.M) { fmt.Fprintf(os.Stderr, "%s: %s\n", err, buf.String()) os.Exit(1) } + } else { + // Otherwise if no-daemon was specified we need to connect to an already running ctrd instance. + // Set the addr field on the daemon object so it knows what to try connecting to. + ctrd.addr = address } waitCtx, waitCancel := context.WithTimeout(ctx, 4*time.Second) @@ -330,7 +333,7 @@ func TestImagePullAllPlatforms(t *testing.T) { defer cancel() cs := client.ContentStore() - img, err := client.Fetch(ctx, "k8s.gcr.io/pause:3.5") + img, err := client.Fetch(ctx, "k8s.gcr.io/pause:3.6") if err != nil { t.Fatal(err) } @@ -380,7 +383,7 @@ func TestImagePullSomePlatforms(t *testing.T) { // Note: Must be different to the image used in TestImagePullAllPlatforms // or it will see the content pulled by that, and fail. - img, err := client.Fetch(ctx, "k8s.gcr.io/pause:3.2", opts...) + img, err := client.Fetch(ctx, "registry.k8s.io/e2e-test-images/busybox:1.29-2", opts...) 
if err != nil { t.Fatal(err) } @@ -497,7 +500,7 @@ func TestClientReconnect(t *testing.T) { } func createShimDebugConfig() string { - f, err := ioutil.TempFile("", "containerd-config-") + f, err := os.CreateTemp("", "containerd-config-") if err != nil { fmt.Fprintf(os.Stderr, "Failed to create config file: %s\n", err) os.Exit(1) diff --git a/integration/client/client_ttrpc_test.go b/integration/client/client_ttrpc_test.go index 1c7aeb9..774b6cf 100644 --- a/integration/client/client_ttrpc_test.go +++ b/integration/client/client_ttrpc_test.go @@ -30,6 +30,9 @@ import ( ) func TestClientTTRPC_New(t *testing.T) { + if testing.Short() { + t.Skip() + } client, err := ttrpcutil.NewClient(address + ".ttrpc") assert.NilError(t, err) @@ -38,6 +41,9 @@ func TestClientTTRPC_New(t *testing.T) { } func TestClientTTRPC_Reconnect(t *testing.T) { + if testing.Short() { + t.Skip() + } client, err := ttrpcutil.NewClient(address + ".ttrpc") assert.NilError(t, err) @@ -63,6 +69,9 @@ func TestClientTTRPC_Reconnect(t *testing.T) { } func TestClientTTRPC_Close(t *testing.T) { + if testing.Short() { + t.Skip() + } client, err := ttrpcutil.NewClient(address + ".ttrpc") assert.NilError(t, err) diff --git a/integration/client/client_unix_test.go b/integration/client/client_unix_test.go index 278b4ea..8250ae0 100644 --- a/integration/client/client_unix_test.go +++ b/integration/client/client_unix_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -32,9 +33,10 @@ const ( ) var ( - testImage = "ghcr.io/containerd/busybox:1.32" - shortCommand = withProcessArgs("true") - longCommand = withProcessArgs("/bin/sh", "-c", "while true; do sleep 1; done") + testImage = "ghcr.io/containerd/busybox:1.32" + testMultiLayeredImage = "ghcr.io/containerd/volume-copy-up:2.1" + shortCommand = withProcessArgs("true") + longCommand = withProcessArgs("/bin/sh", "-c", "while true; do sleep 1; done") ) func TestImagePullSchema1WithEmptyLayers(t *testing.T) { diff --git 
a/integration/client/client_windows_test.go b/integration/client/client_windows_test.go index 575a06a..2ee3872 100644 --- a/integration/client/client_windows_test.go +++ b/integration/client/client_windows_test.go @@ -30,11 +30,12 @@ const ( ) var ( - defaultRoot = filepath.Join(os.Getenv("programfiles"), "containerd", "root-test") - defaultState = filepath.Join(os.Getenv("programfiles"), "containerd", "state-test") - testImage string - shortCommand = withTrue() - longCommand = withProcessArgs("ping", "-t", "localhost") + defaultRoot = filepath.Join(os.Getenv("programfiles"), "containerd", "root-test") + defaultState = filepath.Join(os.Getenv("programfiles"), "containerd", "state-test") + testImage string + testMultiLayeredImage = "ghcr.io/containerd/volume-copy-up:2.1" + shortCommand = withTrue() + longCommand = withProcessArgs("ping", "-t", "localhost") ) func init() { @@ -56,7 +57,17 @@ func init() { testImage = "mcr.microsoft.com/windows/nanoserver:2004" case osversion.V20H2: testImage = "mcr.microsoft.com/windows/nanoserver:20H2" + case osversion.V21H2Server: + testImage = "mcr.microsoft.com/windows/nanoserver:ltsc2022" default: + // Due to some efforts in improving down-level compatibility for Windows containers (see + // https://techcommunity.microsoft.com/t5/containers/windows-server-2022-and-beyond-for-containers/ba-p/2712487) + // the ltsc2022 image should continue to work on builds ws2022 and onwards (Windows 11 for example). With this in mind, + // if there's no mapping for the host build just use the Windows Server 2022 image. 
+ if b > osversion.V21H2Server { + testImage = "mcr.microsoft.com/windows/nanoserver:ltsc2022" + return + } fmt.Println("No test image defined for Windows build version:", b) panic("No windows test image found for this Windows build") } diff --git a/integration/client/container_checkpoint_test.go b/integration/client/container_checkpoint_test.go index e33f4f9..9329751 100644 --- a/integration/client/container_checkpoint_test.go +++ b/integration/client/container_checkpoint_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -22,7 +23,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -455,11 +455,7 @@ func TestCheckpointRestoreWithImagePath(t *testing.T) { } // create image path store criu image files - crDir, err := ioutil.TempDir("", "test-cr") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(crDir) + crDir := t.TempDir() imagePath := filepath.Join(crDir, "cr") // checkpoint task if _, err := task.Checkpoint(ctx, WithCheckpointImagePath(imagePath)); err != nil { @@ -473,7 +469,7 @@ func TestCheckpointRestoreWithImagePath(t *testing.T) { task.Delete(ctx) // check image files have been dumped into image path - if files, err := ioutil.ReadDir(imagePath); err != nil || len(files) == 0 { + if files, err := os.ReadDir(imagePath); err != nil || len(files) == 0 { t.Fatal("failed to checkpoint with image path set") } diff --git a/integration/client/container_linux_test.go b/integration/client/container_linux_test.go index d9a83ca..85a75f7 100644 --- a/integration/client/container_linux_test.go +++ b/integration/client/container_linux_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -23,9 +21,7 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" - "os/exec" "path/filepath" "runtime" "strings" @@ -37,130 +33,21 @@ import ( "github.com/containerd/cgroups" cgroupsv2 "github.com/containerd/cgroups/v2" . 
"github.com/containerd/containerd" - apievents "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log/logtest" - "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/oci" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime/linux/runctypes" "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/containerd/sys" - "github.com/containerd/typeurl" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "github.com/opencontainers/runtime-spec/specs-go" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) const testUserNSImage = "ghcr.io/containerd/alpine:3.14.0" -// TestRegressionIssue4769 verifies the number of task exit events. -// -// Issue: https://github.com/containerd/containerd/issues/4769. 
-func TestRegressionIssue4769(t *testing.T) { - t.Parallel() - - client, err := newClient(t, address) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - // use unique namespace to get unique task events - id := t.Name() - ns := fmt.Sprintf("%s-%s", testNamespace, id) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - ctx = namespaces.WithNamespace(ctx, ns) - ctx = logtest.WithT(ctx, t) - - image, err := client.Pull(ctx, testImage, WithPullUnpack) - if err != nil { - t.Fatal(err) - } - defer client.ImageService().Delete(ctx, testImage, images.SynchronousDelete()) - - container, err := client.NewContainer(ctx, id, - WithNewSnapshot(id, image), - WithNewSpec(oci.WithImageConfig(image), withTrue()), - WithRuntime(client.Runtime(), nil), - ) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx, WithSnapshotCleanup) - - task, err := container.NewTask(ctx, empty()) - if err != nil { - t.Fatal(err) - } - defer task.Delete(ctx) - - statusC, err := task.Wait(ctx) - if err != nil { - t.Fatal(err) - } - - eventStream, errC := client.EventService().Subscribe(ctx, "namespace=="+ns+",topic~=|^/tasks/exit|") - - if err := task.Start(ctx); err != nil { - t.Fatal(err) - } - - var timeout = 3 * time.Second - - select { - case et := <-statusC: - if got := et.ExitCode(); got != 0 { - t.Fatal(errors.Errorf("expect zero exit status, but got %v", got)) - } - case <-time.After(timeout): - t.Fatal(fmt.Errorf("failed to get exit event in time")) - } - - // start to check events - select { - case et := <-eventStream: - if et.Event == nil { - t.Fatal(errors.Errorf("unexpected empty event: %+v", et)) - } - - v, err := typeurl.UnmarshalAny(et.Event) - if err != nil { - t.Fatal(errors.Wrap(err, "failed to unmarshal event")) - } - - if e, ok := v.(*apievents.TaskExit); !ok { - t.Fatal(errors.Errorf("unexpected event type: %+v", v)) - } else if e.ExitStatus != 0 { - t.Fatal(errors.Errorf("expect zero exit status, but got %v", e.ExitStatus)) - 
} - case err := <-errC: - t.Fatal(errors.Wrap(err, "unexpected error from event service")) - - case <-time.After(timeout): - t.Fatal(fmt.Errorf("failed to get exit event in time")) - } - - if _, err := task.Delete(ctx); err != nil { - t.Fatal(err) - } - - // check duplicate event should not show up - select { - case event := <-eventStream: - t.Fatal(errors.Errorf("unexpected exit event: %+v", event)) - case err := <-errC: - t.Fatal(errors.Wrap(err, "unexpected error from event service")) - case <-time.After(timeout): - } -} - func TestTaskUpdate(t *testing.T) { t.Parallel() @@ -351,76 +238,6 @@ func TestShimInCgroup(t *testing.T) { <-statusC } -func TestDaemonRestart(t *testing.T) { - client, err := newClient(t, address) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - var ( - image Image - ctx, cancel = testContext(t) - id = t.Name() - ) - defer cancel() - - image, err = client.GetImage(ctx, testImage) - if err != nil { - t.Fatal(err) - } - - container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"))) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx, WithSnapshotCleanup) - - task, err := container.NewTask(ctx, empty()) - if err != nil { - t.Fatal(err) - } - defer task.Delete(ctx) - - statusC, err := task.Wait(ctx) - if err != nil { - t.Fatal(err) - } - - if err := task.Start(ctx); err != nil { - t.Fatal(err) - } - - var exitStatus ExitStatus - if err := ctrd.Restart(func() { - exitStatus = <-statusC - }); err != nil { - t.Fatal(err) - } - - if exitStatus.Error() == nil { - t.Errorf(`first task.Wait() should have failed with "transport is closing"`) - } - - waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second) - serving, err := client.IsServing(waitCtx) - waitCancel() - if !serving { - t.Fatalf("containerd did not start within 2s: %v", err) - } - - statusC, err = task.Wait(ctx) - if err != nil { - t.Fatal(err) - } - - if err := 
task.Kill(ctx, syscall.SIGKILL); err != nil { - t.Fatal(err) - } - - <-statusC -} - func TestShimDoesNotLeakPipes(t *testing.T) { containerdPid := ctrd.cmd.Process.Pid initialPipes, err := numPipes(containerdPid) @@ -551,7 +368,7 @@ func TestDaemonReconnectsToShimIOPipesOnRestart(t *testing.T) { t.Fatalf("containerd did not start within 2s: %v", err) } - // After we restared containerd we write some messages to the log pipes, simulating shim writing stuff there. + // After we restarted containerd we write some messages to the log pipes, simulating shim writing stuff there. // Then we make sure that these messages are available on the containerd log thus proving that the server reconnected to the log pipes runtimeVersion := getRuntimeVersion() logDirPath := getLogDirPath(runtimeVersion, id) @@ -575,7 +392,7 @@ func TestDaemonReconnectsToShimIOPipesOnRestart(t *testing.T) { <-statusC - stdioContents, err := ioutil.ReadFile(ctrdStdioFilePath) + stdioContents, err := os.ReadFile(ctrdStdioFilePath) if err != nil { t.Fatal(err) } @@ -628,76 +445,6 @@ func getRuntimeVersion() string { } } -func TestContainerPTY(t *testing.T) { - t.Parallel() - - client, err := newClient(t, address) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - var ( - image Image - ctx, cancel = testContext(t) - id = t.Name() - ) - defer cancel() - - image, err = client.GetImage(ctx, testImage) - if err != nil { - t.Fatal(err) - } - - container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithTTY, withProcessArgs("echo", "hello"))) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx, WithSnapshotCleanup) - - direct, err := newDirectIO(ctx, true) - if err != nil { - t.Fatal(err) - } - defer direct.Delete() - var ( - wg sync.WaitGroup - buf = bytes.NewBuffer(nil) - ) - wg.Add(1) - go func() { - defer wg.Done() - io.Copy(buf, direct.Stdout) - }() - - task, err := container.NewTask(ctx, direct.IOCreate) - if err != 
nil { - t.Fatal(err) - } - defer task.Delete(ctx) - - status, err := task.Wait(ctx) - if err != nil { - t.Error(err) - } - - if err := task.Start(ctx); err != nil { - t.Fatal(err) - } - - <-status - wg.Wait() - - if err := direct.Close(); err != nil { - t.Error(err) - } - - out := buf.String() - if !strings.ContainsAny(fmt.Sprintf("%#q", out), `\x00`) { - t.Fatal(`expected \x00 in output`) - } -} - func TestContainerAttach(t *testing.T) { t.Parallel() @@ -803,126 +550,6 @@ func TestContainerAttach(t *testing.T) { } } -func newDirectIO(ctx context.Context, terminal bool) (*directIO, error) { - fifos, err := cio.NewFIFOSetInDir("", "", terminal) - if err != nil { - return nil, err - } - dio, err := cio.NewDirectIO(ctx, fifos) - if err != nil { - return nil, err - } - return &directIO{DirectIO: *dio}, nil -} - -type directIO struct { - cio.DirectIO -} - -// ioCreate returns IO available for use with task creation -func (f *directIO) IOCreate(id string) (cio.IO, error) { - return f, nil -} - -// ioAttach returns IO available for use with task attachment -func (f *directIO) IOAttach(set *cio.FIFOSet) (cio.IO, error) { - return f, nil -} - -func (f *directIO) Cancel() { - // nothing to cancel as all operations are handled externally -} - -// Close closes all open fds -func (f *directIO) Close() error { - err := f.Stdin.Close() - if f.Stdout != nil { - if err2 := f.Stdout.Close(); err == nil { - err = err2 - } - } - if f.Stderr != nil { - if err2 := f.Stderr.Close(); err == nil { - err = err2 - } - } - return err -} - -// Delete removes the underlying directory containing fifos -func (f *directIO) Delete() error { - return f.DirectIO.Close() -} - -func TestContainerUsername(t *testing.T) { - t.Parallel() - - client, err := newClient(t, address) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - var ( - image Image - ctx, cancel = testContext(t) - id = t.Name() - ) - defer cancel() - - image, err = client.GetImage(ctx, testImage) - if err != nil { - 
t.Fatal(err) - } - direct, err := newDirectIO(ctx, false) - if err != nil { - t.Fatal(err) - } - defer direct.Delete() - var ( - wg sync.WaitGroup - buf = bytes.NewBuffer(nil) - ) - wg.Add(1) - go func() { - defer wg.Done() - io.Copy(buf, direct.Stdout) - }() - - // the www-data user in the busybox image has a uid of 33 - container, err := client.NewContainer(ctx, id, - WithNewSnapshot(id, image), - WithNewSpec(oci.WithImageConfig(image), oci.WithUsername("www-data"), oci.WithProcessArgs("id", "-u")), - ) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx, WithSnapshotCleanup) - - task, err := container.NewTask(ctx, direct.IOCreate) - if err != nil { - t.Fatal(err) - } - defer task.Delete(ctx) - - statusC, err := task.Wait(ctx) - if err != nil { - t.Fatal(err) - } - - if err := task.Start(ctx); err != nil { - t.Fatal(err) - } - <-statusC - - wg.Wait() - - output := strings.TrimSuffix(buf.String(), "\n") - if output != "33" { - t.Errorf("expected www-data uid to be 33 but received %q", output) - } -} - func TestContainerUser(t *testing.T) { t.Parallel() t.Run("UserNameAndGroupName", func(t *testing.T) { testContainerUser(t, "www-data:www-data", "33:33") }) @@ -1485,85 +1112,10 @@ func TestContainerRuntimeOptionsv2(t *testing.T) { } } -func initContainerAndCheckChildrenDieOnKill(t *testing.T, opts ...oci.SpecOpts) { - client, err := newClient(t, address) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - var ( - image Image - ctx, cancel = testContext(t) - id = t.Name() - ) - defer cancel() - - image, err = client.GetImage(ctx, testImage) - if err != nil { - t.Fatal(err) - } - - opts = append(opts, oci.WithImageConfig(image)) - opts = append(opts, withProcessArgs("sh", "-c", "sleep 42; echo hi")) - - container, err := client.NewContainer(ctx, id, - WithNewSnapshot(id, image), - WithNewSpec(opts...), - ) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx, WithSnapshotCleanup) - - stdout := bytes.NewBuffer(nil) - task, err := 
container.NewTask(ctx, cio.NewCreator(withByteBuffers(stdout))) - if err != nil { - t.Fatal(err) - } - defer task.Delete(ctx) - - statusC, err := task.Wait(ctx) - if err != nil { - t.Fatal(err) - } - - if err := task.Start(ctx); err != nil { - t.Fatal(err) - } - - if err := task.Kill(ctx, syscall.SIGKILL); err != nil { - t.Error(err) - } - - // Give the shim time to reap the init process and kill the orphans - select { - case <-statusC: - case <-time.After(100 * time.Millisecond): - } - - b, err := exec.Command("ps", "ax").CombinedOutput() - if err != nil { - t.Fatal(err) - } - - if strings.Contains(string(b), "sleep 42") { - t.Fatalf("killing init didn't kill all its children:\n%v", string(b)) - } - - if _, err := task.Delete(ctx, WithProcessKill); err != nil { - t.Error(err) - } -} - func TestContainerKillInitPidHost(t *testing.T) { initContainerAndCheckChildrenDieOnKill(t, oci.WithHostNamespace(specs.PIDNamespace)) } -func TestContainerKillInitKillsChildWhenNotHostPid(t *testing.T) { - initContainerAndCheckChildrenDieOnKill(t) -} - func TestUserNamespaces(t *testing.T) { t.Parallel() t.Run("WritableRootFS", func(t *testing.T) { testUserNamespaces(t, false) }) @@ -1683,110 +1235,6 @@ func testUserNamespaces(t *testing.T, readonlyRootFS bool) { } } -func TestTaskResize(t *testing.T) { - t.Parallel() - - client, err := newClient(t, address) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - var ( - image Image - ctx, cancel = testContext(t) - id = t.Name() - ) - defer cancel() - - image, err = client.GetImage(ctx, testImage) - if err != nil { - t.Fatal(err) - } - container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(7))) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx, WithSnapshotCleanup) - - task, err := container.NewTask(ctx, empty()) - if err != nil { - t.Fatal(err) - } - defer task.Delete(ctx) - - statusC, err := task.Wait(ctx) - if err != nil { - 
t.Fatal(err) - } - if err := task.Resize(ctx, 32, 32); err != nil { - t.Fatal(err) - } - task.Kill(ctx, syscall.SIGKILL) - <-statusC -} - -func TestContainerImage(t *testing.T) { - t.Parallel() - - ctx, cancel := testContext(t) - defer cancel() - id := t.Name() - - client, err := newClient(t, address) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - image, err := client.GetImage(ctx, testImage) - if err != nil { - t.Fatal(err) - } - - container, err := client.NewContainer(ctx, id, WithNewSpec(), WithImage(image)) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx) - - i, err := container.Image(ctx) - if err != nil { - t.Fatal(err) - } - if i.Name() != image.Name() { - t.Fatalf("expected container image name %s but received %s", image.Name(), i.Name()) - } -} - -func TestContainerNoImage(t *testing.T) { - t.Parallel() - - ctx, cancel := testContext(t) - defer cancel() - id := t.Name() - - client, err := newClient(t, address) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - container, err := client.NewContainer(ctx, id, WithNewSpec()) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx) - - _, err = container.Image(ctx) - if err == nil { - t.Fatal("error should not be nil when container is created without an image") - } - if !errdefs.IsNotFound(err) { - t.Fatalf("expected error to be %s but received %s", errdefs.ErrNotFound, err) - } -} - func TestUIDNoGID(t *testing.T) { t.Parallel() @@ -1939,55 +1387,6 @@ func TestBindLowPortNonOpt(t *testing.T) { } } -func TestContainerNoSTDIN(t *testing.T) { - t.Parallel() - - client, err := newClient(t, address) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - var ( - image Image - ctx, cancel = testContext(t) - id = t.Name() - ) - defer cancel() - - image, err = client.GetImage(ctx, testImage) - if err != nil { - t.Fatal(err) - } - container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), 
withExitStatus(0))) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx, WithSnapshotCleanup) - - task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStreams(nil, ioutil.Discard, ioutil.Discard))) - if err != nil { - t.Fatal(err) - } - defer task.Delete(ctx) - - statusC, err := task.Wait(ctx) - if err != nil { - t.Fatal(err) - } - if err := task.Start(ctx); err != nil { - t.Fatal(err) - } - status := <-statusC - code, _, err := status.Result() - if err != nil { - t.Fatal(err) - } - if code != 0 { - t.Errorf("expected status 0 from wait but received %d", code) - } -} - func TestShimOOMScore(t *testing.T) { containerdPid := ctrd.cmd.Process.Pid containerdScore, err := sys.GetOOMScoreAdj(containerdPid) @@ -2089,75 +1488,9 @@ func TestShimOOMScore(t *testing.T) { t.Fatal(err) } - <-statusC -} - -func TestTaskSpec(t *testing.T) { - t.Parallel() - - client, err := newClient(t, address) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - var ( - image Image - ctx, cancel = testContext(t) - id = t.Name() - ) - defer cancel() - - image, err = client.GetImage(ctx, testImage) - if err != nil { - t.Fatal(err) - } - - container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), longCommand)) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx, WithSnapshotCleanup) - - task, err := container.NewTask(ctx, empty()) - if err != nil { - t.Fatal(err) - } - defer task.Delete(ctx) - - statusC, err := task.Wait(ctx) - if err != nil { - t.Fatal(err) - } - - spec, err := task.Spec(ctx) - if err != nil { - t.Fatal(err) - } - if spec == nil { - t.Fatal("spec from task is nil") - } - direct, err := newDirectIO(ctx, false) - if err != nil { - t.Fatal(err) - } - defer direct.Delete() - - lt, err := container.Task(ctx, direct.IOAttach) - if err != nil { - t.Fatal(err) - } - - spec, err = lt.Spec(ctx) - if err != nil { - t.Fatal(err) - } - if spec == nil { - t.Fatal("spec from loaded task is 
nil") - } - - if err := task.Kill(ctx, syscall.SIGKILL); err != nil { - t.Fatal(err) - } - <-statusC + select { + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for task exit event") + case <-statusC: + } } diff --git a/integration/client/container_test.go b/integration/client/container_test.go index 143f1d1..53cffb6 100644 --- a/integration/client/container_test.go +++ b/integration/client/container_test.go @@ -19,11 +19,11 @@ package client import ( "bytes" "context" + "fmt" "io" - "io/ioutil" "os" - "os/exec" "path" + "path/filepath" "runtime" "strings" "syscall" @@ -31,27 +31,31 @@ import ( "time" . "github.com/containerd/containerd" + apievents "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log/logtest" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/oci" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/plugin" _ "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/v2/runc/options" - "github.com/containerd/typeurl" - specs "github.com/opencontainers/runtime-spec/specs-go" - - "github.com/containerd/containerd/errdefs" + "github.com/containerd/continuity/fs" "github.com/containerd/go-runc" + "github.com/containerd/typeurl" gogotypes "github.com/gogo/protobuf/types" + specs "github.com/opencontainers/runtime-spec/specs-go" + exec "golang.org/x/sys/execabs" ) func empty() cio.Creator { // TODO (@mlaventure) windows searches for pipes // when none are provided if runtime.GOOS == "windows" { - return cio.NewCreator(cio.WithStdio) + return cio.NewCreator(cio.WithStdio, cio.WithTerminal) } return cio.NullIO } @@ -171,6 +175,134 @@ func TestContainerStart(t *testing.T) { } } +func readShimPath(taskID string) (string, error) { + runtime := 
fmt.Sprintf("%s.%s", plugin.RuntimePluginV2, "task") + shimBinaryNamePath := filepath.Join(defaultState, runtime, testNamespace, taskID, "shim-binary-path") + + shimPath, err := os.ReadFile(shimBinaryNamePath) + if err != nil { + return "", err + } + return string(shimPath), nil +} + +func copyShim(shimPath string) (string, error) { + tempPath := filepath.Join(os.TempDir(), filepath.Base(shimPath)) + if err := fs.CopyFile(tempPath, shimPath); err != nil { + return "", err + } + + fi, err := os.Stat(shimPath) + if err != nil { + return "", err + } + if err := os.Chmod(tempPath, fi.Mode().Perm()); err != nil { + return "", err + } + + return tempPath, nil +} + +func TestContainerStartWithAbsRuntimePath(t *testing.T) { + t.Parallel() + + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + if client.Runtime() == plugin.RuntimeLinuxV1 { + t.Skip("test relies on runtime v2") + } + + var ( + image Image + ctx, cancel = testContext(t) + id = t.Name() + ) + defer cancel() + + image, err = client.GetImage(ctx, testImage) + if err != nil { + t.Fatal(err) + } + container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(7))) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx, WithSnapshotCleanup) + + // create a temp task to read the default shim path + task, err := container.NewTask(ctx, empty()) + if err != nil { + t.Fatal(err) + } + + defaultShimPath, err := readShimPath(task.ID()) + if err != nil { + t.Fatal(err) + } + + // remove the temp task + if _, err := task.Delete(ctx, WithProcessKill); err != nil { + t.Fatal(err) + } + + tempShimPath, err := copyShim(defaultShimPath) + if err != nil { + t.Fatal(err) + } + defer os.Remove(tempShimPath) + + task, err = container.NewTask(ctx, empty(), WithRuntimePath(tempShimPath)) + if err != nil { + t.Fatal(err) + } + + shimPath, err := readShimPath(task.ID()) + if err != nil { + t.Fatal(err) + } + if 
shimPath != tempShimPath { + t.Fatalf("The task's shim path is %s, does not used the specified runtime path: %s", shimPath, tempShimPath) + } + + statusC, err := task.Wait(ctx) + if err != nil { + t.Fatal(err) + } + + if runtime.GOOS != "windows" { + // task.Pid not implemented on Windows + if pid := task.Pid(); pid < 1 { + t.Errorf("invalid task pid %d", pid) + } + } + + if err := task.Start(ctx); err != nil { + t.Error(err) + task.Delete(ctx) + return + } + status := <-statusC + code, _, err := status.Result() + if err != nil { + t.Fatal(err) + } + if code != 7 { + t.Errorf("expected status 7 from wait but received %d", code) + } + + deleteStatus, err := task.Delete(ctx) + if err != nil { + t.Fatal(err) + } + if ec := deleteStatus.ExitCode(); ec != 7 { + t.Errorf("expected status 7 from delete but received %d", ec) + } +} + func TestContainerOutput(t *testing.T) { t.Parallel() @@ -232,7 +364,7 @@ func TestContainerOutput(t *testing.T) { } func withByteBuffers(stdout io.Writer) cio.Opt { - // TODO: could this use ioutil.Discard? + // TODO: could this use io.Discard? 
return func(streams *cio.Streams) { streams.Stdin = new(bytes.Buffer) streams.Stdout = stdout @@ -443,9 +575,9 @@ func TestContainerPids(t *testing.T) { t.Fatal(err) } - pid := task.Pid() - if pid < 1 { - t.Errorf("invalid task pid %d", pid) + taskPid := task.Pid() + if taskPid < 1 { + t.Errorf("invalid task pid %d", taskPid) } processes, err := task.Pids(ctx) switch runtime.GOOS { @@ -459,12 +591,17 @@ func TestContainerPids(t *testing.T) { if l := len(processes); l != 2 { t.Errorf("expected 2 process but received %d", l) } - if len(processes) > 0 { - actual := processes[0].Pid - if pid != actual { - t.Errorf("expected pid %d but received %d", pid, actual) + + var found bool + for _, p := range processes { + if p.Pid == taskPid { + found = true + break } } + if !found { + t.Errorf("pid %d must be in %+v", taskPid, processes) + } } if err := task.Kill(ctx, syscall.SIGKILL); err != nil { select { @@ -511,7 +648,7 @@ func TestContainerCloseIO(t *testing.T) { t.Fatal(err) } - task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStreams(r, stdout, ioutil.Discard))) + task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStreams(r, stdout, io.Discard))) if err != nil { t.Fatal(err) } @@ -1299,9 +1436,6 @@ func TestDeleteContainerExecCreated(t *testing.T) { } func TestContainerMetrics(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("metrics are currently not supported on windows") - } t.Parallel() client, err := newClient(t, address) @@ -1356,9 +1490,6 @@ func TestContainerMetrics(t *testing.T) { } func TestDeletedContainerMetrics(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("metrics are currently not supported on windows") - } t.Parallel() client, err := newClient(t, address) @@ -1567,6 +1698,12 @@ func TestContainerLabels(t *testing.T) { } func TestContainerHook(t *testing.T) { + // OCI hooks aren't implemented on Windows. 
This test will actually run fine on Windows if there's a 'ps' binary in the users PATH, but + // there's not any actual hook functionality being tested as any of the OCI fields are plain ignored for Windows containers. + if runtime.GOOS == "windows" { + t.Skip() + } + t.Parallel() client, err := newClient(t, address) @@ -1834,3 +1971,704 @@ func withProcessTTY() cio.Opt { cio.WithTerminal(opt) } } + +// TestRegressionIssue4769 verifies the number of task exit events. +// +// Issue: https://github.com/containerd/containerd/issues/4769. +func TestRegressionIssue4769(t *testing.T) { + t.Parallel() + + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + // use unique namespace to get unique task events + id := t.Name() + ns := fmt.Sprintf("%s-%s", testNamespace, id) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ctx = namespaces.WithNamespace(ctx, ns) + ctx = logtest.WithT(ctx, t) + + image, err := client.Pull(ctx, testImage, WithPullUnpack) + if err != nil { + t.Fatal(err) + } + defer client.ImageService().Delete(ctx, testImage, images.SynchronousDelete()) + + container, err := client.NewContainer(ctx, id, + WithNewSnapshot(id, image), + WithNewSpec(oci.WithImageConfig(image), withTrue()), + WithRuntime(client.Runtime(), nil), + ) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx, WithSnapshotCleanup) + + task, err := container.NewTask(ctx, empty()) + if err != nil { + t.Fatal(err) + } + defer task.Delete(ctx) + + statusC, err := task.Wait(ctx) + if err != nil { + t.Fatal(err) + } + + eventStream, errC := client.EventService().Subscribe(ctx, "namespace=="+ns+",topic~=|^/tasks/exit|") + + if err := task.Start(ctx); err != nil { + t.Fatal(err) + } + + var timeout = 3 * time.Second + + select { + case et := <-statusC: + if got := et.ExitCode(); got != 0 { + t.Fatal(fmt.Errorf("expect zero exit status, but got %v", got)) + } + case <-time.After(timeout): + 
t.Fatal(fmt.Errorf("failed to get exit event in time")) + } + + // start to check events + select { + case et := <-eventStream: + if et.Event == nil { + t.Fatal(fmt.Errorf("unexpected empty event: %+v", et)) + } + + v, err := typeurl.UnmarshalAny(et.Event) + if err != nil { + t.Fatal(fmt.Errorf("failed to unmarshal event: %w", err)) + } + + if e, ok := v.(*apievents.TaskExit); !ok { + t.Fatal(fmt.Errorf("unexpected event type: %+v", v)) + } else if e.ExitStatus != 0 { + t.Fatal(fmt.Errorf("expect zero exit status, but got %v", e.ExitStatus)) + } + case err := <-errC: + t.Fatal(fmt.Errorf("unexpected error from event service: %w", err)) + + case <-time.After(timeout): + t.Fatal(fmt.Errorf("failed to get exit event in time")) + } + + if _, err := task.Delete(ctx); err != nil { + t.Fatal(err) + } + + // check duplicate event should not show up + select { + case event := <-eventStream: + t.Fatal(fmt.Errorf("unexpected exit event: %+v", event)) + case err := <-errC: + t.Fatal(fmt.Errorf("unexpected error from event service: %w", err)) + case <-time.After(timeout): + } +} + +// TestRegressionIssue6429 should not send exit event out if command is not found. +// +// Issue: https://github.com/containerd/containerd/issues/6429. 
+func TestRegressionIssue6429(t *testing.T) { + t.Parallel() + + if runtime.GOOS == "windows" { + t.Skip("Test relies on runc") + } + + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + // use unique namespace to get unique task events + id := t.Name() + ns := fmt.Sprintf("%s-%s", testNamespace, id) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ctx = namespaces.WithNamespace(ctx, ns) + ctx = logtest.WithT(ctx, t) + + image, err := client.Pull(ctx, testImage, WithPullUnpack) + if err != nil { + t.Fatal(err) + } + defer client.ImageService().Delete(ctx, testImage, images.SynchronousDelete()) + + container, err := client.NewContainer(ctx, id, + WithNewSnapshot(id, image), + WithNewSpec(oci.WithImageConfig(image), withProcessArgs("notfound404")), + WithRuntime(client.Runtime(), nil), + ) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx, WithSnapshotCleanup) + + eventStream, errC := client.EventService().Subscribe(ctx, "namespace=="+ns+",topic~=|^/tasks/exit|") + + if _, err := container.NewTask(ctx, empty()); err == nil { + t.Fatalf("expected error but got nil") + } + + var timeout = 10 * time.Second + + // start to check events + select { + case et := <-eventStream: + t.Fatal(fmt.Errorf("unexpected task exit event: %+v", et)) + case err := <-errC: + t.Fatal(fmt.Errorf("unexpected error from event service: %w", err)) + + case <-time.After(timeout): + } +} + +func TestDaemonRestart(t *testing.T) { + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + var ( + image Image + ctx, cancel = testContext(t) + id = t.Name() + ) + defer cancel() + + image, err = client.GetImage(ctx, testImage) + if err != nil { + t.Fatal(err) + } + + container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), longCommand)) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx, 
WithSnapshotCleanup) + + task, err := container.NewTask(ctx, empty()) + if err != nil { + t.Fatal(err) + } + defer task.Delete(ctx) + + statusC, err := task.Wait(ctx) + if err != nil { + t.Fatal(err) + } + + if err := task.Start(ctx); err != nil { + t.Fatal(err) + } + + var exitStatus ExitStatus + if err := ctrd.Restart(func() { + exitStatus = <-statusC + }); err != nil { + t.Fatal(err) + } + + if exitStatus.Error() == nil { + t.Errorf(`first task.Wait() should have failed with "transport is closing"`) + } + + waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second) + serving, err := client.IsServing(waitCtx) + waitCancel() + if !serving { + t.Fatalf("containerd did not start within 2s: %v", err) + } + + statusC, err = task.Wait(ctx) + if err != nil { + t.Fatal(err) + } + + if err := task.Kill(ctx, syscall.SIGKILL); err != nil { + t.Fatal(err) + } + + <-statusC +} + +type directIO struct { + cio.DirectIO +} + +// ioCreate returns IO available for use with task creation +func (f *directIO) IOCreate(id string) (cio.IO, error) { + return f, nil +} + +// ioAttach returns IO available for use with task attachment +func (f *directIO) IOAttach(set *cio.FIFOSet) (cio.IO, error) { + return f, nil +} + +func (f *directIO) Cancel() { + // nothing to cancel as all operations are handled externally +} + +// Close closes all open fds +func (f *directIO) Close() error { + err := f.Stdin.Close() + if f.Stdout != nil { + if err2 := f.Stdout.Close(); err == nil { + err = err2 + } + } + if f.Stderr != nil { + if err2 := f.Stderr.Close(); err == nil { + err = err2 + } + } + return err +} + +// Delete removes the underlying directory containing fifos +func (f *directIO) Delete() error { + return f.DirectIO.Close() +} + +func initContainerAndCheckChildrenDieOnKill(t *testing.T, opts ...oci.SpecOpts) { + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + var ( + image Image + ctx, cancel = testContext(t) + id = t.Name() + ) + defer 
cancel() + + image, err = client.GetImage(ctx, testImage) + if err != nil { + t.Fatal(err) + } + + opts = append(opts, oci.WithImageConfig(image)) + opts = append(opts, longCommand) + + container, err := client.NewContainer(ctx, id, + WithNewSnapshot(id, image), + WithNewSpec(opts...), + ) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx, WithSnapshotCleanup) + + stdout := bytes.NewBuffer(nil) + task, err := container.NewTask(ctx, cio.NewCreator(withByteBuffers(stdout))) + if err != nil { + t.Fatal(err) + } + defer task.Delete(ctx) + + statusC, err := task.Wait(ctx) + if err != nil { + t.Fatal(err) + } + + if err := task.Start(ctx); err != nil { + t.Fatal(err) + } + + if err := task.Kill(ctx, syscall.SIGKILL); err != nil { + t.Error(err) + } + + // Give the shim time to reap the init process and kill the orphans + select { + case <-statusC: + case <-time.After(100 * time.Millisecond): + } + + command := []string{"ps", "ax"} + if runtime.GOOS == "windows" { + command = []string{"tasklist"} + } + b, err := exec.Command(command[0], command[1:]...).CombinedOutput() + if err != nil { + t.Fatal(err) + } + + // The container is using longCommand, which contains sleep 1 on Linux, and ping -t localhost on Windows. 
+ if strings.Contains(string(b), "sleep 1") || strings.Contains(string(b), "ping -t localhost") { + t.Fatalf("killing init didn't kill all its children:\n%v", string(b)) + } + + if _, err := task.Delete(ctx, WithProcessKill); err != nil { + t.Error(err) + } +} + +func TestContainerKillInitKillsChildWhenNotHostPid(t *testing.T) { + initContainerAndCheckChildrenDieOnKill(t) +} + +func TestTaskResize(t *testing.T) { + t.Parallel() + + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + var ( + image Image + ctx, cancel = testContext(t) + id = t.Name() + ) + defer cancel() + + image, err = client.GetImage(ctx, testImage) + if err != nil { + t.Fatal(err) + } + container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(7))) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx, WithSnapshotCleanup) + + task, err := container.NewTask(ctx, empty()) + if err != nil { + t.Fatal(err) + } + defer task.Delete(ctx) + + statusC, err := task.Wait(ctx) + if err != nil { + t.Fatal(err) + } + if err := task.Resize(ctx, 32, 32); err != nil { + t.Fatal(err) + } + task.Kill(ctx, syscall.SIGKILL) + <-statusC +} + +func TestContainerImage(t *testing.T) { + t.Parallel() + + ctx, cancel := testContext(t) + defer cancel() + id := t.Name() + + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + image, err := client.GetImage(ctx, testImage) + if err != nil { + t.Fatal(err) + } + + container, err := client.NewContainer(ctx, id, WithNewSpec(), WithImage(image)) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx) + + i, err := container.Image(ctx) + if err != nil { + t.Fatal(err) + } + if i.Name() != image.Name() { + t.Fatalf("expected container image name %s but received %s", image.Name(), i.Name()) + } +} + +func TestContainerNoImage(t *testing.T) { + t.Parallel() + + ctx, cancel := testContext(t) + 
defer cancel() + id := t.Name() + + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + container, err := client.NewContainer(ctx, id, WithNewSpec()) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx) + + _, err = container.Image(ctx) + if err == nil { + t.Fatal("error should not be nil when container is created without an image") + } + if !errdefs.IsNotFound(err) { + t.Fatalf("expected error to be %s but received %s", errdefs.ErrNotFound, err) + } +} + +func TestContainerNoSTDIN(t *testing.T) { + t.Parallel() + + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + var ( + image Image + ctx, cancel = testContext(t) + id = t.Name() + ) + defer cancel() + + image, err = client.GetImage(ctx, testImage) + if err != nil { + t.Fatal(err) + } + container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(0))) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx, WithSnapshotCleanup) + + task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStreams(nil, io.Discard, io.Discard))) + if err != nil { + t.Fatal(err) + } + defer task.Delete(ctx) + + statusC, err := task.Wait(ctx) + if err != nil { + t.Fatal(err) + } + if err := task.Start(ctx); err != nil { + t.Fatal(err) + } + status := <-statusC + code, _, err := status.Result() + if err != nil { + t.Fatal(err) + } + if code != 0 { + t.Errorf("expected status 0 from wait but received %d", code) + } +} + +func TestTaskSpec(t *testing.T) { + t.Parallel() + + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + var ( + image Image + ctx, cancel = testContext(t) + id = t.Name() + ) + defer cancel() + + image, err = client.GetImage(ctx, testImage) + if err != nil { + t.Fatal(err) + } + + container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), 
WithNewSpec(oci.WithImageConfig(image), longCommand)) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx, WithSnapshotCleanup) + + task, err := container.NewTask(ctx, empty()) + if err != nil { + t.Fatal(err) + } + defer task.Delete(ctx) + + statusC, err := task.Wait(ctx) + if err != nil { + t.Fatal(err) + } + + spec, err := task.Spec(ctx) + if err != nil { + t.Fatal(err) + } + if spec == nil { + t.Fatal("spec from task is nil") + } + direct, err := newDirectIO(ctx, false) + if err != nil { + t.Fatal(err) + } + defer direct.Delete() + + lt, err := container.Task(ctx, direct.IOAttach) + if err != nil { + t.Fatal(err) + } + + spec, err = lt.Spec(ctx) + if err != nil { + t.Fatal(err) + } + if spec == nil { + t.Fatal("spec from loaded task is nil") + } + + if err := task.Kill(ctx, syscall.SIGKILL); err != nil { + t.Fatal(err) + } + <-statusC +} + +func TestContainerUsername(t *testing.T) { + t.Parallel() + + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + var ( + image Image + ctx, cancel = testContext(t) + id = t.Name() + ) + defer cancel() + + image, err = client.GetImage(ctx, testImage) + if err != nil { + t.Fatal(err) + } + + username := "www-data" + command := []string{ + "id", "-u", + } + expectedOutput := "33" + if runtime.GOOS == "windows" { + username = "ContainerUser" + command = []string{ + "echo", `%USERNAME%`, + } + expectedOutput = "ContainerUser" + } + + // the www-data user in the busybox image has a uid of 33 + container, err := client.NewContainer(ctx, id, + WithNewSnapshot(id, image), + WithNewSpec(oci.WithImageConfig(image), oci.WithUsername(username), withProcessArgs(command...)), + ) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx, WithSnapshotCleanup) + + buf := bytes.NewBuffer(nil) + task, err := container.NewTask(ctx, cio.NewCreator(withByteBuffers(buf))) + if err != nil { + t.Fatal(err) + } + + statusC, err := task.Wait(ctx) + if err != nil { + t.Fatal(err) + 
} + + if err := task.Start(ctx); err != nil { + t.Fatal(err) + } + <-statusC + if _, err := task.Delete(ctx); err != nil { + t.Fatal(err) + } + + output := strings.TrimSuffix(buf.String(), newLine) + if output != expectedOutput { + t.Errorf("expected %s uid to be %s but received %q", username, expectedOutput, buf.String()) + } +} + +func TestContainerPTY(t *testing.T) { + t.Parallel() + + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + var ( + image Image + ctx, cancel = testContext(t) + id = t.Name() + ) + defer cancel() + + image, err = client.GetImage(ctx, testImage) + if err != nil { + t.Fatal(err) + } + + container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithTTY, withProcessArgs("echo", "hello"))) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx, WithSnapshotCleanup) + + buf := bytes.NewBuffer(nil) + task, err := container.NewTask(ctx, cio.NewCreator(withByteBuffers(buf), withProcessTTY())) + if err != nil { + t.Fatal(err) + } + defer task.Delete(ctx) + + statusC, err := task.Wait(ctx) + if err != nil { + t.Error(err) + } + + if err := task.Start(ctx); err != nil { + t.Fatal(err) + } + + <-statusC + + if _, err := task.Delete(ctx); err != nil { + t.Fatal(err) + } + + out := buf.String() + if !strings.ContainsAny(fmt.Sprintf("%#q", out), `\x00`) { + t.Fatal(`expected \x00 in output`) + } +} diff --git a/integration/client/content_test.go b/integration/client/content_test.go index c4aa1c0..6ad760e 100644 --- a/integration/client/content_test.go +++ b/integration/client/content_test.go @@ -27,7 +27,6 @@ import ( "github.com/containerd/containerd/content/testsuite" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/namespaces" - "github.com/pkg/errors" ) func newContentStore(ctx context.Context, root string) (context.Context, content.Store, func() error, error) { @@ -59,7 +58,7 @@ func 
newContentStore(ctx context.Context, root string) (context.Context, content } for _, st := range statuses { if err := cs.Abort(ctx, st.Ref); err != nil && !errdefs.IsNotFound(err) { - return errors.Wrapf(err, "failed to abort %s", st.Ref) + return fmt.Errorf("failed to abort %s: %w", st.Ref, err) } } err = cs.Walk(ctx, func(info content.Info) error { diff --git a/integration/client/convert_test.go b/integration/client/convert_test.go index 8b47064..8451d89 100644 --- a/integration/client/convert_test.go +++ b/integration/client/convert_test.go @@ -80,10 +80,6 @@ func TestConvert(t *testing.T) { t.Fatal(err) } for _, l := range mani.Layers { - if plats[0].OS == "windows" { - assert.Equal(t, ocispec.MediaTypeImageLayerNonDistributable, l.MediaType) - } else { - assert.Equal(t, ocispec.MediaTypeImageLayer, l.MediaType) - } + assert.Equal(t, ocispec.MediaTypeImageLayer, l.MediaType) } } diff --git a/integration/client/daemon_config_linux_test.go b/integration/client/daemon_config_linux_test.go index d62e938..bfec810 100644 --- a/integration/client/daemon_config_linux_test.go +++ b/integration/client/daemon_config_linux_test.go @@ -18,12 +18,8 @@ package client import ( "bufio" - "bytes" - "context" "fmt" - "io/ioutil" "os" - "os/exec" "path/filepath" "strings" "syscall" @@ -33,101 +29,13 @@ import ( "github.com/containerd/cgroups" . "github.com/containerd/containerd" "github.com/containerd/containerd/oci" - "github.com/containerd/containerd/pkg/testutil" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime/v2/runc/options" - srvconfig "github.com/containerd/containerd/services/server/config" ) -// the following nolint is for shutting up gometalinter on non-linux. 
-// nolint: unused -func newDaemonWithConfig(t *testing.T, configTOML string) (*Client, *daemon, func()) { - if testing.Short() { - t.Skip() - } - testutil.RequiresRoot(t) - var ( - ctrd = daemon{} - configTOMLDecoded srvconfig.Config - buf = bytes.NewBuffer(nil) - ) - - tempDir, err := ioutil.TempDir("", "containerd-test-new-daemon-with-config") - if err != nil { - t.Fatal(err) - } - defer func() { - if err != nil { - os.RemoveAll(tempDir) - } - }() - - configTOMLFile := filepath.Join(tempDir, "config.toml") - if err = ioutil.WriteFile(configTOMLFile, []byte(configTOML), 0600); err != nil { - t.Fatal(err) - } - - if err = srvconfig.LoadConfig(configTOMLFile, &configTOMLDecoded); err != nil { - t.Fatal(err) - } - - address := configTOMLDecoded.GRPC.Address - if address == "" { - address = filepath.Join(tempDir, "containerd.sock") - } - args := []string{"-c", configTOMLFile} - if configTOMLDecoded.Root == "" { - args = append(args, "--root", filepath.Join(tempDir, "root")) - } - if configTOMLDecoded.State == "" { - args = append(args, "--state", filepath.Join(tempDir, "state")) - } - if err = ctrd.start("containerd", address, args, buf, buf); err != nil { - t.Fatalf("%v: %s", err, buf.String()) - } - - waitCtx, waitCancel := context.WithTimeout(context.TODO(), 2*time.Second) - client, err := ctrd.waitForStart(waitCtx) - waitCancel() - if err != nil { - ctrd.Kill() - ctrd.Wait() - t.Fatalf("%v: %s", err, buf.String()) - } - - cleanup := func() { - if err := client.Close(); err != nil { - t.Fatalf("failed to close client: %v", err) - } - if err := ctrd.Stop(); err != nil { - if err := ctrd.Kill(); err != nil { - t.Fatalf("failed to signal containerd: %v", err) - } - } - if err := ctrd.Wait(); err != nil { - if _, ok := err.(*exec.ExitError); !ok { - t.Fatalf("failed to wait for: %v", err) - } - } - if err := os.RemoveAll(tempDir); err != nil { - t.Fatalf("failed to remove %s: %v", tempDir, err) - } - // cleaning config-specific resources is up to the caller - } - 
return client, &ctrd, cleanup -} - // TestDaemonRuntimeRoot ensures plugin.linux.runtime_root is not ignored func TestDaemonRuntimeRoot(t *testing.T) { - runtimeRoot, err := ioutil.TempDir("", "containerd-test-runtime-root") - if err != nil { - t.Fatal(err) - } - defer func() { - if err != nil { - os.RemoveAll(runtimeRoot) - } - }() + runtimeRoot := t.TempDir() configTOML := ` version = 2 [plugins] diff --git a/integration/client/daemon_test.go b/integration/client/daemon_test.go index bf746c2..8b7e22f 100644 --- a/integration/client/daemon_test.go +++ b/integration/client/daemon_test.go @@ -18,13 +18,15 @@ package client import ( "context" + "errors" + "fmt" "io" - "os/exec" + "runtime" "sync" "syscall" . "github.com/containerd/containerd" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" ) type daemon struct { @@ -45,7 +47,7 @@ func (d *daemon) start(name, address string, args []string, stdout, stderr io.Wr cmd.Stderr = stderr if err := cmd.Start(); err != nil { cmd.Wait() - return errors.Wrap(err, "failed to start daemon") + return fmt.Errorf("failed to start daemon: %w", err) } d.addr = address d.cmd = cmd @@ -110,9 +112,13 @@ func (d *daemon) Restart(stopCb func()) error { return errors.New("daemon is not running") } + signal := syscall.SIGTERM + if runtime.GOOS == "windows" { + signal = syscall.SIGKILL + } var err error - if err = d.cmd.Process.Signal(syscall.SIGTERM); err != nil { - return errors.Wrap(err, "failed to signal daemon") + if err = d.cmd.Process.Signal(signal); err != nil { + return fmt.Errorf("failed to signal daemon: %w", err) } d.cmd.Wait() @@ -126,7 +132,7 @@ func (d *daemon) Restart(stopCb func()) error { cmd.Stderr = d.cmd.Stderr if err := cmd.Start(); err != nil { cmd.Wait() - return errors.Wrap(err, "failed to start new daemon instance") + return fmt.Errorf("failed to start new daemon instance: %w", err) } d.cmd = cmd diff --git a/integration/client/export_test.go b/integration/client/export_test.go index 1e661b9..684cf3f 100644 
--- a/integration/client/export_test.go +++ b/integration/client/export_test.go @@ -19,12 +19,18 @@ package client import ( "archive/tar" "bytes" + "context" + "encoding/json" "io" + "os" "testing" . "github.com/containerd/containerd" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" "github.com/containerd/containerd/images/archive" "github.com/containerd/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // TestExport exports testImage as a tar stream @@ -50,14 +56,103 @@ func TestExport(t *testing.T) { if err != nil { t.Fatal(err) } - assertOCITar(t, bytes.NewReader(wb.Bytes())) + assertOCITar(t, bytes.NewReader(wb.Bytes()), true) } -func assertOCITar(t *testing.T, r io.Reader) { +// TestExportDockerManifest exports testImage as a tar stream, using the +// WithSkipDockerManifest option +func TestExportDockerManifest(t *testing.T) { + if testing.Short() { + t.Skip() + } + ctx, cancel := testContext(t) + defer cancel() + + client, err := New(address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + _, err = client.Fetch(ctx, testImage) + if err != nil { + t.Fatal(err) + } + dstFile, err := os.CreateTemp("", "export-import-test") + if err != nil { + t.Fatal(err) + } + defer func() { + dstFile.Close() + os.Remove(dstFile.Name()) + }() + + img, err := client.ImageService().Get(ctx, testImage) + if err != nil { + t.Fatal(err) + } + + // test multi-platform export + err = client.Export(ctx, dstFile, archive.WithManifest(img.Target), archive.WithSkipDockerManifest()) + if err != nil { + t.Fatal(err) + } + dstFile.Seek(0, 0) + assertOCITar(t, dstFile, false) + + // reset to beginning + dstFile.Seek(0, 0) + + // test single-platform export + var result ocispec.Descriptor + err = images.Walk(ctx, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, 
ocispec.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, client.ContentStore(), desc) + if err != nil { + return nil, err + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + if desc.Platform == nil || platforms.Default().Match(platforms.Normalize(*desc.Platform)) { + result = desc + } + return nil, nil + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, client.ContentStore(), desc) + if err != nil { + return nil, err + } + + var idx ocispec.Index + if err := json.Unmarshal(p, &idx); err != nil { + return nil, err + } + return idx.Manifests, nil + } + return nil, nil + }), img.Target) + if err != nil { + t.Fatal(err) + } + err = client.Export(ctx, dstFile, archive.WithManifest(result), archive.WithSkipDockerManifest()) + if err != nil { + t.Fatal(err) + } + dstFile.Seek(0, 0) + assertOCITar(t, dstFile, false) +} + +func assertOCITar(t *testing.T, r io.Reader, docker bool) { // TODO: add more assertion tr := tar.NewReader(r) foundOCILayout := false foundIndexJSON := false + foundManifestJSON := false for { h, err := tr.Next() if err == io.EOF { @@ -73,6 +168,9 @@ func assertOCITar(t *testing.T, r io.Reader) { if h.Name == "index.json" { foundIndexJSON = true } + if h.Name == "manifest.json" { + foundManifestJSON = true + } } if !foundOCILayout { t.Error("oci-layout not found") @@ -80,4 +178,9 @@ func assertOCITar(t *testing.T, r io.Reader) { if !foundIndexJSON { t.Error("index.json not found") } + if docker && !foundManifestJSON { + t.Error("manifest.json not found") + } else if !docker && foundManifestJSON { + t.Error("manifest.json found") + } } diff --git a/integration/client/go.mod b/integration/client/go.mod index 66c4679..685e6d4 100644 --- a/integration/client/go.mod +++ b/integration/client/go.mod @@ -1,24 +1,56 @@ module github.com/containerd/containerd/integration/client -go 1.15 +go 1.19 require ( - 
github.com/Microsoft/hcsshim v0.8.23 + github.com/Microsoft/hcsshim v0.9.10 github.com/Microsoft/hcsshim/test v0.0.0-20210408205431-da33ecd607e1 - github.com/containerd/cgroups v1.0.1 + github.com/containerd/cgroups v1.0.4 // the actual version of containerd is replaced with the code at the root of this repository - github.com/containerd/containerd v1.5.1 + github.com/containerd/containerd v1.6.18 + github.com/containerd/continuity v0.3.0 github.com/containerd/go-runc v1.0.0 - github.com/containerd/ttrpc v1.1.0 + github.com/containerd/ttrpc v1.1.2 github.com/containerd/typeurl v1.0.2 github.com/gogo/protobuf v1.3.2 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/image-spec v1.0.2 + github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 - github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.8.1 - golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 - gotest.tools/v3 v3.0.3 + github.com/sirupsen/logrus v1.9.0 + golang.org/x/sys v0.6.0 + gotest.tools/v3 v3.5.0 +) + +require ( + github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/cilium/ebpf v0.7.0 // indirect + github.com/containerd/console v1.0.3 // indirect + github.com/containerd/fifo v1.0.0 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/godbus/dbus/v5 v5.0.6 // indirect + github.com/gogo/googleapis v1.4.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/klauspost/compress v1.11.13 // indirect + github.com/moby/locker v1.0.1 // indirect + github.com/moby/sys/mountinfo v0.6.2 // indirect + 
github.com/moby/sys/signal v0.6.0 // indirect + github.com/opencontainers/runc v1.1.5 // indirect + github.com/opencontainers/selinux v1.10.1 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pkg/errors v0.9.1 // indirect + go.opencensus.io v0.23.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/text v0.8.0 // indirect + google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect + google.golang.org/grpc v1.50.1 // indirect + google.golang.org/protobuf v1.28.1 // indirect ) replace ( @@ -33,9 +65,10 @@ replace ( // root, because that's the actual version expected by the "containerd/containerd" // dependency above. github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2 - github.com/golang/protobuf => github.com/golang/protobuf v1.3.5 + + // prevent go mod from rolling this back to the last tagged release; see https://github.com/containerd/containerd/pull/6739 + github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092 github.com/urfave/cli => github.com/urfave/cli v1.22.1 google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 - google.golang.org/grpc => google.golang.org/grpc v1.27.1 ) diff --git a/integration/client/go.sum b/integration/client/go.sum index d7a29cb..3276980 100644 --- a/integration/client/go.sum +++ b/integration/client/go.sum @@ -1,4 +1,5 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod 
h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -9,157 +10,213 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod 
h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= 
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.23 h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M= -github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.9.2/go.mod 
h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.10 h1:TxXGNmcbQxBKVWvjvTocNb6jrPyeHlk5EiDhhgHgggs= +github.com/Microsoft/hcsshim v0.9.10/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim/test v0.0.0-20210408205431-da33ecd607e1 h1:pVKfKyPkXna29XlGjxSr9J0A7vNucOUHZ/2ClcTWalw= github.com/Microsoft/hcsshim/test v0.0.0-20210408205431-da33ecd607e1/go.mod h1:Cmvnhlie15Ha2UYrJs9EhgSx76Bq9RV2FgfEiT78GhI= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod 
h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/cenkalti/backoff/v4 v4.1.1/go.mod 
h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2 h1:iHsfF/t4aW4heW2YKfeHrVPGdtYTL4C4KocpM8KTSnI= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= +github.com/cilium/ebpf v0.7.0/go.mod 
h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= 
+github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= -github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU= github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/go-runc v1.0.0 
h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0= github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/ttrpc v1.1.2 h1:4jH6OQDQqjfVD2b5TJS5TxmGuLGmp5WW7KtW2TWOP7c= +github.com/containerd/ttrpc v1.1.2/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE= +github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= +github.com/containernetworking/cni v1.1.1/go.mod 
h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= +github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 
v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod 
h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod 
h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -167,23 +224,38 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod 
h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c h1:RBUpb2b14UnmRHNd2uHz20ZHLDK+SW5Us/vWF5IHRaY= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.3.2 h1:kX1es4djPJrsDhY7aZKJy7aZasdcB5oSOEphMjSB53c= github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf 
v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -191,77 +263,136 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -278,67 +409,102 @@ github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable 
v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod 
h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/signal v0.6.0 h1:aDpY94H8VlhTGa9sNYUFCFsMZIUh5wm0B6XkIoJj/iY= +github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= +github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo 
v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= +github.com/opencontainers/image-spec 
v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= +github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= +github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= 
-github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -346,12 +512,15 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -361,83 +530,149 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod 
h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= 
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= 
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= 
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= 
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -445,7 +680,11 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -458,6 +697,7 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image 
v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -474,9 +714,16 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -487,8 +734,8 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -497,28 +744,57 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -531,9 +807,9 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -554,39 +830,86 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 
h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text 
v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -595,9 +918,12 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -612,13 +938,33 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod 
h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= 
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -629,30 +975,70 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -661,42 +1047,59 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= +k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.5/go.mod 
h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= +k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= +k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils 
v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/integration/client/helpers_unix_test.go b/integration/client/helpers_unix_test.go index f95297e..5670ade 100644 --- a/integration/client/helpers_unix_test.go +++ b/integration/client/helpers_unix_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -22,6 +23,7 @@ import ( "context" "fmt" + "github.com/containerd/containerd/cio" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -55,3 +57,15 @@ func withExecExitStatus(s *specs.Process, es int) { func withExecArgs(s *specs.Process, args ...string) { s.Args = args } + +func newDirectIO(ctx context.Context, terminal bool) (*directIO, error) { + fifos, err := cio.NewFIFOSetInDir("", "", terminal) + if err != nil { + return nil, err + } + dio, err := cio.NewDirectIO(ctx, fifos) + if err != nil { + 
return nil, err + } + return &directIO{DirectIO: *dio}, nil +} diff --git a/integration/client/helpers_windows_test.go b/integration/client/helpers_windows_test.go index 7764745..3616c09 100644 --- a/integration/client/helpers_windows_test.go +++ b/integration/client/helpers_windows_test.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -19,9 +17,12 @@ package client import ( + "bytes" "context" + "io" "strconv" + "github.com/containerd/containerd/cio" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -55,3 +56,21 @@ func withExecExitStatus(s *specs.Process, es int) { func withExecArgs(s *specs.Process, args ...string) { s.Args = append([]string{"cmd", "/c"}, args...) } + +type bytesBuffer struct { + *bytes.Buffer +} + +// Close is a noop operation. +func (b bytesBuffer) Close() error { + return nil +} + +func newDirectIO(ctx context.Context, terminal bool) (*directIO, error) { + readb := bytesBuffer{bytes.NewBuffer(nil)} + writeb := io.NopCloser(bytes.NewBuffer(nil)) + errb := io.NopCloser(bytes.NewBuffer(nil)) + + dio := cio.NewDirectIO(readb, writeb, errb, terminal) + return &directIO{DirectIO: *dio}, nil +} diff --git a/integration/client/image_test.go b/integration/client/image_test.go index aeb6112..a3329b5 100644 --- a/integration/client/image_test.go +++ b/integration/client/image_test.go @@ -31,7 +31,7 @@ import ( ) func TestImageIsUnpacked(t *testing.T) { - const imageName = "k8s.gcr.io/pause:3.5" + const imageName = "k8s.gcr.io/pause:3.6" ctx, cancel := testContext(t) defer cancel() @@ -79,9 +79,9 @@ func TestImageIsUnpacked(t *testing.T) { func TestImagePullWithDistSourceLabel(t *testing.T) { var ( - source = "k8s.gcr.io" + source = "registry.k8s.io" repoName = "pause" - tag = "3.5" + tag = "3.6" ) ctx, cancel := testContext(t) @@ -137,7 +137,7 @@ func TestImageUsage(t *testing.T) { t.Skip() } - imageName := "k8s.gcr.io/pause:3.5" 
+ imageName := "k8s.gcr.io/pause:3.6" ctx, cancel := testContext(t) defer cancel() @@ -231,7 +231,7 @@ func TestImageUsage(t *testing.T) { func TestImageSupportedBySnapshotter_Error(t *testing.T) { var unsupportedImage string if runtime.GOOS == "windows" { - unsupportedImage = "k8s.gcr.io/pause-amd64:3.2" + unsupportedImage = "registry.k8s.io/pause-amd64:3.2" } else { unsupportedImage = "mcr.microsoft.com/windows/nanoserver:1809" } diff --git a/integration/client/import_test.go b/integration/client/import_test.go index 22d5603..661024b 100644 --- a/integration/client/import_test.go +++ b/integration/client/import_test.go @@ -22,16 +22,21 @@ import ( "encoding/json" "io" - "io/ioutil" "math/rand" "reflect" + "runtime" "testing" + "time" . "github.com/containerd/containerd" "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/archive/tartest" "github.com/containerd/containerd/images" "github.com/containerd/containerd/images/archive" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/oci" + "github.com/containerd/containerd/platforms" + digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -40,6 +45,18 @@ import ( // TestExportAndImport exports testImage as a tar stream, // and import the tar stream as a new image. func TestExportAndImport(t *testing.T) { + testExportImport(t, testImage) +} + +// TestExportAndImportMultiLayer exports testMultiLayeredImage as a tar stream, +// and import the tar stream as a new image. This should ensure that imported +// images remain sane, and that the Garbage Collector won't delete part of its +// content. 
+func TestExportAndImportMultiLayer(t *testing.T) { + testExportImport(t, testMultiLayeredImage) +} + +func testExportImport(t *testing.T, imageName string) { if testing.Short() { t.Skip() } @@ -52,17 +69,19 @@ func TestExportAndImport(t *testing.T) { } defer client.Close() - _, err = client.Fetch(ctx, testImage) + _, err = client.Fetch(ctx, imageName) if err != nil { t.Fatal(err) } wb := bytes.NewBuffer(nil) - err = client.Export(ctx, wb, archive.WithAllPlatforms(), archive.WithImage(client.ImageService(), testImage)) + err = client.Export(ctx, wb, archive.WithPlatform(platforms.Default()), archive.WithImage(client.ImageService(), imageName)) if err != nil { t.Fatal(err) } + client.ImageService().Delete(ctx, imageName) + opts := []ImportOpt{ WithImageRefTranslator(archive.AddRefPrefix("foo/bar")), } @@ -71,6 +90,41 @@ func TestExportAndImport(t *testing.T) { t.Fatalf("Import failed: %+v", err) } + // We need to unpack the image, especially if it's multilayered. + for _, img := range imgrecs { + image := NewImage(client, img) + + // TODO: Show unpack status + t.Logf("unpacking %s (%s)...", img.Name, img.Target.Digest) + err = image.Unpack(ctx, "") + if err != nil { + t.Fatalf("Error while unpacking image: %+v", err) + } + t.Log("done") + } + + // we're triggering the Garbage Collector to do its job. 
+ ls := client.LeasesService() + l, err := ls.Create(ctx, leases.WithRandomID(), leases.WithExpiration(time.Hour)) + if err != nil { + t.Fatalf("Error while creating lease: %+v", err) + } + if err = ls.Delete(ctx, l, leases.SynchronousDelete); err != nil { + t.Fatalf("Error while deleting lease: %+v", err) + } + + image, err := client.GetImage(ctx, imageName) + if err != nil { + t.Fatal(err) + } + + id := t.Name() + container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image))) + if err != nil { + t.Fatalf("Error while creating container: %+v", err) + } + container.Delete(ctx, WithSnapshotCleanup) + for _, imgrec := range imgrecs { if imgrec.Name == testImage { continue @@ -86,7 +140,7 @@ func TestImport(t *testing.T) { ctx, cancel := testContext(t) defer cancel() - client, err := New(address) + client, err := newClient(t, address) if err != nil { t.Fatal(err) } @@ -98,7 +152,8 @@ func TestImport(t *testing.T) { empty := []byte("{}") version := []byte("1.0") - c1, d2 := createConfig() + c1, d2 := createConfig(runtime.GOOS, runtime.GOARCH) + badConfig, _ := createConfig("foo", "lish") m1, d3, expManifest := createManifest(c1, [][]byte{b1}) @@ -172,6 +227,17 @@ func TestImport(t *testing.T) { checkManifest(ctx, t, imgs[0].Target, nil) }, }, + { + Name: "DockerV2.1-BadOSArch", + Writer: tartest.TarAll( + tc.Dir("bd765cd43e95212f7aa2cab51d0a", 0755), + tc.File("bd765cd43e95212f7aa2cab51d0a/json", empty, 0644), + tc.File("bd765cd43e95212f7aa2cab51d0a/layer.tar", b1, 0644), + tc.File("bd765cd43e95212f7aa2cab51d0a/VERSION", version, 0644), + tc.File("e95212f7aa2cab51d0abd765cd43.json", badConfig, 0644), + tc.File("manifest.json", []byte(`[{"Config":"e95212f7aa2cab51d0abd765cd43.json","RepoTags":["test-import:notlatest", "another/repo:tag"],"Layers":["bd765cd43e95212f7aa2cab51d0a/layer.tar"]}]`), 0644), + ), + }, { Name: "OCI-BadFormat", Writer: tartest.TarAll( @@ -285,7 +351,7 @@ func checkImages(t *testing.T, target 
digest.Digest, actual []images.Image, name } func createContent(size int64, seed int64) ([]byte, digest.Digest) { - b, err := ioutil.ReadAll(io.LimitReader(rand.New(rand.NewSource(seed)), size)) + b, err := io.ReadAll(io.LimitReader(rand.New(rand.NewSource(seed)), size)) if err != nil { panic(err) } @@ -302,10 +368,10 @@ func createContent(size int64, seed int64) ([]byte, digest.Digest) { return b, digest.FromBytes(b) } -func createConfig() ([]byte, digest.Digest) { +func createConfig(osName, archName string) ([]byte, digest.Digest) { image := ocispec.Image{ - OS: "any", - Architecture: "any", + OS: osName, + Architecture: archName, Author: "test", } b, _ := json.Marshal(image) diff --git a/integration/client/lease_test.go b/integration/client/lease_test.go index 9886f99..130ee5c 100644 --- a/integration/client/lease_test.go +++ b/integration/client/lease_test.go @@ -28,10 +28,6 @@ import ( ) func TestLeaseResources(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip() - } - ctx, cancel := testContext(t) defer cancel() @@ -41,11 +37,15 @@ func TestLeaseResources(t *testing.T) { } defer client.Close() + snapshotterName := "native" + if runtime.GOOS == "windows" { + snapshotterName = "windows" + } var ( ls = client.LeasesService() cs = client.ContentStore() imgSrv = client.ImageService() - sn = client.SnapshotService("native") + sn = client.SnapshotService(snapshotterName) ) l, err := ls.Create(ctx, leases.WithRandomID()) @@ -55,9 +55,9 @@ func TestLeaseResources(t *testing.T) { defer ls.Delete(ctx, l, leases.SynchronousDelete) // step 1: download image - imageName := "k8s.gcr.io/pause:3.5" + imageName := "k8s.gcr.io/pause:3.6" - image, err := client.Pull(ctx, imageName, WithPullUnpack, WithPullSnapshotter("native")) + image, err := client.Pull(ctx, imageName, WithPullUnpack, WithPullSnapshotter(snapshotterName)) if err != nil { t.Fatal(err) } @@ -86,7 +86,7 @@ func TestLeaseResources(t *testing.T) { // step 2: reference snapshotter with lease r := 
leases.Resource{ ID: chainID.String(), - Type: "snapshots/native", + Type: "snapshots/" + snapshotterName, } if err := ls.AddResource(ctx, l, r); err != nil { diff --git a/integration/client/restart_monitor_linux_test.go b/integration/client/restart_monitor_linux_test.go deleted file mode 100644 index aa37a1e..0000000 --- a/integration/client/restart_monitor_linux_test.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package client - -import ( - "context" - "fmt" - "syscall" - "testing" - "time" - - . 
"github.com/containerd/containerd" - "github.com/containerd/containerd/containers" - "github.com/containerd/containerd/oci" -) - -// TestRestartMonitor tests restarting containers -// with the restart monitor service plugin -func TestRestartMonitor(t *testing.T) { - const ( - interval = 10 * time.Second - epsilon = 1 * time.Second - ) - configTOML := fmt.Sprintf(` -version = 2 -[plugins] - [plugins."io.containerd.internal.v1.restart"] - interval = "%s" -`, interval.String()) - client, _, cleanup := newDaemonWithConfig(t, configTOML) - defer cleanup() - - var ( - ctx, cancel = testContext(t) - id = t.Name() - ) - defer cancel() - - image, err := client.Pull(ctx, testImage, WithPullUnpack) - if err != nil { - t.Fatal(err) - } - - container, err := client.NewContainer(ctx, id, - WithNewSnapshot(id, image), - WithNewSpec( - oci.WithImageConfig(image), - withProcessArgs("sleep", "infinity"), - ), - withRestartStatus(Running), - ) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx, WithSnapshotCleanup) - - task, err := container.NewTask(ctx, empty()) - if err != nil { - t.Fatal(err) - } - defer task.Delete(ctx, WithProcessKill) - - if err := task.Start(ctx); err != nil { - t.Fatal(err) - } - - task.Kill(ctx, syscall.SIGKILL) - begin := time.Now() - deadline := begin.Add(interval).Add(epsilon) - for time.Now().Before(deadline) { - status, err := task.Status(ctx) - now := time.Now() - if err != nil { - // ErrNotFound is expected here, because the restart monitor - // temporarily removes the task before restarting. - t.Logf("%v: err=%v", now, err) - } else { - t.Logf("%v: status=%q", now, status) - - if status.Status == Running { - elapsed := time.Since(begin) - t.Logf("the task was restarted within %s", elapsed.String()) - return - } - } - time.Sleep(epsilon) - } - t.Fatalf("the task was not restarted in %s + %s", - interval.String(), epsilon.String()) -} - -// withRestartStatus is a copy of "github.com/containerd/containerd/runtime/restart".WithStatus. 
-// This copy is needed because `go test` refuses circular imports. -func withRestartStatus(status ProcessStatus) func(context.Context, *Client, *containers.Container) error { - return func(_ context.Context, _ *Client, c *containers.Container) error { - if c.Labels == nil { - c.Labels = make(map[string]string) - } - c.Labels["containerd.io/restart.status"] = string(status) - return nil - } -} diff --git a/integration/client/restart_monitor_test.go b/integration/client/restart_monitor_test.go new file mode 100644 index 0000000..c97b32d --- /dev/null +++ b/integration/client/restart_monitor_test.go @@ -0,0 +1,234 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package client + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "runtime" + "syscall" + "testing" + "time" + + . 
"github.com/containerd/containerd" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/oci" + "github.com/containerd/containerd/pkg/testutil" + srvconfig "github.com/containerd/containerd/services/server/config" + "github.com/containerd/containerd/sys" + exec "golang.org/x/sys/execabs" +) + +//nolint:unused // Ignore on non-Linux +func newDaemonWithConfig(t *testing.T, configTOML string) (*Client, *daemon, func()) { + if testing.Short() { + t.Skip() + } + testutil.RequiresRoot(t) + var ( + ctrd = daemon{} + configTOMLDecoded srvconfig.Config + buf = bytes.NewBuffer(nil) + ) + + tempDir := t.TempDir() + + configTOMLFile := filepath.Join(tempDir, "config.toml") + if err := os.WriteFile(configTOMLFile, []byte(configTOML), 0600); err != nil { + t.Fatal(err) + } + + if err := srvconfig.LoadConfig(configTOMLFile, &configTOMLDecoded); err != nil { + t.Fatal(err) + } + + address := configTOMLDecoded.GRPC.Address + if address == "" { + if runtime.GOOS == "windows" { + address = fmt.Sprintf(`\\.\pipe\containerd-containerd-test-%s`, filepath.Base(tempDir)) + } else { + address = filepath.Join(tempDir, "containerd.sock") + } + } + args := []string{"-c", configTOMLFile} + if configTOMLDecoded.Root == "" { + args = append(args, "--root", filepath.Join(tempDir, "root")) + } + if configTOMLDecoded.State == "" { + args = append(args, "--state", filepath.Join(tempDir, "state")) + } + if err := ctrd.start("containerd", address, args, buf, buf); err != nil { + t.Fatalf("%v: %s", err, buf.String()) + } + + waitCtx, waitCancel := context.WithTimeout(context.TODO(), 2*time.Second) + client, err := ctrd.waitForStart(waitCtx) + waitCancel() + if err != nil { + ctrd.Kill() + ctrd.Wait() + t.Fatalf("%v: %s", err, buf.String()) + } + + cleanup := func() { + if err := client.Close(); err != nil { + t.Errorf("failed to close client: %v", err) + } + if err := ctrd.Stop(); err != nil { + if err := ctrd.Kill(); err != nil { + t.Errorf("failed to signal containerd: 
%v", err) + } + } + if err := ctrd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { + t.Errorf("failed to wait for: %v", err) + } + } + if err := sys.ForceRemoveAll(tempDir); err != nil { + t.Errorf("failed to remove %s: %v", tempDir, err) + } + if t.Failed() { + t.Log("Daemon output:\n", buf.String()) + } + + // cleaning config-specific resources is up to the caller + } + return client, &ctrd, cleanup +} + +// TestRestartMonitor tests restarting containers +// with the restart monitor service plugin +func TestRestartMonitor(t *testing.T) { + if runtime.GOOS == "windows" { + // This test on Windows encounters the following error in some environments: + // "The process cannot access the file because it is being used by another process. (0x20)" + // Skip this test until this error can be evaluated and the appropriate + // test fix or environment configuration can be determined. + t.Skip("Skipping flaky test on Windows") + } + const ( + interval = 10 * time.Second + epsilon = 1 * time.Second + count = 20 + ) + configTOML := fmt.Sprintf(` +version = 2 +[plugins] + [plugins."io.containerd.internal.v1.restart"] + interval = "%s" +`, interval.String()) + client, _, cleanup := newDaemonWithConfig(t, configTOML) + defer cleanup() + + var ( + ctx, cancel = testContext(t) + id = t.Name() + ) + defer cancel() + + image, err := client.Pull(ctx, testImage, WithPullUnpack) + if err != nil { + t.Fatal(err) + } + + container, err := client.NewContainer(ctx, id, + WithNewSnapshot(id, image), + WithNewSpec( + oci.WithImageConfig(image), + longCommand, + ), + withRestartStatus(Running), + ) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := container.Delete(ctx, WithSnapshotCleanup); err != nil { + t.Logf("failed to delete container: %v", err) + } + }() + + task, err := container.NewTask(ctx, empty()) + if err != nil { + t.Fatal(err) + } + defer func() { + if _, err := task.Delete(ctx, WithProcessKill); err != nil { + t.Logf("failed to delete task: %v", 
err) + } + }() + + if err := task.Start(ctx); err != nil { + t.Fatal(err) + } + + if err := task.Kill(ctx, syscall.SIGKILL); err != nil { + t.Fatal(err) + } + + begin := time.Now() + lastCheck := begin + + expected := begin.Add(interval).Add(epsilon) + + // Deadline determines when check for restart should be aborted. + deadline := begin.Add(interval).Add(epsilon * count) + for { + status, err := task.Status(ctx) + now := time.Now() + if err != nil { + // ErrNotFound is expected here, because the restart monitor + // temporarily removes the task before restarting. + t.Logf("%v: err=%v", now, err) + } else { + t.Logf("%v: status=%q", now, status.Status) + + if status.Status == Running { + break + } + } + + // lastCheck represents the last time the status was seen as not running + lastCheck = now + if lastCheck.After(deadline) { + t.Logf("%v: the task was not restarted", lastCheck) + return + } + time.Sleep(epsilon) + } + + // Use the last timestamp for when the process was seen as not running for the check + if lastCheck.After(expected) { + t.Fatalf("%v: the task was restarted, but it must be before %v", lastCheck, expected) + } + t.Logf("%v: the task was restarted since %v", time.Now(), lastCheck) +} + +// withRestartStatus is a copy of "github.com/containerd/containerd/runtime/restart".WithStatus. +// This copy is needed because `go test` refuses circular imports. 
+func withRestartStatus(status ProcessStatus) func(context.Context, *Client, *containers.Container) error { + return func(_ context.Context, _ *Client, c *containers.Container) error { + if c.Labels == nil { + c.Labels = make(map[string]string) + } + c.Labels["containerd.io/restart.status"] = string(status) + return nil + } +} diff --git a/integration/client/task_opts_unix_test.go b/integration/client/task_opts_unix_test.go index b65fb09..7fccee4 100644 --- a/integration/client/task_opts_unix_test.go +++ b/integration/client/task_opts_unix_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/integration/common.go b/integration/common.go index 81995b0..ca0edf7 100644 --- a/integration/common.go +++ b/integration/common.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -20,11 +18,14 @@ package integration import ( "fmt" - "io/ioutil" + "os" + "testing" + cri "github.com/containerd/containerd/integration/cri-api/pkg/apis" "github.com/pelletier/go-toml" "github.com/sirupsen/logrus" - cri "k8s.io/cri-api/pkg/apis" + "github.com/stretchr/testify/require" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // ImageList holds public image references @@ -35,6 +36,7 @@ type ImageList struct { ResourceConsumer string VolumeCopyUp string VolumeOwnership string + ArgsEscaped string } var ( @@ -46,23 +48,24 @@ var ( func initImages(imageListFile string) { imageList = ImageList{ - Alpine: "docker.io/library/alpine:latest", - BusyBox: "docker.io/library/busybox:latest", - Pause: "k8s.gcr.io/pause:3.5", - ResourceConsumer: "k8s.gcr.io/e2e-test-images/resource-consumer:1.9", - VolumeCopyUp: "gcr.io/k8s-cri-containerd/volume-copy-up:2.0", - VolumeOwnership: "gcr.io/k8s-cri-containerd/volume-ownership:2.0", + Alpine: "ghcr.io/containerd/alpine:3.14.0", + BusyBox: "ghcr.io/containerd/busybox:1.28", + Pause: "registry.k8s.io/pause:3.6", + ResourceConsumer: "registry.k8s.io/e2e-test-images/resource-consumer:1.10", + VolumeCopyUp: 
"ghcr.io/containerd/volume-copy-up:2.1", + VolumeOwnership: "ghcr.io/containerd/volume-ownership:2.1", + ArgsEscaped: "cplatpublic.azurecr.io/args-escaped-test-image-ns:1.0", } if imageListFile != "" { - fileContent, err := ioutil.ReadFile(imageListFile) + fileContent, err := os.ReadFile(imageListFile) if err != nil { - panic(fmt.Errorf("Error reading '%v' file contents: %v", imageList, err)) + panic(fmt.Errorf("error reading '%v' file contents: %v", imageList, err)) } err = toml.Unmarshal(fileContent, &imageList) if err != nil { - panic(fmt.Errorf("Error unmarshalling '%v' TOML file: %v", imageList, err)) + panic(fmt.Errorf("error unmarshalling '%v' TOML file: %v", imageList, err)) } } @@ -87,6 +90,8 @@ const ( VolumeCopyUp // VolumeOwnership image VolumeOwnership + // Test image for ArgsEscaped windows bug + ArgsEscaped ) func initImageMap(imageList ImageList) map[int]string { @@ -97,6 +102,7 @@ func initImageMap(imageList ImageList) map[int]string { images[ResourceConsumer] = imageList.ResourceConsumer images[VolumeCopyUp] = imageList.VolumeCopyUp images[VolumeOwnership] = imageList.VolumeOwnership + images[ArgsEscaped] = imageList.ArgsEscaped return images } @@ -104,3 +110,20 @@ func initImageMap(imageList ImageList) map[int]string { func GetImage(image int) string { return imageMap[image] } + +// EnsureImageExists pulls the given image, ensures that no error was encountered +// while pulling it. 
+func EnsureImageExists(t *testing.T, imageName string) string { + img, err := imageService.ImageStatus(&runtime.ImageSpec{Image: imageName}) + require.NoError(t, err) + if img != nil { + t.Logf("Image %q already exists, not pulling.", imageName) + return img.Id + } + + t.Logf("Pull test image %q", imageName) + imgID, err := imageService.PullImage(&runtime.ImageSpec{Image: imageName}, nil, nil) + require.NoError(t, err) + + return imgID +} diff --git a/integration/container_log_test.go b/integration/container_log_test.go index 56aaa44..60de3e5 100644 --- a/integration/container_log_test.go +++ b/integration/container_log_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -20,7 +18,6 @@ package integration import ( "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -29,35 +26,23 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestContainerLogWithoutTailingNewLine(t *testing.T) { - testPodLogDir, err := ioutil.TempDir("/tmp", "container-log-without-tailing-newline") - require.NoError(t, err) - defer os.RemoveAll(testPodLogDir) + testPodLogDir := t.TempDir() t.Log("Create a sandbox with log directory") - sbConfig := PodSandboxConfig("sandbox", "container-log-without-tailing-newline", + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "container-log-without-tailing-newline", WithPodLogDirectory(testPodLogDir), ) - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() var ( testImage = GetImage(BusyBox) containerName = "test-container" ) - t.Logf("Pull test image %q", testImage) - img, err := imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) - defer func() { - 
assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: img})) - }() + + EnsureImageExists(t, testImage) t.Log("Create a container with log path") cnConfig := ContainerConfig( @@ -85,7 +70,7 @@ func TestContainerLogWithoutTailingNewLine(t *testing.T) { }, time.Second, 30*time.Second)) t.Log("Check container log") - content, err := ioutil.ReadFile(filepath.Join(testPodLogDir, containerName)) + content, err := os.ReadFile(filepath.Join(testPodLogDir, containerName)) assert.NoError(t, err) checkContainerLog(t, string(content), []string{ fmt.Sprintf("%s %s %s", runtime.Stdout, runtime.LogTagPartial, "abcd"), @@ -93,31 +78,19 @@ func TestContainerLogWithoutTailingNewLine(t *testing.T) { } func TestLongContainerLog(t *testing.T) { - testPodLogDir, err := ioutil.TempDir("/tmp", "long-container-log") - require.NoError(t, err) - defer os.RemoveAll(testPodLogDir) + testPodLogDir := t.TempDir() t.Log("Create a sandbox with log directory") - sbConfig := PodSandboxConfig("sandbox", "long-container-log", + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "long-container-log", WithPodLogDirectory(testPodLogDir), ) - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() var ( testImage = GetImage(BusyBox) containerName = "test-container" ) - t.Logf("Pull test image %q", testImage) - img, err := imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) - defer func() { - assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: img})) - }() + + EnsureImageExists(t, testImage) t.Log("Create a container with log path") config, err := CRIConfig() @@ -152,7 +125,7 @@ func TestLongContainerLog(t *testing.T) { }, time.Second, 30*time.Second)) t.Log("Check container log") - content, err := ioutil.ReadFile(filepath.Join(testPodLogDir, 
containerName)) + content, err := os.ReadFile(filepath.Join(testPodLogDir, containerName)) assert.NoError(t, err) checkContainerLog(t, string(content), []string{ fmt.Sprintf("%s %s %s", runtime.Stdout, runtime.LogTagFull, strings.Repeat("a", maxSize-1)), diff --git a/integration/container_restart_test.go b/integration/container_restart_test.go index bce558e..044a4c1 100644 --- a/integration/container_restart_test.go +++ b/integration/container_restart_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -28,13 +26,10 @@ import ( // Test to verify container can be restarted func TestContainerRestart(t *testing.T) { t.Logf("Create a pod config and run sandbox container") - sbConfig := PodSandboxConfig("sandbox1", "restart") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox1", "restart") + + EnsureImageExists(t, pauseImage) + t.Logf("Create a container config and run container in a pod") containerConfig := ContainerConfig( "container1", @@ -60,3 +55,44 @@ func TestContainerRestart(t *testing.T) { require.NoError(t, err) require.NoError(t, runtimeService.StartContainer(cn)) } + +// Test to verify that, after a container fails to start due to a bad command, it can be removed +// and a proper container can be created and started in its stead. 
+func TestFailedContainerRestart(t *testing.T) { + t.Logf("Create a pod config and run sandbox container") + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox1", "restart") + + EnsureImageExists(t, pauseImage) + + t.Logf("Create a container config in a pod with a command that fails") + containerConfig := ContainerConfig( + "container1", + pauseImage, + WithCommand("something-that-doesnt-exist"), + WithTestLabels(), + WithTestAnnotations(), + ) + cn, err := runtimeService.CreateContainer(sb, containerConfig, sbConfig) + require.NoError(t, err) + defer func() { + assert.NoError(t, runtimeService.RemoveContainer(cn)) + }() + require.Error(t, runtimeService.StartContainer(cn)) + defer func() { + assert.NoError(t, runtimeService.StopContainer(cn, 10)) + }() + + t.Logf("Create the container with a proper command") + require.NoError(t, runtimeService.StopContainer(cn, 10)) + require.NoError(t, runtimeService.RemoveContainer(cn)) + + containerConfig = ContainerConfig( + "container1", + pauseImage, + WithTestLabels(), + WithTestAnnotations(), + ) + cn, err = runtimeService.CreateContainer(sb, containerConfig, sbConfig) + require.NoError(t, err) + require.NoError(t, runtimeService.StartContainer(cn)) +} diff --git a/integration/container_stats_test.go b/integration/container_stats_test.go index a3d4022..2380aa4 100644 --- a/integration/container_stats_test.go +++ b/integration/container_stats_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -19,27 +17,24 @@ package integration import ( + "errors" "fmt" goruntime "runtime" "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // Test to verify for a container ID func TestContainerStats(t *testing.T) { t.Logf("Create a pod config and run sandbox container") - sbConfig := PodSandboxConfig("sandbox1", "stats") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox1", "stats") + + EnsureImageExists(t, pauseImage) + t.Logf("Create a container config and run container in a pod") containerConfig := ContainerConfig( "container1", @@ -64,8 +59,7 @@ func TestContainerStats(t *testing.T) { if err != nil { return false, err } - if s.GetWritableLayer().GetUsedBytes().GetValue() != 0 && - s.GetWritableLayer().GetInodesUsed().GetValue() != 0 { + if s.GetWritableLayer().GetUsedBytes().GetValue() != 0 { return true, nil } return false, nil @@ -78,20 +72,10 @@ func TestContainerStats(t *testing.T) { // Test to verify if the consumed stats are correct. 
func TestContainerConsumedStats(t *testing.T) { t.Logf("Create a pod config and run sandbox container") - sbConfig := PodSandboxConfig("sandbox1", "stats") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox1", "stats") testImage := GetImage(ResourceConsumer) - img, err := imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) - defer func() { - assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: img})) - }() + EnsureImageExists(t, testImage) t.Logf("Create a container config and run container in a pod") containerConfig := ContainerConfig( @@ -154,14 +138,15 @@ func TestContainerConsumedStats(t *testing.T) { // Test to verify filtering without any filter func TestContainerListStats(t *testing.T) { + var ( + stats []*runtime.ContainerStats + err error + ) t.Logf("Create a pod config and run sandbox container") - sbConfig := PodSandboxConfig("running-pod", "statsls") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "running-pod", "statsls") + + EnsureImageExists(t, pauseImage) + t.Logf("Create a container config and run containers in a pod") containerConfigMap := make(map[string]*runtime.ContainerConfig) for i := 0; i < 3; i++ { @@ -185,15 +170,13 @@ func TestContainerListStats(t *testing.T) { } t.Logf("Fetch all container stats") - var stats []*runtime.ContainerStats require.NoError(t, Eventually(func() (bool, error) { stats, err = runtimeService.ListContainerStats(&runtime.ContainerStatsFilter{}) if err != nil { return false, err } for _, s := 
range stats { - if s.GetWritableLayer().GetUsedBytes().GetValue() == 0 && - s.GetWritableLayer().GetInodesUsed().GetValue() == 0 { + if s.GetWritableLayer().GetUsedBytes().GetValue() == 0 { return false, nil } } @@ -209,14 +192,15 @@ func TestContainerListStats(t *testing.T) { // Test to verify filtering given a specific container ID // TODO Convert the filter tests into table driven tests and unit tests func TestContainerListStatsWithIdFilter(t *testing.T) { + var ( + stats []*runtime.ContainerStats + err error + ) t.Logf("Create a pod config and run sandbox container") - sbConfig := PodSandboxConfig("running-pod", "statsls") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "running-pod", "statsls") + + EnsureImageExists(t, pauseImage) + t.Logf("Create a container config and run containers in a pod") containerConfigMap := make(map[string]*runtime.ContainerConfig) for i := 0; i < 3; i++ { @@ -240,7 +224,6 @@ func TestContainerListStatsWithIdFilter(t *testing.T) { } t.Logf("Fetch container stats for each container with Filter") - var stats []*runtime.ContainerStats for id := range containerConfigMap { require.NoError(t, Eventually(func() (bool, error) { stats, err = runtimeService.ListContainerStats( @@ -251,8 +234,7 @@ func TestContainerListStatsWithIdFilter(t *testing.T) { if len(stats) != 1 { return false, errors.New("unexpected stats length") } - if stats[0].GetWritableLayer().GetUsedBytes().GetValue() != 0 && - stats[0].GetWritableLayer().GetInodesUsed().GetValue() != 0 { + if stats[0].GetWritableLayer().GetUsedBytes().GetValue() != 0 { return true, nil } return false, nil @@ -269,14 +251,15 @@ func TestContainerListStatsWithIdFilter(t *testing.T) { // Test to verify filtering given a specific Sandbox ID. 
Stats for // all the containers in a pod should be returned func TestContainerListStatsWithSandboxIdFilter(t *testing.T) { + var ( + stats []*runtime.ContainerStats + err error + ) t.Logf("Create a pod config and run sandbox container") - sbConfig := PodSandboxConfig("running-pod", "statsls") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "running-pod", "statsls") + + EnsureImageExists(t, pauseImage) + t.Logf("Create a container config and run containers in a pod") containerConfigMap := make(map[string]*runtime.ContainerConfig) for i := 0; i < 3; i++ { @@ -300,7 +283,6 @@ func TestContainerListStatsWithSandboxIdFilter(t *testing.T) { } t.Logf("Fetch container stats for each container with Filter") - var stats []*runtime.ContainerStats require.NoError(t, Eventually(func() (bool, error) { stats, err = runtimeService.ListContainerStats( &runtime.ContainerStatsFilter{PodSandboxId: sb}) @@ -310,12 +292,16 @@ func TestContainerListStatsWithSandboxIdFilter(t *testing.T) { if len(stats) != 3 { return false, errors.New("unexpected stats length") } - if stats[0].GetWritableLayer().GetUsedBytes().GetValue() != 0 && - stats[0].GetWritableLayer().GetInodesUsed().GetValue() != 0 { - return true, nil + + for _, containerStats := range stats { + // Wait for stats on all containers, not just the first one in the list. + if containerStats.GetWritableLayer().GetUsedBytes().GetValue() == 0 { + return false, nil + } } - return false, nil - }, time.Second, 30*time.Second)) + return true, nil + }, time.Second, 45*time.Second)) + // TODO(claudiub): Reduce the timer above to 30 seconds once Windows flakiness has been addressed. 
t.Logf("Verify container stats for sandbox %q", sb) for _, s := range stats { testStats(t, s, containerConfigMap[s.GetAttributes().GetId()]) @@ -325,14 +311,15 @@ func TestContainerListStatsWithSandboxIdFilter(t *testing.T) { // Test to verify filtering given a specific container ID and // sandbox ID func TestContainerListStatsWithIdSandboxIdFilter(t *testing.T) { + var ( + stats []*runtime.ContainerStats + err error + ) t.Logf("Create a pod config and run sandbox container") - sbConfig := PodSandboxConfig("running-pod", "statsls") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "running-pod", "statsls") + + EnsureImageExists(t, pauseImage) + t.Logf("Create container config and run containers in a pod") containerConfigMap := make(map[string]*runtime.ContainerConfig) for i := 0; i < 3; i++ { @@ -355,7 +342,6 @@ func TestContainerListStatsWithIdSandboxIdFilter(t *testing.T) { }() } t.Logf("Fetch container stats for sandbox ID and container ID filter") - var stats []*runtime.ContainerStats for id, config := range containerConfigMap { require.NoError(t, Eventually(func() (bool, error) { stats, err = runtimeService.ListContainerStats( @@ -366,8 +352,7 @@ func TestContainerListStatsWithIdSandboxIdFilter(t *testing.T) { if len(stats) != 1 { return false, errors.New("unexpected stats length") } - if stats[0].GetWritableLayer().GetUsedBytes().GetValue() != 0 && - stats[0].GetWritableLayer().GetInodesUsed().GetValue() != 0 { + if stats[0].GetWritableLayer().GetUsedBytes().GetValue() != 0 { return true, nil } return false, nil @@ -387,10 +372,9 @@ func TestContainerListStatsWithIdSandboxIdFilter(t *testing.T) { return false, err } if len(stats) != 1 { - return false, errors.New("unexpected stats length") + return false, fmt.Errorf("expected only one 
stat, but got %v", stats) } - if stats[0].GetWritableLayer().GetUsedBytes().GetValue() != 0 && - stats[0].GetWritableLayer().GetInodesUsed().GetValue() != 0 { + if stats[0].GetWritableLayer().GetUsedBytes().GetValue() != 0 { return true, nil } return false, nil @@ -421,5 +405,9 @@ func testStats(t *testing.T, require.NotEmpty(t, s.GetWritableLayer().GetTimestamp()) require.NotEmpty(t, s.GetWritableLayer().GetFsId().GetMountpoint()) require.NotEmpty(t, s.GetWritableLayer().GetUsedBytes().GetValue()) - require.NotEmpty(t, s.GetWritableLayer().GetInodesUsed().GetValue()) + + // Windows does not collect inodes stats. + if goruntime.GOOS != "windows" { + require.NotEmpty(t, s.GetWritableLayer().GetInodesUsed().GetValue()) + } } diff --git a/integration/container_stop_test.go b/integration/container_stop_test.go index 6c5c8ce..b2a2068 100644 --- a/integration/container_stop_test.go +++ b/integration/container_stop_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -20,12 +18,13 @@ package integration import ( "context" + goruntime "runtime" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestSharedPidMultiProcessContainerStop(t *testing.T) { @@ -46,12 +45,8 @@ func TestSharedPidMultiProcessContainerStop(t *testing.T) { testImage = GetImage(BusyBox) containerName = "test-container" ) - t.Logf("Pull test image %q", testImage) - img, err := imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) - defer func() { - assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: img})) - }() + + EnsureImageExists(t, testImage) t.Log("Create a multi-process container") cnConfig := ContainerConfig( @@ -77,25 +72,18 @@ func TestSharedPidMultiProcessContainerStop(t *testing.T) { } func TestContainerStopCancellation(t *testing.T) { + if goruntime.GOOS == "windows" { + t.Skip("Skipped on Windows.") + } t.Log("Create a pod sandbox") - sbConfig := PodSandboxConfig("sandbox", "cancel-container-stop") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "cancel-container-stop") var ( testImage = GetImage(BusyBox) containerName = "test-container" ) - t.Logf("Pull test image %q", testImage) - img, err := imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) - defer func() { - assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: img})) - }() + + EnsureImageExists(t, testImage) t.Log("Create a container which traps sigterm") cnConfig := ContainerConfig( diff --git a/integration/container_update_resources_test.go 
b/integration/container_update_resources_test.go index 83850e0..1d05800 100644 --- a/integration/container_update_resources_test.go +++ b/integration/container_update_resources_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -19,14 +20,19 @@ package integration import ( + "bytes" + "os" + "strings" "testing" "github.com/containerd/cgroups" + cgroupsv2 "github.com/containerd/cgroups/v2" + "github.com/containerd/containerd" runtimespec "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func checkMemoryLimit(t *testing.T, spec *runtimespec.Spec, memLimit int64) { @@ -38,15 +44,194 @@ func checkMemoryLimit(t *testing.T, spec *runtimespec.Spec, memLimit int64) { assert.Equal(t, memLimit, *spec.Linux.Resources.Memory.Limit) } -func TestUpdateContainerResources(t *testing.T) { +func checkMemorySwapLimit(t *testing.T, spec *runtimespec.Spec, memLimit *int64) { + require.NotNil(t, spec) + require.NotNil(t, spec.Linux) + require.NotNil(t, spec.Linux.Resources) + require.NotNil(t, spec.Linux.Resources.Memory) + if memLimit == nil { + require.Nil(t, spec.Linux.Resources.Memory.Swap) + } else { + require.NotNil(t, spec.Linux.Resources.Memory.Swap) + assert.Equal(t, *memLimit, *spec.Linux.Resources.Memory.Swap) + } +} + +func checkMemoryLimitInContainerStatus(t *testing.T, status *runtime.ContainerStatus, memLimit int64) { + t.Helper() + require.NotNil(t, status) + require.NotNil(t, status.Resources) + require.NotNil(t, status.Resources.Linux) + require.NotNil(t, status.Resources.Linux.MemoryLimitInBytes) + assert.Equal(t, memLimit, status.Resources.Linux.MemoryLimitInBytes) +} + +func getCgroupSwapLimitForTask(t *testing.T, task containerd.Task) uint64 { + if cgroups.Mode() == cgroups.Unified { + groupPath, err := cgroupsv2.PidGroupPath(int(task.Pid())) + if err 
!= nil { + t.Fatal(err) + } + cgroup2, err := cgroupsv2.LoadManager("/sys/fs/cgroup", groupPath) + if err != nil { + t.Fatal(err) + } + stat, err := cgroup2.Stat() + if err != nil { + t.Fatal(err) + } + return stat.Memory.SwapLimit + stat.Memory.UsageLimit + } + cgroup, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid()))) + if err != nil { + t.Fatal(err) + } + stat, err := cgroup.Stat(cgroups.IgnoreNotExist) + if err != nil { + t.Fatal(err) + } + return stat.Memory.HierarchicalSwapLimit +} + +func getCgroupMemoryLimitForTask(t *testing.T, task containerd.Task) uint64 { + if cgroups.Mode() == cgroups.Unified { + groupPath, err := cgroupsv2.PidGroupPath(int(task.Pid())) + if err != nil { + t.Fatal(err) + } + cgroup2, err := cgroupsv2.LoadManager("/sys/fs/cgroup", groupPath) + if err != nil { + t.Fatal(err) + } + stat, err := cgroup2.Stat() + if err != nil { + t.Fatal(err) + } + return stat.Memory.UsageLimit + } + + cgroup, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid()))) + if err != nil { + t.Fatal(err) + } + stat, err := cgroup.Stat(cgroups.IgnoreNotExist) + if err != nil { + t.Fatal(err) + } + return stat.Memory.Usage.Limit +} + +func isSwapLikelyEnabled() bool { + // Check whether swap is enabled. + swapFile := "/proc/swaps" + swapData, err := os.ReadFile(swapFile) + if err != nil { + // We can't read the file or it doesn't exist, assume we don't have swap. + return false + } + + swapData = bytes.TrimSpace(swapData) // extra trailing \n + swapLines := strings.Split(string(swapData), "\n") + + // If there is more than one line (table headers) in /proc/swaps, swap is enabled + if len(swapLines) <= 1 { + return false + } + + // Linux Kernel's prior to 5.8 can disable swap accounting and is disabled + // by default on Ubuntu. Most systems that run with cgroupsv2 enabled likely + // have swap accounting enabled, here we assume that is true when running with + // cgroupsv2 and check on cgroupsv1. 
+ if cgroups.Mode() == cgroups.Unified { + return true + } + + _, err = os.Stat("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes") + // Assume any error means this test can't run for now. + return err == nil +} + +func TestUpdateContainerResources_MemorySwap(t *testing.T) { + if !isSwapLikelyEnabled() { + t.Skipf("Swap or swap accounting are not enabled. Swap is required for this test") + return + } + t.Log("Create a sandbox") - sbConfig := PodSandboxConfig("sandbox", "update-container-resources") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "update-container-swap-resources") + + EnsureImageExists(t, pauseImage) + + memoryLimit := int64(128 * 1024 * 1024) + baseSwapLimit := int64(200 * 1024 * 1024) + increasedSwapLimit := int64(256 * 1024 * 1024) + + expectedBaseSwap := baseSwapLimit + expectedIncreasedSwap := increasedSwapLimit + + t.Log("Create a container with memory limit but no swap") + cnConfig := ContainerConfig( + "container", + pauseImage, + WithResources(&runtime.LinuxContainerResources{ + MemoryLimitInBytes: memoryLimit, + MemorySwapLimitInBytes: baseSwapLimit, + }), + ) + cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig) require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + + t.Log("Check memory limit in container OCI spec") + container, err := containerdClient.LoadContainer(context.Background(), cn) + require.NoError(t, err) + spec, err := container.Spec(context.Background()) + require.NoError(t, err) + checkMemoryLimit(t, spec, memoryLimit) + checkMemorySwapLimit(t, spec, &expectedBaseSwap) + + t.Log("Check memory limit in container OCI spec") + spec, err = container.Spec(context.Background()) + require.NoError(t, err) + sw1 := baseSwapLimit + checkMemorySwapLimit(t, spec, &sw1) + + t.Log("Start the container") + require.NoError(t, 
runtimeService.StartContainer(cn)) + task, err := container.Task(context.Background(), nil) + require.NoError(t, err) + + t.Log("Check memory limit in cgroup") + memLimit := getCgroupMemoryLimitForTask(t, task) + swapLimit := getCgroupSwapLimitForTask(t, task) + assert.Equal(t, uint64(memoryLimit), memLimit) + assert.Equal(t, uint64(expectedBaseSwap), swapLimit) + + t.Log("Update container memory limit after started") + err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{ + MemorySwapLimitInBytes: increasedSwapLimit, + }, nil) + require.NoError(t, err) + + t.Log("Check memory limit in container OCI spec") + spec, err = container.Spec(context.Background()) + require.NoError(t, err) + checkMemorySwapLimit(t, spec, &increasedSwapLimit) + + t.Log("Check memory limit in cgroup") + swapLimit = getCgroupSwapLimitForTask(t, task) + assert.Equal(t, uint64(expectedIncreasedSwap), swapLimit) +} + +func TestUpdateContainerResources_MemoryLimit(t *testing.T) { + // TODO(claudiub): Make this test work once https://github.com/microsoft/hcsshim/pull/931 merges. 
+ t.Log("Create a sandbox") + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "update-container-resources") + + EnsureImageExists(t, pauseImage) + + expectedSwapLimit := func(memoryLimit int64) *int64 { + return &memoryLimit + } t.Log("Create a container with memory limit") cnConfig := ContainerConfig( @@ -65,17 +250,19 @@ func TestUpdateContainerResources(t *testing.T) { spec, err := container.Spec(context.Background()) require.NoError(t, err) checkMemoryLimit(t, spec, 200*1024*1024) + checkMemorySwapLimit(t, spec, expectedSwapLimit(200*1024*1024)) t.Log("Update container memory limit after created") err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{ MemoryLimitInBytes: 400 * 1024 * 1024, - }) + }, nil) require.NoError(t, err) t.Log("Check memory limit in container OCI spec") spec, err = container.Spec(context.Background()) require.NoError(t, err) checkMemoryLimit(t, spec, 400*1024*1024) + checkMemorySwapLimit(t, spec, expectedSwapLimit(400*1024*1024)) t.Log("Start the container") require.NoError(t, runtimeService.StartContainer(cn)) @@ -88,20 +275,73 @@ func TestUpdateContainerResources(t *testing.T) { stat, err := cgroup.Stat(cgroups.IgnoreNotExist) require.NoError(t, err) assert.Equal(t, uint64(400*1024*1024), stat.Memory.Usage.Limit) + swapLimit := getCgroupSwapLimitForTask(t, task) + assert.Equal(t, uint64(400*1024*1024), swapLimit) t.Log("Update container memory limit after started") err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{ MemoryLimitInBytes: 800 * 1024 * 1024, - }) + }, nil) require.NoError(t, err) t.Log("Check memory limit in container OCI spec") spec, err = container.Spec(context.Background()) require.NoError(t, err) checkMemoryLimit(t, spec, 800*1024*1024) + checkMemorySwapLimit(t, spec, expectedSwapLimit(800*1024*1024)) t.Log("Check memory limit in cgroup") stat, err = cgroup.Stat(cgroups.IgnoreNotExist) require.NoError(t, err) assert.Equal(t, uint64(800*1024*1024), 
stat.Memory.Usage.Limit) + swapLimit = getCgroupSwapLimitForTask(t, task) + assert.Equal(t, uint64(800*1024*1024), swapLimit) +} + +func TestUpdateContainerResources_StatusUpdated(t *testing.T) { + t.Log("Create a sandbox") + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "update-container-resources") + + EnsureImageExists(t, pauseImage) + + t.Log("Create a container with memory limit") + cnConfig := ContainerConfig( + "container", + pauseImage, + WithResources(&runtime.LinuxContainerResources{ + MemoryLimitInBytes: 200 * 1024 * 1024, + }), + ) + cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig) + require.NoError(t, err) + + t.Log("Check memory limit in container status") + status, err := runtimeService.ContainerStatus(cn) + checkMemoryLimitInContainerStatus(t, status, 200*1024*1024) + require.NoError(t, err) + + t.Log("Update container memory limit after created") + err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{ + MemoryLimitInBytes: 400 * 1024 * 1024, + }, nil) + require.NoError(t, err) + + t.Log("Check memory limit in container status") + status, err = runtimeService.ContainerStatus(cn) + checkMemoryLimitInContainerStatus(t, status, 400*1024*1024) + require.NoError(t, err) + + t.Log("Start the container") + require.NoError(t, runtimeService.StartContainer(cn)) + + t.Log("Update container memory limit after started") + err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{ + MemoryLimitInBytes: 800 * 1024 * 1024, + }, nil) + require.NoError(t, err) + + t.Log("Check memory limit in container status") + status, err = runtimeService.ContainerStatus(cn) + checkMemoryLimitInContainerStatus(t, status, 800*1024*1024) + require.NoError(t, err) } diff --git a/integration/container_volume_test.go b/integration/container_volume_test.go new file mode 100644 index 0000000..1792be6 --- /dev/null +++ b/integration/container_volume_test.go @@ -0,0 +1,140 @@ +/* + Copyright The containerd 
Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package integration + +import ( + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" +) + +func createRegularFile(basePath, content string) (string, error) { + newFolder := filepath.Join(basePath, "regular") + err := os.Mkdir(newFolder, 0755) + if err != nil { + return "", err + } + + newFile := filepath.Join(newFolder, "foo.txt") + err = os.WriteFile(newFile, []byte(content), 0644) + return filepath.Join("regular", "foo.txt"), err +} + +func fileInSymlinkedFolder(basePath, targetFile string) (string, error) { + symlinkFolder := filepath.Join(basePath, "symlink_folder") + err := os.Symlink(filepath.Dir(targetFile), symlinkFolder) + + return filepath.Join(symlinkFolder, filepath.Base(targetFile)), err +} + +func symlinkedFile(basePath, targetFile string) (string, error) { + symlinkFile := filepath.Join(basePath, "symlink_file") + err := os.Symlink(targetFile, symlinkFile) + + return symlinkFile, err +} + +func symlinkedFileInSymlinkedFolder(basePath, targetFile string) (string, error) { + symlinkFolderFile, err := fileInSymlinkedFolder(basePath, targetFile) + if err != nil { + return "", err + } + + return symlinkedFile(basePath, symlinkFolderFile) +} + +func TestContainerSymlinkVolumes(t *testing.T) { + for name, testCase := range map[string]struct { + createFileFn func(basePath, targetFile string) 
(string, error) + }{ + // Create difference file / symlink scenarios: + // - symlink_file -> regular_folder/regular_file + // - symlink_folder/regular_file (symlink_folder -> regular_folder) + // - symlink_file -> symlink_folder/regular_file (symlink_folder -> regular_folder) + "file in symlinked folder": { + createFileFn: fileInSymlinkedFolder, + }, + "symlinked file": { + createFileFn: symlinkedFile, + }, + "symlinkedFileInSymlinkedFolder": { + createFileFn: symlinkedFileInSymlinkedFolder, + }, + } { + testCase := testCase // capture range variable + t.Run(name, func(t *testing.T) { + testPodLogDir := t.TempDir() + testVolDir := t.TempDir() + + content := "hello there\n" + regularFile, err := createRegularFile(testVolDir, content) + require.NoError(t, err) + + file, err := testCase.createFileFn(testVolDir, regularFile) + require.NoError(t, err) + + t.Log("Create test sandbox with log directory") + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "test-symlink", + WithPodLogDirectory(testPodLogDir), + ) + + var ( + testImage = GetImage(BusyBox) + containerName = "test-container" + ) + + EnsureImageExists(t, testImage) + + t.Log("Create a container with a symlink volume mount") + cnConfig := ContainerConfig( + containerName, + testImage, + WithCommand("cat", "/mounted_file"), + WithLogPath(containerName), + WithVolumeMount(file, "/mounted_file"), + ) + + cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig) + require.NoError(t, err) + + t.Log("Start the container") + require.NoError(t, runtimeService.StartContainer(cn)) + + t.Log("Wait for container to finish running") + require.NoError(t, Eventually(func() (bool, error) { + s, err := runtimeService.ContainerStatus(cn) + if err != nil { + return false, err + } + if s.GetState() == runtime.ContainerState_CONTAINER_EXITED { + return true, nil + } + return false, nil + }, time.Second, 30*time.Second)) + + output, err := os.ReadFile(filepath.Join(testPodLogDir, containerName)) + assert.NoError(t, 
err) + + assert.Contains(t, string(output), content) + }) + } +} diff --git a/integration/container_without_image_ref_test.go b/integration/container_without_image_ref_test.go index e284bf6..0107102 100644 --- a/integration/container_without_image_ref_test.go +++ b/integration/container_without_image_ref_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -23,30 +21,20 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // Test container lifecycle can work without image references. func TestContainerLifecycleWithoutImageRef(t *testing.T) { t.Log("Create a sandbox") - sbConfig := PodSandboxConfig("sandbox", "container-lifecycle-without-image-ref") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "container-lifecycle-without-image-ref") var ( testImage = GetImage(BusyBox) containerName = "test-container" ) - t.Log("Pull test image") - img, err := imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) - defer func() { - assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: img})) - }() + + img := EnsureImageExists(t, testImage) t.Log("Create test container") cnConfig := ContainerConfig( diff --git a/integration/containerd_image_test.go b/integration/containerd_image_test.go index 9d81b38..b76ae45 100644 --- a/integration/containerd_image_test.go +++ b/integration/containerd_image_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -19,18 +17,18 @@ package integration import ( + "errors" + "fmt" "testing" "time" - "golang.org/x/net/context" - "github.com/containerd/containerd" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/namespaces" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // Test to test the CRI plugin should see image pulled into containerd directly. @@ -46,7 +44,7 @@ func TestContainerdImage(t *testing.T) { } t.Logf("pull the image into containerd") - _, err = containerdClient.Pull(ctx, testImage, containerd.WithPullUnpack) + _, err = containerdClient.Pull(ctx, testImage, containerd.WithPullUnpack, containerd.WithPullLabel("foo", "bar")) assert.NoError(t, err) defer func() { // Make sure the image is cleaned up in any case. @@ -79,10 +77,10 @@ func TestContainerdImage(t *testing.T) { } if len(img.RepoTags) != 1 { // RepoTags must have been populated correctly. 
- return false, errors.Errorf("unexpected repotags: %+v", img.RepoTags) + return false, fmt.Errorf("unexpected repotags: %+v", img.RepoTags) } if img.RepoTags[0] != testImage { - return false, errors.Errorf("unexpected repotag %q", img.RepoTags[0]) + return false, fmt.Errorf("unexpected repotag %q", img.RepoTags[0]) } return true, nil } @@ -123,19 +121,18 @@ func TestContainerdImage(t *testing.T) { assert.NoError(t, err) assert.Equal(t, imgByID.Labels()["io.cri-containerd.image"], "managed") + t.Logf("the image should be labeled") + img, err := containerdClient.GetImage(ctx, testImage) + assert.NoError(t, err) + assert.Equal(t, img.Labels()["foo"], "bar") + t.Logf("should be able to start container with the image") - sbConfig := PodSandboxConfig("sandbox", "containerd-image") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "containerd-image") cnConfig := ContainerConfig( "test-container", id, - WithCommand("top"), + WithCommand("sleep", "300"), ) cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig) require.NoError(t, err) @@ -145,6 +142,9 @@ func TestContainerdImage(t *testing.T) { if err != nil { return false, err } + if s.Resources == nil || (s.Resources.Linux == nil && s.Resources.Windows == nil) { + return false, fmt.Errorf("No Resource field in container status: %+v", s) + } return s.GetState() == runtime.ContainerState_CONTAINER_RUNNING, nil } require.NoError(t, Eventually(checkContainer, 100*time.Millisecond, 10*time.Second)) @@ -185,13 +185,8 @@ func TestContainerdImageInOtherNamespaces(t *testing.T) { } require.NoError(t, Consistently(checkImage, 100*time.Millisecond, time.Second)) - sbConfig := PodSandboxConfig("sandbox", "test") - t.Logf("pull the image into cri plugin") - id, err := 
imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) - defer func() { - assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: id})) - }() + PodSandboxConfigWithCleanup(t, "sandbox", "test") + EnsureImageExists(t, testImage) t.Logf("cri plugin should see the image now") img, err := imageService.ImageStatus(&runtime.ImageSpec{Image: testImage}) @@ -211,3 +206,21 @@ func TestContainerdImageInOtherNamespaces(t *testing.T) { } assert.NoError(t, Consistently(checkImage, 100*time.Millisecond, time.Second)) } + +func TestContainerdSandboxImage(t *testing.T) { + var pauseImage = GetImage(Pause) + ctx := context.Background() + + t.Log("make sure the pause image exist") + pauseImg, err := containerdClient.GetImage(ctx, pauseImage) + require.NoError(t, err) + t.Log("ensure correct labels are set on pause image") + assert.Equal(t, pauseImg.Labels()["io.cri-containerd.pinned"], "pinned") + + t.Log("pause image should be seen by cri plugin") + pimg, err := imageService.ImageStatus(&runtime.ImageSpec{Image: pauseImage}) + require.NoError(t, err) + require.NotNil(t, pimg) + t.Log("verify pinned field is set for pause image") + assert.True(t, pimg.Pinned) +} diff --git a/vendor/k8s.io/cri-api/pkg/apis/services.go b/integration/cri-api/pkg/apis/services.go similarity index 56% rename from vendor/k8s.io/cri-api/pkg/apis/services.go rename to integration/cri-api/pkg/apis/services.go index 9a22ecb..6850093 100644 --- a/vendor/k8s.io/cri-api/pkg/apis/services.go +++ b/integration/cri-api/pkg/apis/services.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + /* Copyright 2016 The Kubernetes Authors. @@ -14,48 +30,51 @@ See the License for the specific language governing permissions and limitations under the License. */ +// this file is from k8s.io/cri-api/pkg/apis only it points to v1 as the runtimeapi not v1alpha + package cri import ( "time" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "google.golang.org/grpc" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" ) // RuntimeVersioner contains methods for runtime name, version and API version. type RuntimeVersioner interface { // Version returns the runtime name, runtime version and runtime API version - Version(apiVersion string) (*runtimeapi.VersionResponse, error) + Version(apiVersion string, opts ...grpc.CallOption) (*runtimeapi.VersionResponse, error) } // ContainerManager contains methods to manipulate containers managed by a // container runtime. The methods are thread-safe. type ContainerManager interface { // CreateContainer creates a new container in specified PodSandbox. - CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) + CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig, opts ...grpc.CallOption) (string, error) // StartContainer starts the container. - StartContainer(containerID string) error + StartContainer(containerID string, opts ...grpc.CallOption) error // StopContainer stops a running container with a grace period (i.e., timeout). 
- StopContainer(containerID string, timeout int64) error + StopContainer(containerID string, timeout int64, opts ...grpc.CallOption) error // RemoveContainer removes the container. - RemoveContainer(containerID string) error + RemoveContainer(containerID string, opts ...grpc.CallOption) error // ListContainers lists all containers by filters. - ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) + ListContainers(filter *runtimeapi.ContainerFilter, opts ...grpc.CallOption) ([]*runtimeapi.Container, error) // ContainerStatus returns the status of the container. - ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) + ContainerStatus(containerID string, opts ...grpc.CallOption) (*runtimeapi.ContainerStatus, error) // UpdateContainerResources updates the cgroup resources for the container. - UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources) error + UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, windowsResources *runtimeapi.WindowsContainerResources, opts ...grpc.CallOption) error // ExecSync executes a command in the container, and returns the stdout output. // If command exits with a non-zero exit code, an error is returned. - ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) + ExecSync(containerID string, cmd []string, timeout time.Duration, opts ...grpc.CallOption) (stdout []byte, stderr []byte, err error) // Exec prepares a streaming endpoint to execute a command in the container, and returns the address. - Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) + Exec(req *runtimeapi.ExecRequest, opts ...grpc.CallOption) (*runtimeapi.ExecResponse, error) // Attach prepares a streaming endpoint to attach to a running container, and returns the address. 
- Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) + Attach(req *runtimeapi.AttachRequest, opts ...grpc.CallOption) (*runtimeapi.AttachResponse, error) // ReopenContainerLog asks runtime to reopen the stdout/stderr log file // for the container. If it returns error, new container log file MUST NOT // be created. - ReopenContainerLog(ContainerID string) error + ReopenContainerLog(ContainerID string, opts ...grpc.CallOption) error } // PodSandboxManager contains methods for operating on PodSandboxes. The methods @@ -63,19 +82,19 @@ type ContainerManager interface { type PodSandboxManager interface { // RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure // the sandbox is in ready state. - RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) + RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string, opts ...grpc.CallOption) (string, error) // StopPodSandbox stops the sandbox. If there are any running containers in the // sandbox, they should be force terminated. - StopPodSandbox(podSandboxID string) error + StopPodSandbox(podSandboxID string, opts ...grpc.CallOption) error // RemovePodSandbox removes the sandbox. If there are running containers in the // sandbox, they should be forcibly removed. - RemovePodSandbox(podSandboxID string) error + RemovePodSandbox(podSandboxID string, opts ...grpc.CallOption) error // PodSandboxStatus returns the Status of the PodSandbox. - PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) + PodSandboxStatus(podSandboxID string, opts ...grpc.CallOption) (*runtimeapi.PodSandboxStatus, error) // ListPodSandbox returns a list of Sandbox. 
- ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) + ListPodSandbox(filter *runtimeapi.PodSandboxFilter, opts ...grpc.CallOption) ([]*runtimeapi.PodSandbox, error) // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address. - PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) + PortForward(req *runtimeapi.PortForwardRequest, opts ...grpc.CallOption) (*runtimeapi.PortForwardResponse, error) } // ContainerStatsManager contains methods for retrieving the container @@ -83,9 +102,9 @@ type PodSandboxManager interface { type ContainerStatsManager interface { // ContainerStats returns stats of the container. If the container does not // exist, the call returns an error. - ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) + ContainerStats(containerID string, opts ...grpc.CallOption) (*runtimeapi.ContainerStats, error) // ListContainerStats returns stats of all running containers. - ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) + ListContainerStats(filter *runtimeapi.ContainerStatsFilter, opts ...grpc.CallOption) ([]*runtimeapi.ContainerStats, error) } // RuntimeService interface should be implemented by a container runtime. @@ -97,9 +116,9 @@ type RuntimeService interface { ContainerStatsManager // UpdateRuntimeConfig updates runtime configuration if specified - UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error + UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig, opts ...grpc.CallOption) error // Status returns the status of the runtime. - Status() (*runtimeapi.RuntimeStatus, error) + Status(opts ...grpc.CallOption) (*runtimeapi.RuntimeStatus, error) } // ImageManagerService interface should be implemented by a container image @@ -107,13 +126,13 @@ type RuntimeService interface { // The methods should be thread-safe. 
type ImageManagerService interface { // ListImages lists the existing images. - ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) + ListImages(filter *runtimeapi.ImageFilter, opts ...grpc.CallOption) ([]*runtimeapi.Image, error) // ImageStatus returns the status of the image. - ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) + ImageStatus(image *runtimeapi.ImageSpec, opts ...grpc.CallOption) (*runtimeapi.Image, error) // PullImage pulls an image with the authentication config. - PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) + PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig, opts ...grpc.CallOption) (string, error) // RemoveImage removes the image. - RemoveImage(image *runtimeapi.ImageSpec) error + RemoveImage(image *runtimeapi.ImageSpec, opts ...grpc.CallOption) error // ImageFsInfo returns information of the filesystem that is used to store images. - ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) + ImageFsInfo(opts ...grpc.CallOption) ([]*runtimeapi.FilesystemUsage, error) } diff --git a/integration/duplicate_name_test.go b/integration/duplicate_name_test.go index 6c85320..9b44648 100644 --- a/integration/duplicate_name_test.go +++ b/integration/duplicate_name_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -21,24 +19,19 @@ package integration import ( "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestDuplicateName(t *testing.T) { t.Logf("Create a sandbox") - sbConfig := PodSandboxConfig("sandbox", "duplicate-name") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "duplicate-name") t.Logf("Create the sandbox again should fail") - _, err = runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) + _, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) require.Error(t, err) + EnsureImageExists(t, pauseImage) + t.Logf("Create a container") cnConfig := ContainerConfig( "container", diff --git a/integration/failpoint/cmd/cni-bridge-fp/README.md b/integration/failpoint/cmd/cni-bridge-fp/README.md new file mode 100644 index 0000000..065e3b5 --- /dev/null +++ b/integration/failpoint/cmd/cni-bridge-fp/README.md @@ -0,0 +1,159 @@ +## cni-bridge-f(ail)p(oint) + +### Overview + +The `cni-bridge-fp` is a CNI plugin which delegates interface-creating function +to [CNI bridge plugin][1] and allows user to inject failpoint before delegation. + +Since the CNI plugin is invoked by binary call from CRI and it is short-lived, +the failpoint need to be configured by a JSON file, which can be persisted. +There is an example about failpoint description. + +```json +{ + "cmdAdd": "1*error(you-shall-not-pass!)->1*panic(again)", + "cmdDel": "1*error(try-again)", + "cmdCheck": "10*off" +} +``` + +* `cmdAdd` (string, optional): The failpoint for `ADD` command. +* `cmdDel` (string, optional): The failpoint for `DEL` command. +* `cmdCheck` (string, optional): The failpoint for `CHECK` command. 
+ +Since the `cmdXXX` can be multiple failpoints, each CNI binary call will update +the current state to make sure the order of execution is expected. + +And the failpoint injection is enabled by pod's annotation. Currently, the key +of customized CNI capabilities in containerd can only be `io.kubernetes.cri.pod-annotations` +and containerd will pass pod's annotations to CNI under the that object. The +user can use the `failpoint.cni.containerd.io/confpath` annotation to enable +failpoint for the pod. + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx + annotations: + failpoint.cni.containerd.io/confpath: "/tmp/pod-failpoints.json" +spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 +``` + +### Example + +Let's use the following json as failpoint description. + +```bash +$ cat <1*panic(sorry)" +} +EOF +``` + +And use `ip netns` to create persisted net namespace named by `failpoint`. + +```bash +$ sudo ip netns add failpoint +``` + +And then setup the following bash script for demo. + +```bash +$ cat <1*panic(sorry)" +} +``` + +We should setup CNI successfully after retry. When we teardown the interface, +there should be two failpoints. + +```bash +$ sudo CNI_COMMAND=ADD bash /tmp/cni-failpoint-demo-helper.sh +... + +$ sudo CNI_COMMAND=DEL bash /tmp/cni-failpoint-demo-helper.sh +{ + "code": 999, + "msg": "oops" +} + +$ sudo CNI_COMMAND=DEL bash /tmp/cni-failpoint-demo-helper.sh +{ + "code": 999, + "msg": "oops" +} + +$ cat /tmp/cni-failpoint.json | jq . +{ + "cmdAdd": "0*error(try-again)", + "cmdDel": "0*error(oops)", + "cmdCheck": "1*off->1*panic(sorry)" +} +``` + +[1]: diff --git a/integration/failpoint/cmd/cni-bridge-fp/main_linux.go b/integration/failpoint/cmd/cni-bridge-fp/main_linux.go new file mode 100644 index 0000000..50c4b20 --- /dev/null +++ b/integration/failpoint/cmd/cni-bridge-fp/main_linux.go @@ -0,0 +1,202 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package main + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "syscall" + + "github.com/containerd/containerd/pkg/failpoint" + "github.com/containerd/continuity" + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/skel" + "github.com/containernetworking/cni/pkg/version" +) + +const delegatedPlugin = "bridge" + +type netConf struct { + RuntimeConfig struct { + PodAnnotations inheritedPodAnnotations `json:"io.kubernetes.cri.pod-annotations"` + } `json:"runtimeConfig,omitempty"` +} + +type inheritedPodAnnotations struct { + // FailpointConfPath represents filepath of failpoint settings. + FailpointConfPath string `json:"failpoint.cni.containerd.io/confpath,omitempty"` +} + +// failpointConf is used to describe cmdAdd/cmdDel/cmdCheck command's failpoint. 
+type failpointConf struct { + Add string `json:"cmdAdd,omitempty"` + Del string `json:"cmdDel,omitempty"` + Check string `json:"cmdCheck,omitempty"` +} + +func main() { + skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, "bridge with failpoint support") +} + +func cmdAdd(args *skel.CmdArgs) error { + if err := handleFailpoint(args, "ADD"); err != nil { + return err + } + + result, err := invoke.DelegateAdd(context.TODO(), delegatedPlugin, args.StdinData, nil) + if err != nil { + return err + } + return result.Print() +} + +func cmdCheck(args *skel.CmdArgs) error { + if err := handleFailpoint(args, "CHECK"); err != nil { + return err + } + + return invoke.DelegateCheck(context.TODO(), delegatedPlugin, args.StdinData, nil) +} + +func cmdDel(args *skel.CmdArgs) error { + if err := handleFailpoint(args, "DEL"); err != nil { + return err + } + + return invoke.DelegateDel(context.TODO(), delegatedPlugin, args.StdinData, nil) +} + +func handleFailpoint(args *skel.CmdArgs, cmdKind string) error { + var conf netConf + if err := json.Unmarshal(args.StdinData, &conf); err != nil { + return fmt.Errorf("failed to parse network configuration: %w", err) + } + + confPath := conf.RuntimeConfig.PodAnnotations.FailpointConfPath + if len(confPath) == 0 { + return nil + } + + control, err := newFailpointControl(confPath) + if err != nil { + return err + } + + evalFn, err := control.delegatedEvalFn(cmdKind) + if err != nil { + return err + } + return evalFn() +} + +type failpointControl struct { + confPath string +} + +func newFailpointControl(confPath string) (*failpointControl, error) { + if !filepath.IsAbs(confPath) { + return nil, fmt.Errorf("failpoint confPath(%s) is required to be absolute", confPath) + } + + return &failpointControl{ + confPath: confPath, + }, nil +} + +func (c *failpointControl) delegatedEvalFn(cmdKind string) (failpoint.EvalFn, error) { + var resFn failpoint.EvalFn = nopEvalFn + + if err := c.updateTx(func(conf *failpointConf) error { + var fpStr *string 
+ + switch cmdKind { + case "ADD": + fpStr = &conf.Add + case "DEL": + fpStr = &conf.Del + case "CHECK": + fpStr = &conf.Check + } + + if fpStr == nil || *fpStr == "" { + return nil + } + + fp, err := failpoint.NewFailpoint(cmdKind, *fpStr) + if err != nil { + return fmt.Errorf("failed to parse failpoint %s: %w", *fpStr, err) + } + + resFn = fp.DelegatedEval() + + *fpStr = fp.Marshal() + return nil + + }); err != nil { + return nil, err + } + return resFn, nil +} + +func (c *failpointControl) updateTx(updateFn func(conf *failpointConf) error) error { + f, err := os.OpenFile(c.confPath, os.O_RDWR, 0666) + if err != nil { + return fmt.Errorf("failed to open confPath %s: %w", c.confPath, err) + } + defer f.Close() + + if err := flock(f.Fd()); err != nil { + return fmt.Errorf("failed to lock failpoint setting %s: %w", c.confPath, err) + } + defer unflock(f.Fd()) + + data, err := io.ReadAll(f) + if err != nil { + return fmt.Errorf("failed to read failpoint setting %s: %w", c.confPath, err) + } + + var conf failpointConf + if err := json.Unmarshal(data, &conf); err != nil { + return fmt.Errorf("failed to unmarshal failpoint conf %s: %w", string(data), err) + } + + if err := updateFn(&conf); err != nil { + return err + } + + data, err = json.Marshal(conf) + if err != nil { + return fmt.Errorf("failed to marshal failpoint conf: %w", err) + } + return continuity.AtomicWriteFile(c.confPath, data, 0666) +} + +func nopEvalFn() error { + return nil +} + +func flock(fd uintptr) error { + return syscall.Flock(int(fd), syscall.LOCK_EX) +} + +func unflock(fd uintptr) error { + return syscall.Flock(int(fd), syscall.LOCK_UN) +} diff --git a/pkg/cri/platforms/default_unix.go b/integration/failpoint/cmd/containerd-shim-runc-fp-v1/main_linux.go similarity index 71% rename from pkg/cri/platforms/default_unix.go rename to integration/failpoint/cmd/containerd-shim-runc-fp-v1/main_linux.go index ca7de55..158843b 100644 --- a/pkg/cri/platforms/default_unix.go +++ 
b/integration/failpoint/cmd/containerd-shim-runc-fp-v1/main_linux.go @@ -1,5 +1,3 @@ -// +build !windows - /* Copyright The containerd Authors. @@ -16,13 +14,15 @@ limitations under the License. */ -package platforms +package main import ( - "github.com/containerd/containerd/platforms" + "context" + + "github.com/containerd/containerd/runtime/v2/runc/manager" + "github.com/containerd/containerd/runtime/v2/shim" ) -// Default returns the current platform's default platform specification. -func Default() platforms.MatchComparer { - return platforms.Default() +func main() { + shim.RunManager(context.Background(), manager.NewShimManager("io.containerd.runc-fp.v1")) } diff --git a/integration/failpoint/cmd/containerd-shim-runc-fp-v1/plugin_linux.go b/integration/failpoint/cmd/containerd-shim-runc-fp-v1/plugin_linux.go new file mode 100644 index 0000000..0ff1f38 --- /dev/null +++ b/integration/failpoint/cmd/containerd-shim-runc-fp-v1/plugin_linux.go @@ -0,0 +1,141 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containerd/containerd/oci" + "github.com/containerd/containerd/pkg/failpoint" + "github.com/containerd/containerd/pkg/shutdown" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/runtime/v2/runc/task" + "github.com/containerd/containerd/runtime/v2/shim" + taskapi "github.com/containerd/containerd/runtime/v2/task" + "github.com/containerd/ttrpc" +) + +const ( + ociConfigFilename = "config.json" + + failpointPrefixKey = "io.containerd.runtime.v2.shim.failpoint." +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.TTRPCPlugin, + ID: "task", + Requires: []plugin.Type{ + plugin.EventPlugin, + plugin.InternalPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + pp, err := ic.GetByID(plugin.EventPlugin, "publisher") + if err != nil { + return nil, err + } + ss, err := ic.GetByID(plugin.InternalPlugin, "shutdown") + if err != nil { + return nil, err + } + fps, err := newFailpointFromOCIAnnotation() + if err != nil { + return nil, err + } + service, err := task.NewTaskService(ic.Context, pp.(shim.Publisher), ss.(shutdown.Service)) + if err != nil { + return nil, err + } + + return &taskServiceWithFp{ + fps: fps, + local: service, + }, nil + }, + }) + +} + +type taskServiceWithFp struct { + fps map[string]*failpoint.Failpoint + local taskapi.TaskService +} + +func (s *taskServiceWithFp) RegisterTTRPC(server *ttrpc.Server) error { + taskapi.RegisterTaskService(server, s.local) + return nil +} + +func (s *taskServiceWithFp) UnaryInterceptor() ttrpc.UnaryServerInterceptor { + return func(ctx context.Context, unmarshal ttrpc.Unmarshaler, info *ttrpc.UnaryServerInfo, method ttrpc.Method) (interface{}, error) { + methodName := filepath.Base(info.FullMethod) + if fp, ok := s.fps[methodName]; ok { + if err := fp.Evaluate(); err != nil { + return nil, err + } + } + return method(ctx, 
unmarshal) + } +} + +// newFailpointFromOCIAnnotation reloads and parses the annotation from +// bundle-path/config.json. +// +// The annotation controlling task API's failpoint should be like: +// +// io.containerd.runtime.v2.shim.failpoint.Create = 1*off->1*error(please retry) +// +// The `Create` is the shim unary API and the value of annotation is the +// failpoint control. The function will return a set of failpoint controllers. +func newFailpointFromOCIAnnotation() (map[string]*failpoint.Failpoint, error) { + // NOTE: shim's current working dir is in bundle dir. + cwd, err := os.Getwd() + if err != nil { + return nil, fmt.Errorf("failed to get current working dir: %w", err) + } + + configPath := filepath.Join(cwd, ociConfigFilename) + data, err := os.ReadFile(configPath) + if err != nil { + return nil, fmt.Errorf("failed to read %v: %w", configPath, err) + } + + var spec oci.Spec + if err := json.Unmarshal(data, &spec); err != nil { + return nil, fmt.Errorf("failed to parse oci.Spec(%v): %w", string(data), err) + } + + res := make(map[string]*failpoint.Failpoint) + for k, v := range spec.Annotations { + if !strings.HasPrefix(k, failpointPrefixKey) { + continue + } + + methodName := strings.TrimPrefix(k, failpointPrefixKey) + fp, err := failpoint.NewFailpoint(methodName, v) + if err != nil { + return nil, fmt.Errorf("failed to parse failpoint %v: %w", v, err) + } + res[methodName] = fp + } + return res, nil +} diff --git a/integration/image_list.sample.toml b/integration/image_list.sample.toml index eabe415..6c095f2 100644 --- a/integration/image_list.sample.toml +++ b/integration/image_list.sample.toml @@ -1,5 +1,6 @@ alpine = "docker.io/library/alpine:latest" busybox = "docker.io/library/busybox:latest" -pause = "k8s.gcr.io/pause:3.5" -VolumeCopyUp = "gcr.io/k8s-cri-containerd/volume-copy-up:2.0" -VolumeOwnership = "gcr.io/k8s-cri-containerd/volume-ownership:2.0" +pause = "registry.k8s.io/pause:3.6" +VolumeCopyUp = "ghcr.io/containerd/volume-copy-up:2.1" 
+VolumeOwnership = "ghcr.io/containerd/volume-ownership:2.1" +ArgsEscaped = "cplatpublic.azurecr.io/args-escaped-test-image-ns:1.0" diff --git a/integration/image_load_test.go b/integration/image_load_test.go index 6de92d4..9964743 100644 --- a/integration/image_load_test.go +++ b/integration/image_load_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -19,15 +17,13 @@ package integration import ( - "io/ioutil" - "os" - "os/exec" + "path/filepath" "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + exec "golang.org/x/sys/execabs" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // Test to load an image from tarball. @@ -41,12 +37,10 @@ func TestImageLoad(t *testing.T) { t.Logf("docker save image into tarball") output, err := exec.Command("docker", "pull", testImage).CombinedOutput() require.NoError(t, err, "output: %q", output) - tarF, err := ioutil.TempFile("", "image-load") - tar := tarF.Name() + // os.CreateTemp also opens a file, which might prevent us from overwriting that file with docker save. 
+ tarDir := t.TempDir() + tar := filepath.Join(tarDir, "image.tar") require.NoError(t, err) - defer func() { - assert.NoError(t, os.RemoveAll(tar)) - }() output, err = exec.Command("docker", "save", testImage, "-o", tar).CombinedOutput() require.NoError(t, err, "output: %q", output) @@ -59,7 +53,7 @@ func TestImageLoad(t *testing.T) { t.Logf("load image in cri") ctr, err := exec.LookPath("ctr") - require.NoError(t, err, "ctr should be installed, make sure you've run `make install.deps`") + require.NoError(t, err, "ctr should be installed, make sure you've run `make install-deps`") output, err = exec.Command(ctr, "-address="+containerdEndpoint, "-n=k8s.io", "images", "import", tar).CombinedOutput() require.NoError(t, err, "output: %q", output) @@ -77,13 +71,7 @@ func TestImageLoad(t *testing.T) { require.Equal(t, []string{loadedImage}, img.RepoTags) t.Logf("create a container with the loaded image") - sbConfig := PodSandboxConfig("sandbox", Randomize("image-load")) - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", Randomize("image-load")) containerConfig := ContainerConfig( "container", testImage, diff --git a/integration/imagefs_info_test.go b/integration/imagefs_info_test.go index f64653d..ea83a22 100644 --- a/integration/imagefs_info_test.go +++ b/integration/imagefs_info_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -19,33 +17,22 @@ package integration import ( + "fmt" "os" "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestImageFSInfo(t *testing.T) { - config := PodSandboxConfig("running-pod", "imagefs") + t.Logf("Create a sandbox to make sure there is an active snapshot") + PodSandboxConfigWithCleanup(t, "running-pod", "imagefs") t.Logf("Pull an image to make sure image fs is not empty") - img, err := imageService.PullImage(&runtime.ImageSpec{Image: GetImage(BusyBox)}, nil, config) - require.NoError(t, err) - defer func() { - err := imageService.RemoveImage(&runtime.ImageSpec{Image: img}) - assert.NoError(t, err) - }() - t.Logf("Create a sandbox to make sure there is an active snapshot") - sb, err := runtimeService.RunPodSandbox(config, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + EnsureImageExists(t, GetImage(BusyBox)) // It takes time to populate imagefs stats. Use eventually // to check for a period of time. 
@@ -60,12 +47,11 @@ func TestImageFSInfo(t *testing.T) { return false, nil } if len(stats) >= 2 { - return false, errors.Errorf("unexpected stats length: %d", len(stats)) + return false, fmt.Errorf("unexpected stats length: %d", len(stats)) } info = stats[0] if info.GetTimestamp() != 0 && info.GetUsedBytes().GetValue() != 0 && - info.GetInodesUsed().GetValue() != 0 && info.GetFsId().GetMountpoint() != "" { return true, nil } @@ -73,6 +59,6 @@ func TestImageFSInfo(t *testing.T) { }, time.Second, 30*time.Second)) t.Logf("Image filesystem mountpath should exist") - _, err = os.Stat(info.GetFsId().GetMountpoint()) + _, err := os.Stat(info.GetFsId().GetMountpoint()) assert.NoError(t, err) } diff --git a/integration/images/README.md b/integration/images/README.md new file mode 100644 index 0000000..32cc660 --- /dev/null +++ b/integration/images/README.md @@ -0,0 +1,127 @@ +# Test image overview + +Test images for Linux can be built as usual using buildx. + +While it is possible to build Windows docker images on Linux (if we avoid the ```RUN``` or ```WORKDIR``` options), the ```volume-ownership``` and ```volume-copy-up``` images need to be built on Windows for the tests to be relevant. The reason for this is that when building images on Linux, Windows specific security info (DACL and ownership) does not get attached to the test files and folders inside the image. The ```TestVolumeCopyUp``` and ```TestVolumeOwnership``` tests will not be relevant, as owners of the files will always be ```ContainerAdministrator```. + +Building images on Windows nodes also allows us to potentially add new users inside the images or enable new testing scenarios that require different services or applications to run inside the container. + +This document describes the needed bits to build the Windows container images on a remote Windows node. 
+
+## Setting up the Windows build node
+
+We can build images for all relevant Windows versions on a single Windows node as long as that Windows node is a version greater or equal to the image versions we're trying to build. For example, on a Windows Server 2022 node, we can build images for 1809, 2004, 20H2 and ltsc2022, while if we were running on a Windows Server 2019 machine, we would only be able to generate images for 1809. To build images for different versions of Windows, we need to enable the ```Hyper-V``` role, and use ```--isolation=hyperv``` as an argument to docker build.
+
+Note, this will also work if nested hyperv is enabled. This means that the images can be built on Azure (nested Hyper-V is enabled by default), or on any modern linux machine using KVM and libvirt.
+Note, at the time of this writing, the recommended version to build on is Windows Server 2022 (ltsc2022).
+
+
+### Enabling nested VMX on Libvirt
+
+To enable nested Hyper-V on libvirt, simply install Windows Server 2022 as usual, then shutdown the guest and edit its config:
+
+```bash
+# replace win2k22 with the name of your Windows VM
+virsh edit win2k22
+```
+
+and add/edit the CPU section to look like this:
+
+```xml
+<cpu mode='custom' match='exact' check='partial'>
+  <model fallback='allow'>Broadwell</model>
+  <feature policy='require' name='vmx'/>
+</cpu>
+```
+
+Hyper-V should now work inside your KVM machine. It's not terribly fast, but it should suffice for building images.
+ +### Enable necessary roles + +Install the needed roles and tools: + +```powershell +# Enable Hyper-V and management tools +Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V,Microsoft-Hyper-V-Management-Clients,Microsoft-Hyper-V-Management-PowerShell -All -NoRestart + +# Enable SSH (this can be skipped if you don't need it) +Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0 + +# Install Docker +Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force -Confirm:$false +Install-Module -Name DockerMsftProvider -Repository PSGallery -Force -Confirm:$false +Install-Package -Name docker -ProviderName DockerMsftProvider -Force -Confirm:$false +``` + +At this point we can reboot for the changes to take effect: + +```powershell +Restart-Computer -Force +``` + +### Configure needed services + +Start sshd and enable it to run on startup: + +```powershell +Start-Service sshd +Set-Service -Name sshd -StartupType 'Automatic' +``` + +Open Firewall port for ssh: + +```powershell +New-NetFirewallRule -Name 'OpenSSH-Server-In-TCP' -DisplayName 'OpenSSH Server (sshd)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22 +``` + +These following steps are taken from the [k8s windows image builder helper page](https://github.com/kubernetes/kubernetes/blob/master/test/images/windows/README.md). + + +Enable TLS authentication for docker and enable remote access: + +```powershell +# Replace YOUR_SERVER_IP_GOES_HERE with the IP addresses you'll use to access +# this node. This will be the private IP and VIP/Floating IP of the server. 
+docker run --isolation=hyperv --user=ContainerAdministrator --rm ` + -e SERVER_NAME=$(hostname) ` + -e IP_ADDRESSES=127.0.0.1,YOUR_SERVER_IP_GOES_HERE ` + -v "c:\programdata\docker:c:\programdata\docker" ` + -v "$env:USERPROFILE\.docker:c:\users\containeradministrator\.docker" stefanscherer/dockertls-windows:2.5.5 +``` + +Restart Docker: + +```powershell +Stop-Service docker +Start-Service docker +``` + +After this, the files (```ca.pem```, ```cert.pem``` and ```key.pem```) needed to authenticate to docker will be present in ```$env:USERPROFILE\.docker``` on the Windows machine. You will need to copy those files to your linux machine in ```$HOME/.docker```. They are needed in order to authenticate against the Windows docker daemon during our image build process. + +Open Firewall port for docker: + +```powershell +New-NetFirewallRule -Name 'Docker-TLS-In-TCP' -DisplayName 'Docker (TLS)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 2376 +``` + +Note, if you're running in a cloud, make sure you also open the port in your NSG/Security group. + +## Building the images + +With the above mentioned files copied to ```$HOME/.docker``` we can now start building the images: + +```bash +git clone https://github.com/containerd/containerd +cd containerd/integration/images/volume-copy-up + +make setup-buildx +make configure-docker +# 192.168.122.107 corresponds to the IP address of your windows build node. +# This builds the images and pushes them to the registry specified by PROJ +# The Windows images will be built on the Windows node and pushed from there. +# You will need to make sure that docker is configured and able to push to the +# project you want to push to. +make build-registry PROJ=docker.example.com REMOTE_DOCKER_URL=192.168.122.107:2376 +# Create a manifest and update it with all supported operating systems and architectures. 
+make push-manifest PROJ=docker.samfira.com REMOTE_DOCKER_URL=192.168.122.107:2376 +``` diff --git a/integration/images/volume-copy-up/Dockerfile b/integration/images/volume-copy-up/Dockerfile index ed6bba6..24ce3f4 100644 --- a/integration/images/volume-copy-up/Dockerfile +++ b/integration/images/volume-copy-up/Dockerfile @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM busybox +ARG BASE + +FROM $BASE RUN sh -c "mkdir /test_dir; echo test_content > /test_dir/test_file" VOLUME "/test_dir" diff --git a/integration/images/volume-copy-up/Dockerfile_windows b/integration/images/volume-copy-up/Dockerfile_windows new file mode 100644 index 0000000..5e57623 --- /dev/null +++ b/integration/images/volume-copy-up/Dockerfile_windows @@ -0,0 +1,37 @@ +# Copyright The containerd Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +ARG BASE +FROM $BASE + +ADD https://github.com/kubernetes-sigs/windows-testing/raw/3fea3d48ea8337b2aaca755c1d719e34b45f46b9/images/busybox/busybox.exe /bin/busybox.exe + +ENV BUSYBOX_EXES="[ [[ ar arch ash awk base64 basename bash bunzip2 bzcat bzip2 cal cat chmod cksum clear cmp comm cp cpio cut date dc dd df diff dirname dos2unix dpkg-deb du echo ed egrep env expand e xpr factor false fgrep find fold fsync ftpget ftpput getopt grep groups gunzip gzip hd head hexdump id ipcalc kill killall less link ln logname ls lzcat lzma lzop lzopcat man md5sum mkdir mktemp mv nl od paste patch pgrep pidof pipe_progress pkill printenv printf ps pwd rev rm rmdir rpm rpm2cpio sed seq sh sha1sum sha256sum sha3sum sha512sum shred shuf sleep sort split ssl_client stat strings sum ta c tail tar tee test timeout touch tr true truncate ttysize uname uncompress unexpand uniq unix2dos unlink unlzma unlzop unxz unzip usleep uudecode uuencode vi watch wc wget which whoami whois xargs xxd xz xzcat yes zcat" + +USER ContainerAdministrator +WORKDIR C:/bin + +RUN cmd.exe /c "@echo off && FOR %i in (%BUSYBOX_EXES%) do (mklink %i.exe busybox.exe)" + +USER ContainerUser + +# Explicitly set full access rights for "CREATOR OWNER". While +# this is set by default for Windows Server 2019, it seems that +# on Windows Server 2022 it is not. 
+RUN mkdir C:\test_dir && \ + icacls C:\test_dir /grant "CREATOR OWNER":(OI)(CI)(IO)F /T && \ + /bin/sh.exe -c "echo test_content > /test_dir/test_file" + +ENV PATH="C:\bin;C:\Windows\System32;C:\Windows;" +VOLUME "C:/test_dir" diff --git a/integration/images/volume-copy-up/Makefile b/integration/images/volume-copy-up/Makefile index 2a3bb48..cba8174 100644 --- a/integration/images/volume-copy-up/Makefile +++ b/integration/images/volume-copy-up/Makefile @@ -14,21 +14,94 @@ all: build -PROJ=gcr.io/k8s-cri-containerd -VERSION=2.0 +PROJ=ghcr.io/containerd +VERSION=2.1 IMAGE=$(PROJ)/volume-copy-up:$(VERSION) -PLATFORMS?=linux/amd64,linux/arm64 +DOCKER_CERT_PATH ?= "$(HOME)/.docker" +REMOTE_DOCKER_URL ?= +DOCKER_REMOTE_ARGS ?= + + +ifneq ($(REMOTE_DOCKER_URL),) + DOCKER_REMOTE_ARGS = --tlsverify --tlscacert "$(DOCKER_CERT_PATH)/ca.pem" \ + --tlscert "$(DOCKER_CERT_PATH)/cert.pem" \ + --tlskey "$(DOCKER_CERT_PATH)/key.pem" \ + -H "$(REMOTE_DOCKER_URL)" +endif + +# Operating systems supported: linux, windows +OS ?= linux +# Architectures supported: amd64, arm64 +ARCH ?= amd64 +# OS Version for the Windows images: 1809, 20H2, ltsc2022 +OSVERSION ?= 1809 + +# The output type could either be docker (local), or registry. +# If it is registry, it will also allow us to push the Windows images. 
+OUTPUT_TYPE ?= docker + +ALL_OS = linux +ALL_ARCH.linux = amd64 arm64 ppc64le +ALL_OS_ARCH.linux = $(foreach arch, ${ALL_ARCH.linux}, linux-$(arch)) + +ifneq ($(REMOTE_DOCKER_URL),) +ALL_OS += windows +ALL_OSVERSIONS.windows := 1809 20H2 ltsc2022 +ALL_OS_ARCH.windows = $(foreach osversion, ${ALL_OSVERSIONS.windows}, windows-amd64-${osversion}) +BASE.windows := mcr.microsoft.com/windows/nanoserver +endif + +BASE.linux.amd64 := busybox +BASE.linux.arm64 := arm64v8/busybox +BASE.linux.ppc64le := busybox +BASE.linux := ${BASE.linux.${ARCH}} +BASE := ${BASE.${OS}} + +ALL_OS_ARCH = $(foreach os, $(ALL_OS), ${ALL_OS_ARCH.${os}}) configure-docker: - gcloud auth configure-docker + gcloud auth configure-docker --quiet + gcloud auth configure-docker --quiet $(shell echo $(PROJ) | cut -f 1 -d "/") || true -build: - docker buildx build \ - $(OUTPUT) \ - --platform=${PLATFORMS} \ - --tag $(IMAGE) . +setup-buildx: + docker buildx use img-builder || docker buildx create --name img-builder --use -push: OUTPUT=--push -push: configure-docker build +build: setup-buildx build-local -.PHONY: configure-docker build push +push: configure-docker setup-buildx build-registry push-manifest + +build-local: $(addprefix sub-container-docker-,$(ALL_OS_ARCH.linux)) +build-registry: $(addprefix sub-container-registry-,$(ALL_OS_ARCH)) + +# split words on hyphen, access by 1-index +word-hyphen = $(word $2,$(subst -, ,$1)) +sub-container-%: + $(MAKE) OUTPUT_TYPE=$(call word-hyphen,$*,1) OS=$(call word-hyphen,$*,2) ARCH=$(call word-hyphen,$*,3) OSVERSION=$(call word-hyphen,$*,4) container + +container: .container-${OS}-$(ARCH) + +.container-linux-$(ARCH): + docker buildx build --pull --output=type=${OUTPUT_TYPE} --platform ${OS}/${ARCH} \ + -t $(IMAGE)-${OS}-${ARCH} --build-arg BASE=${BASE} . 
+ +.container-windows-$(ARCH): + docker $(DOCKER_REMOTE_ARGS) build --isolation=hyperv --no-cache --pull \ + -t $(IMAGE)-${OS}-${ARCH}-${OSVERSION} --build-arg BASE=${BASE}:${OSVERSION} \ + -f Dockerfile_windows . + docker $(DOCKER_REMOTE_ARGS) push $(IMAGE)-${OS}-${ARCH}-${OSVERSION} + +# For Windows images, we also need to include the "os.version" in the manifest list images, +# so the Windows node can pull the proper image it needs. +push-manifest: + docker manifest create --amend $(IMAGE) $(shell echo $(ALL_OS_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&~g") + set -x; for arch in $(ALL_ARCH.linux); do docker manifest annotate --os linux --arch $${arch} ${IMAGE} ${IMAGE}-linux-$${arch}; done + # we use awk to also trim the quotes around the OS version string. + set -x; \ + for osversion in ${ALL_OSVERSIONS.windows}; do \ + full_version=`docker manifest inspect ${BASE.windows}:$${osversion} | grep "os.version" | head -n 1 | awk -F\" '{print $$4}'` || true; \ + docker manifest annotate --os windows --arch amd64 --os-version $${full_version} \ + ${IMAGE} ${IMAGE}-windows-amd64-$${osversion}; \ + done + docker manifest push --purge ${IMAGE} + +.PHONY: configure-docker setup-buildx build push build-local build-registry container push-manifest diff --git a/integration/images/volume-ownership/Dockerfile_windows b/integration/images/volume-ownership/Dockerfile_windows new file mode 100644 index 0000000..7200690 --- /dev/null +++ b/integration/images/volume-ownership/Dockerfile_windows @@ -0,0 +1,39 @@ +# Copyright The containerd Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG BASE
+FROM $BASE
+
+ADD https://github.com/kubernetes-sigs/windows-testing/raw/3fea3d48ea8337b2aaca755c1d719e34b45f46b9/images/busybox/busybox.exe /bin/busybox.exe
+
+ENV BUSYBOX_EXES="[ [[ ar arch ash awk base64 basename bash bunzip2 bzcat bzip2 cal cat chmod cksum clear cmp comm cp cpio cut date dc dd df diff dirname dos2unix dpkg-deb du echo ed egrep env expand expr factor false fgrep find fold fsync ftpget ftpput getopt grep groups gunzip gzip hd head hexdump id ipcalc kill killall less link ln logname ls lzcat lzma lzop lzopcat man md5sum mkdir mktemp mv nl od paste patch pgrep pidof pipe_progress pkill printenv printf ps pwd rev rm rmdir rpm rpm2cpio sed seq sh sha1sum sha256sum sha3sum sha512sum shred shuf sleep sort split ssl_client stat strings sum tac tail tar tee test timeout touch tr true truncate ttysize uname uncompress unexpand uniq unix2dos unlink unlzma unlzop unxz unzip usleep uudecode uuencode vi watch wc wget which whoami whois xargs xxd xz xzcat yes zcat"
+
+USER ContainerAdministrator
+WORKDIR C:/bin
+
+ADD tools/get_owner_windows.exe C:/bin/get_owner.exe
+RUN cmd.exe /c "@echo off && FOR %i in (%BUSYBOX_EXES%) do (mklink %i.exe busybox.exe)"
+
+USER ContainerUser
+
+# Explicitly set full access rights for "CREATOR OWNER". While
+# this is set by default for Windows Server 2019, it seems that
+# on Windows Server 2022 it is not. 
+RUN mkdir C:\volumes && \ + icacls C:\volumes /grant "CREATOR OWNER":(OI)(CI)(IO)F /T && \ + mkdir C:\volumes\test_dir && \ + /bin/sh.exe -c "echo test_content > /volumes/test_dir/test_file" + +ENV PATH="C:\bin;C:\Windows\System32;C:\Windows;" +VOLUME "C:/volumes/test_dir" diff --git a/integration/images/volume-ownership/Makefile b/integration/images/volume-ownership/Makefile index ac257aa..175e11c 100644 --- a/integration/images/volume-ownership/Makefile +++ b/integration/images/volume-ownership/Makefile @@ -14,21 +14,100 @@ all: build -PROJ=gcr.io/k8s-cri-containerd -VERSION=2.0 +PROJ=ghcr.io/containerd +VERSION=2.1 IMAGE=$(PROJ)/volume-ownership:$(VERSION) -PLATFORMS?=linux/amd64,linux/arm64 +DOCKER_CERT_PATH ?= "$(HOME)/.docker" +REMOTE_DOCKER_URL ?= +DOCKER_REMOTE_ARGS ?= + + +ifneq ($(REMOTE_DOCKER_URL),) + DOCKER_REMOTE_ARGS = --tlsverify --tlscacert "$(DOCKER_CERT_PATH)/ca.pem" \ + --tlscert "$(DOCKER_CERT_PATH)/cert.pem" \ + --tlskey "$(DOCKER_CERT_PATH)/key.pem" \ + -H "$(REMOTE_DOCKER_URL)" +endif + +# Operating systems supported: linux, windows +OS ?= linux +# Architectures supported: amd64, arm64 +ARCH ?= amd64 +# OS Version for the Windows images: 1809, 20H2, ltsc2022 +OSVERSION ?= 1809 + +# The output type could either be docker (local), or registry. +# If it is registry, it will also allow us to push the Windows images. 
+OUTPUT_TYPE ?= docker + +ALL_OS = linux +ALL_ARCH.linux = amd64 arm64 ppc64le +ALL_OS_ARCH.linux = $(foreach arch, ${ALL_ARCH.linux}, linux-$(arch)) + +ifneq ($(REMOTE_DOCKER_URL),) +ALL_OS += windows +ALL_OSVERSIONS.windows := 1809 20H2 ltsc2022 +ALL_OS_ARCH.windows = $(foreach osversion, ${ALL_OSVERSIONS.windows}, windows-amd64-${osversion}) +BASE.windows := mcr.microsoft.com/windows/nanoserver +endif + +BASE.linux.amd64 := busybox +BASE.linux.arm64 := arm64v8/busybox +BASE.linux.ppc64le := busybox +BASE.linux := ${BASE.linux.${ARCH}} +BASE := ${BASE.${OS}} + +ALL_OS_ARCH = $(foreach os, $(ALL_OS), ${ALL_OS_ARCH.${os}}) configure-docker: - gcloud auth configure-docker + gcloud auth configure-docker --quiet + gcloud auth configure-docker --quiet $(shell echo $(PROJ) | cut -f 1 -d "/") || true -build: - docker buildx build \ - $(OUTPUT) \ - --platform=${PLATFORMS} \ - --tag $(IMAGE) . +setup-buildx: + docker buildx use img-builder || docker buildx create --name img-builder --use -push: OUTPUT=--push -push: configure-docker build +build: setup-buildx build-local -.PHONY: configure-docker build push +push: configure-docker setup-buildx build-registry push-manifest + +build-local: $(addprefix sub-container-docker-,$(ALL_OS_ARCH.linux)) +build-tools: + GOOS=windows go build -mod=vendor -o tools/get_owner_windows.exe tools/get_owner_windows.go + +clean-tools: + rm -f tools/get_owner_windows.exe || true + +build-registry: build-tools $(addprefix sub-container-registry-,$(ALL_OS_ARCH)) clean-tools + +# split words on hyphen, access by 1-index +word-hyphen = $(word $2,$(subst -, ,$1)) +sub-container-%: + $(MAKE) OUTPUT_TYPE=$(call word-hyphen,$*,1) OS=$(call word-hyphen,$*,2) ARCH=$(call word-hyphen,$*,3) OSVERSION=$(call word-hyphen,$*,4) container + +container: .container-${OS}-$(ARCH) + +.container-linux-$(ARCH): + docker buildx build --pull --output=type=${OUTPUT_TYPE} --platform ${OS}/${ARCH} \ + -t $(IMAGE)-${OS}-${ARCH} --build-arg BASE=${BASE} . 
+ +.container-windows-$(ARCH): + docker $(DOCKER_REMOTE_ARGS) build --isolation=hyperv --no-cache --pull \ + -t $(IMAGE)-${OS}-${ARCH}-${OSVERSION} --build-arg BASE=${BASE}:${OSVERSION} \ + -f Dockerfile_windows . + docker $(DOCKER_REMOTE_ARGS) push $(IMAGE)-${OS}-${ARCH}-${OSVERSION} + +# For Windows images, we also need to include the "os.version" in the manifest list images, +# so the Windows node can pull the proper image it needs. +push-manifest: + docker manifest create --amend $(IMAGE) $(shell echo $(ALL_OS_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&~g") + set -x; for arch in $(ALL_ARCH.linux); do docker manifest annotate --os linux --arch $${arch} ${IMAGE} ${IMAGE}-linux-$${arch}; done + # we use awk to also trim the quotes around the OS version string. + set -x; \ + for osversion in ${ALL_OSVERSIONS.windows}; do \ + full_version=`docker manifest inspect ${BASE.windows}:$${osversion} | grep "os.version" | head -n 1 | awk -F\" '{print $$4}'` || true; \ + docker manifest annotate --os windows --arch amd64 --os-version $${full_version} \ + ${IMAGE} ${IMAGE}-windows-amd64-$${osversion}; \ + done + docker manifest push --purge ${IMAGE} + +.PHONY: configure-docker setup-buildx build push build-local build-registry container push-manifest clean-tools diff --git a/vendor/github.com/containerd/continuity/fs/diff_windows.go b/integration/images/volume-ownership/tools/get_owner_windows.go similarity index 53% rename from vendor/github.com/containerd/continuity/fs/diff_windows.go rename to integration/images/volume-ownership/tools/get_owner_windows.go index 4bfa72d..0432289 100644 --- a/vendor/github.com/containerd/continuity/fs/diff_windows.go +++ b/integration/images/volume-ownership/tools/get_owner_windows.go @@ -14,35 +14,40 @@ limitations under the License. 
*/ -package fs +package main import ( + "fmt" + "log" "os" "golang.org/x/sys/windows" ) -func detectDirDiff(upper, lower string) *diffDirOptions { - return nil -} - -func compareSysStat(s1, s2 interface{}) (bool, error) { - f1, ok := s1.(windows.Win32FileAttributeData) - if !ok { - return false, nil +func main() { + if len(os.Args) != 2 { + fmt.Printf("Usage: %s file_or_directory\n", os.Args[0]) + os.Exit(1) } - f2, ok := s2.(windows.Win32FileAttributeData) - if !ok { - return false, nil + + if _, err := os.Stat(os.Args[1]); err != nil { + log.Fatal(err) } - return f1.FileAttributes == f2.FileAttributes, nil -} -func compareCapabilities(p1, p2 string) (bool, error) { - // TODO: Use windows equivalent - return true, nil -} + secInfo, err := windows.GetNamedSecurityInfo( + os.Args[1], windows.SE_FILE_OBJECT, + windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION) -func isLinked(os.FileInfo) bool { - return false + if err != nil { + log.Fatal(err) + } + sid, _, err := secInfo.Owner() + if err != nil { + log.Fatal(err) + } + acct, _, _, err := sid.LookupAccount(".") + if err != nil { + log.Fatal(err) + } + fmt.Printf("%s:%s", acct, sid) } diff --git a/integration/issue7496_linux_test.go b/integration/issue7496_linux_test.go new file mode 100644 index 0000000..2c7e33e --- /dev/null +++ b/integration/issue7496_linux_test.go @@ -0,0 +1,180 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package integration + +import ( + "bufio" + "context" + "io" + "net" + "os" + "strconv" + "strings" + "syscall" + "testing" + "time" + + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/runtime/v2/shim" + apitask "github.com/containerd/containerd/runtime/v2/task" + "github.com/containerd/ttrpc" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + exec "golang.org/x/sys/execabs" +) + +// TestIssue7496 is used to reproduce https://github.com/containerd/containerd/issues/7496 +// +// NOTE: https://github.com/containerd/containerd/issues/8931 is the same issue. +func TestIssue7496(t *testing.T) { + t.Logf("Checking CRI config's default runtime") + criCfg, err := CRIConfig() + require.NoError(t, err) + + typ := criCfg.ContainerdConfig.Runtimes[criCfg.ContainerdConfig.DefaultRuntimeName].Type + if !strings.HasSuffix(typ, "runc.v2") { + t.Skipf("default runtime should be runc.v2, but it's not: %s", typ) + } + + ctx := namespaces.WithNamespace(context.Background(), "k8s.io") + + t.Logf("Create a pod config and run sandbox container") + sbConfig := PodSandboxConfig("sandbox", "issue7496") + sbID, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) + require.NoError(t, err) + + shimCli := connectToShim(ctx, t, sbID) + + delayInSec := 12 + t.Logf("[shim pid: %d]: Injecting %d seconds delay to umount2 syscall", + shimPid(ctx, t, shimCli), + delayInSec) + + doneCh := injectDelayToUmount2(ctx, t, shimCli, delayInSec /* CRI plugin uses 10 seconds to delete task */) + + t.Logf("Create a container config and run container in a pod") + pauseImage := GetImage(Pause) + EnsureImageExists(t, pauseImage) + + containerConfig := ContainerConfig("pausecontainer", pauseImage) + cnID, err := runtimeService.CreateContainer(sbID, containerConfig, sbConfig) + require.NoError(t, err) + require.NoError(t, runtimeService.StartContainer(cnID)) + + t.Logf("Start to StopPodSandbox and RemovePodSandbox") + ctx, cancelFn := 
context.WithTimeout(ctx, 3*time.Minute) + defer cancelFn() + for { + select { + case <-ctx.Done(): + require.NoError(t, ctx.Err(), "The StopPodSandbox should be done in time") + default: + } + + err := runtimeService.StopPodSandbox(sbID) + if err != nil { + t.Logf("Failed to StopPodSandbox: %v", err) + continue + } + + err = runtimeService.RemovePodSandbox(sbID) + if err == nil { + break + } + t.Logf("Failed to RemovePodSandbox: %v", err) + time.Sleep(1 * time.Second) + } + + t.Logf("PodSandbox %s has been deleted and start to wait for strace exit", sbID) + select { + case <-time.After(15 * time.Second): + resp, err := shimCli.Connect(ctx, &apitask.ConnectRequest{}) + assert.Error(t, err, "should failed to call shim connect API") + + t.Errorf("Strace doesn't exit in time") + + t.Logf("Cleanup the shim (pid: %d)", resp.ShimPid) + syscall.Kill(int(resp.ShimPid), syscall.SIGKILL) + <-doneCh + case <-doneCh: + } +} + +// injectDelayToUmount2 uses strace(1) to inject delay on umount2 syscall to +// simulate IO pressure because umount2 might force kernel to syncfs, for +// example, umount overlayfs rootfs which doesn't with volatile. 
+// +// REF: https://man7.org/linux/man-pages/man1/strace.1.html +func injectDelayToUmount2(ctx context.Context, t *testing.T, shimCli apitask.TaskService, delayInSec int) chan struct{} { + pid := shimPid(ctx, t, shimCli) + + doneCh := make(chan struct{}) + + cmd := exec.CommandContext(ctx, "strace", + "-p", strconv.Itoa(int(pid)), "-f", // attach to all the threads + "--detach-on=execve", // stop to attach runc child-processes + "--trace=umount2", // only trace umount2 syscall + "-e", "inject=umount2:delay_enter="+strconv.Itoa(delayInSec)+"s", + ) + cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGKILL} + + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + cmd.Stderr = pipeW + + require.NoError(t, cmd.Start()) + + // ensure that strace has attached to the shim + readyCh := make(chan struct{}) + go func() { + defer close(doneCh) + + bufReader := bufio.NewReader(pipeR) + _, err := bufReader.Peek(1) + assert.NoError(t, err, "failed to ensure that strace has attached to shim") + + close(readyCh) + io.Copy(os.Stdout, bufReader) + t.Logf("Strace has exited") + }() + + go func() { + defer pipeW.Close() + assert.NoError(t, cmd.Wait(), "strace should exit with zero code") + }() + + <-readyCh + return doneCh +} + +func connectToShim(ctx context.Context, t *testing.T, id string) apitask.TaskService { + addr, err := shim.SocketAddress(ctx, containerdEndpoint, id) + require.NoError(t, err) + addr = strings.TrimPrefix(addr, "unix://") + + conn, err := net.Dial("unix", addr) + require.NoError(t, err) + + client := ttrpc.NewClient(conn) + return apitask.NewTaskClient(client) +} + +func shimPid(ctx context.Context, t *testing.T, shimCli apitask.TaskService) uint32 { + resp, err := shimCli.Connect(ctx, &apitask.ConnectRequest{}) + require.NoError(t, err) + return resp.ShimPid +} diff --git a/integration/main_test.go b/integration/main_test.go index 33e1054..40c0a67 100644 --- a/integration/main_test.go +++ b/integration/main_test.go @@ -1,5 +1,3 @@ -// +build linux - /* 
Copyright The containerd Authors. @@ -19,32 +17,38 @@ package integration import ( + "bytes" "context" "encoding/json" + "errors" "flag" "fmt" + "io" "os" - "os/exec" + "path/filepath" + goruntime "runtime" "strconv" "strings" + "syscall" "testing" "time" "github.com/containerd/containerd" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - cri "k8s.io/cri-api/pkg/apis" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - + "github.com/containerd/containerd/containers" + cri "github.com/containerd/containerd/integration/cri-api/pkg/apis" "github.com/containerd/containerd/integration/remote" dialer "github.com/containerd/containerd/integration/util" criconfig "github.com/containerd/containerd/pkg/cri/config" "github.com/containerd/containerd/pkg/cri/constants" "github.com/containerd/containerd/pkg/cri/server" "github.com/containerd/containerd/pkg/cri/util" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + exec "golang.org/x/sys/execabs" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) const ( @@ -78,28 +82,29 @@ func ConnectDaemons() error { var err error runtimeService, err = remote.NewRuntimeService(*criEndpoint, timeout) if err != nil { - return errors.Wrap(err, "failed to create runtime service") + return fmt.Errorf("failed to create runtime service: %w", err) } imageService, err = remote.NewImageService(*criEndpoint, timeout) if err != nil { - return errors.Wrap(err, "failed to create image service") + return fmt.Errorf("failed to create image service: %w", err) } // Since CRI grpc client doesn't have `WithBlock` specified, we // need to check whether it is actually connected. - // TODO(random-liu): Extend cri remote client to accept extra grpc options. 
+ // TODO(#6069) Use grpc options to block on connect and remove for this list containers request. _, err = runtimeService.ListContainers(&runtime.ContainerFilter{}) if err != nil { - return errors.Wrap(err, "failed to list containers") + return fmt.Errorf("failed to list containers: %w", err) } _, err = imageService.ListImages(&runtime.ImageFilter{}) if err != nil { - return errors.Wrap(err, "failed to list images") + return fmt.Errorf("failed to list images: %w", err) } // containerdEndpoint is the same with criEndpoint now containerdEndpoint = strings.TrimPrefix(*criEndpoint, "unix://") + containerdEndpoint = strings.TrimPrefix(containerdEndpoint, "npipe:") containerdClient, err = containerd.New(containerdEndpoint, containerd.WithDefaultNamespace(k8sNamespace)) if err != nil { - return errors.Wrap(err, "failed to connect containerd") + return fmt.Errorf("failed to connect containerd: %w", err) } return nil } @@ -163,6 +168,15 @@ func WithPodHostname(hostname string) PodSandboxOpts { } } +// Add pod labels. +func WithPodLabels(kvs map[string]string) PodSandboxOpts { + return func(p *runtime.PodSandboxConfig) { + for k, v := range kvs { + p.Labels[k] = v + } + } +} + // PodSandboxConfig generates a pod sandbox config for test. 
func PodSandboxConfig(name, ns string, opts ...PodSandboxOpts) *runtime.PodSandboxConfig { config := &runtime.PodSandboxConfig{ @@ -173,7 +187,9 @@ func PodSandboxConfig(name, ns string, opts ...PodSandboxOpts) *runtime.PodSandb Uid: util.GenerateID(), Namespace: Randomize(ns), }, - Linux: &runtime.LinuxPodSandboxConfig{}, + Linux: &runtime.LinuxPodSandboxConfig{}, + Annotations: make(map[string]string), + Labels: make(map[string]string), } for _, opt := range opts { opt(config) @@ -181,6 +197,29 @@ func PodSandboxConfig(name, ns string, opts ...PodSandboxOpts) *runtime.PodSandb return config } +func PodSandboxConfigWithCleanup(t *testing.T, name, ns string, opts ...PodSandboxOpts) (string, *runtime.PodSandboxConfig) { + sbConfig := PodSandboxConfig(name, ns, opts...) + sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, runtimeService.StopPodSandbox(sb)) + assert.NoError(t, runtimeService.RemovePodSandbox(sb)) + }) + + return sb, sbConfig +} + +// Set Windows HostProcess on the pod. 
+func WithWindowsHostProcessPod(p *runtime.PodSandboxConfig) { + if p.Windows == nil { + p.Windows = &runtime.WindowsPodSandboxConfig{} + } + if p.Windows.SecurityContext == nil { + p.Windows.SecurityContext = &runtime.WindowsSandboxSecurityContext{} + } + p.Windows.SecurityContext.HostProcess = true +} + // ContainerOpts to set any specific attribute like labels, // annotations, metadata etc type ContainerOpts func(*runtime.ContainerConfig) @@ -207,6 +246,39 @@ func WithResources(r *runtime.LinuxContainerResources) ContainerOpts { } } +func WithVolumeMount(hostPath, containerPath string) ContainerOpts { + return func(c *runtime.ContainerConfig) { + hostPath, _ = filepath.Abs(hostPath) + containerPath, _ = filepath.Abs(containerPath) + mount := &runtime.Mount{HostPath: hostPath, ContainerPath: containerPath} + c.Mounts = append(c.Mounts, mount) + } +} + +func WithWindowsUsername(username string) ContainerOpts { + return func(c *runtime.ContainerConfig) { + if c.Windows == nil { + c.Windows = &runtime.WindowsContainerConfig{} + } + if c.Windows.SecurityContext == nil { + c.Windows.SecurityContext = &runtime.WindowsContainerSecurityContext{} + } + c.Windows.SecurityContext.RunAsUsername = username + } +} + +func WithWindowsHostProcessContainer() ContainerOpts { + return func(c *runtime.ContainerConfig) { + if c.Windows == nil { + c.Windows = &runtime.WindowsContainerConfig{} + } + if c.Windows.SecurityContext == nil { + c.Windows.SecurityContext = &runtime.WindowsContainerSecurityContext{} + } + c.Windows.SecurityContext.HostProcess = true + } +} + // Add container command. func WithCommand(cmd string, args ...string) ContainerOpts { return func(c *runtime.ContainerConfig) { @@ -239,6 +311,45 @@ func WithLogPath(path string) ContainerOpts { } } +// WithRunAsUser sets the uid. 
+func WithRunAsUser(uid int64) ContainerOpts { + return func(c *runtime.ContainerConfig) { + if c.Linux == nil { + c.Linux = &runtime.LinuxContainerConfig{} + } + if c.Linux.SecurityContext == nil { + c.Linux.SecurityContext = &runtime.LinuxContainerSecurityContext{} + } + c.Linux.SecurityContext.RunAsUser = &runtime.Int64Value{Value: uid} + } +} + +// WithRunAsUsername sets the username. +func WithRunAsUsername(username string) ContainerOpts { + return func(c *runtime.ContainerConfig) { + if c.Linux == nil { + c.Linux = &runtime.LinuxContainerConfig{} + } + if c.Linux.SecurityContext == nil { + c.Linux.SecurityContext = &runtime.LinuxContainerSecurityContext{} + } + c.Linux.SecurityContext.RunAsUsername = username + } +} + +// WithRunAsGroup sets the gid. +func WithRunAsGroup(gid int64) ContainerOpts { + return func(c *runtime.ContainerConfig) { + if c.Linux == nil { + c.Linux = &runtime.LinuxContainerConfig{} + } + if c.Linux.SecurityContext == nil { + c.Linux.SecurityContext = &runtime.LinuxContainerSecurityContext{} + } + c.Linux.SecurityContext.RunAsGroup = &runtime.Int64Value{Value: gid} + } +} + // WithSupplementalGroups adds supplemental groups. func WithSupplementalGroups(gids []int64) ContainerOpts { return func(c *runtime.ContainerConfig) { @@ -316,10 +427,17 @@ func Randomize(str string) string { } // KillProcess kills the process by name. pkill is used. 
-func KillProcess(name string) error { - output, err := exec.Command("pkill", "-x", fmt.Sprintf("^%s$", name)).CombinedOutput() +func KillProcess(name string, signal syscall.Signal) error { + var command []string + if goruntime.GOOS == "windows" { + command = []string{"taskkill", "/IM", name, "/F"} + } else { + command = []string{"pkill", "-" + strconv.Itoa(int(signal)), "-x", fmt.Sprintf("^%s$", name)} + } + + output, err := exec.Command(command[0], command[1:]...).CombinedOutput() if err != nil { - return errors.Errorf("failed to kill %q - error: %v, output: %q", name, err, output) + return fmt.Errorf("failed to kill %q - error: %v, output: %q", name, err, output) } return nil } @@ -328,7 +446,7 @@ func KillProcess(name string) error { func KillPid(pid int) error { output, err := exec.Command("kill", strconv.Itoa(pid)).CombinedOutput() if err != nil { - return errors.Errorf("failed to kill %d - error: %v, output: %q", pid, err, output) + return fmt.Errorf("failed to kill %d - error: %v, output: %q", pid, err, output) } return nil } @@ -339,24 +457,101 @@ func PidOf(name string) (int, error) { output := strings.TrimSpace(string(b)) if err != nil { if len(output) != 0 { - return 0, errors.Errorf("failed to run pidof %q - error: %v, output: %q", name, err, output) + return 0, fmt.Errorf("failed to run pidof %q - error: %v, output: %q", name, err, output) } return 0, nil } return strconv.Atoi(output) } +// PidsOf returns pid(s) of a process by name +func PidsOf(name string) ([]int, error) { + if len(name) == 0 { + return []int{}, fmt.Errorf("name is required") + } + + procDirFD, err := os.Open("/proc") + if err != nil { + return nil, fmt.Errorf("failed to open /proc: %w", err) + } + defer procDirFD.Close() + + res := []int{} + for { + fileInfos, err := procDirFD.Readdir(100) + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("failed to readdir: %w", err) + } + + for _, fileInfo := range fileInfos { + if !fileInfo.IsDir() { + continue + } + + 
pid, err := strconv.Atoi(fileInfo.Name()) + if err != nil { + continue + } + + exePath, err := os.Readlink(filepath.Join("/proc", fileInfo.Name(), "exe")) + if err != nil { + continue + } + + if strings.HasSuffix(exePath, name) { + res = append(res, pid) + } + } + } + return res, nil +} + +// PidEnvs returns the environ of pid in key-value pairs. +func PidEnvs(pid int) (map[string]string, error) { + envPath := filepath.Join("/proc", strconv.Itoa(pid), "environ") + + b, err := os.ReadFile(envPath) + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", envPath, err) + } + + values := bytes.Split(b, []byte{0}) + if len(values) == 0 { + return nil, nil + } + + res := make(map[string]string) + for _, value := range values { + value := strings.TrimSpace(string(value)) + if len(value) == 0 { + continue + } + + parts := strings.SplitN(value, "=", 2) + if len(parts) == 2 { + res[parts[0]] = parts[1] + } + } + return res, nil +} + // RawRuntimeClient returns a raw grpc runtime service client. 
func RawRuntimeClient() (runtime.RuntimeServiceClient, error) { addr, dialer, err := dialer.GetAddressAndDialer(*criEndpoint) if err != nil { - return nil, errors.Wrap(err, "failed to get dialer") + return nil, fmt.Errorf("failed to get dialer: %w", err) } ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithContextDialer(dialer)) + conn, err := grpc.DialContext(ctx, addr, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + ) if err != nil { - return nil, errors.Wrap(err, "failed to connect cri endpoint") + return nil, fmt.Errorf("failed to connect cri endpoint: %w", err) } return runtime.NewRuntimeServiceClient(conn), nil } @@ -365,15 +560,15 @@ func RawRuntimeClient() (runtime.RuntimeServiceClient, error) { func CRIConfig() (*criconfig.Config, error) { client, err := RawRuntimeClient() if err != nil { - return nil, errors.Wrap(err, "failed to get raw runtime client") + return nil, fmt.Errorf("failed to get raw runtime client: %w", err) } resp, err := client.Status(context.Background(), &runtime.StatusRequest{Verbose: true}) if err != nil { - return nil, errors.Wrap(err, "failed to get status") + return nil, fmt.Errorf("failed to get status: %w", err) } config := &criconfig.Config{} if err := json.Unmarshal([]byte(resp.Info["config"]), config); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal config") + return nil, fmt.Errorf("failed to unmarshal config: %w", err) } return config, nil } @@ -382,25 +577,25 @@ func CRIConfig() (*criconfig.Config, error) { func SandboxInfo(id string) (*runtime.PodSandboxStatus, *server.SandboxInfo, error) { client, err := RawRuntimeClient() if err != nil { - return nil, nil, errors.Wrap(err, "failed to get raw runtime client") + return nil, nil, fmt.Errorf("failed to get raw runtime client: %w", err) } resp, err := client.PodSandboxStatus(context.Background(), 
&runtime.PodSandboxStatusRequest{ PodSandboxId: id, Verbose: true, }) if err != nil { - return nil, nil, errors.Wrap(err, "failed to get sandbox status") + return nil, nil, fmt.Errorf("failed to get sandbox status: %w", err) } status := resp.GetStatus() var info server.SandboxInfo if err := json.Unmarshal([]byte(resp.GetInfo()["info"]), &info); err != nil { - return nil, nil, errors.Wrap(err, "failed to unmarshal sandbox info") + return nil, nil, fmt.Errorf("failed to unmarshal sandbox info: %w", err) } return status, &info, nil } -func RestartContainerd(t *testing.T) { - require.NoError(t, KillProcess(*containerdBin)) +func RestartContainerd(t *testing.T, signal syscall.Signal) { + require.NoError(t, KillProcess(*containerdBin, signal)) // Use assert so that the 3rd wait always runs, this makes sure // containerd is running before this function returns. @@ -416,3 +611,7 @@ func RestartContainerd(t *testing.T) { return ConnectDaemons() == nil, nil }, time.Second, 30*time.Second), "wait for containerd to be restarted") } + +func GetContainer(id string) (containers.Container, error) { + return containerdClient.ContainerService().Get(context.Background(), id) +} diff --git a/integration/no_metadata_test.go b/integration/no_metadata_test.go index 5766e19..c992dce 100644 --- a/integration/no_metadata_test.go +++ b/integration/no_metadata_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -22,7 +20,7 @@ import ( "testing" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestRunPodSandboxWithoutMetadata(t *testing.T) { @@ -34,16 +32,9 @@ func TestRunPodSandboxWithoutMetadata(t *testing.T) { } func TestCreateContainerWithoutMetadata(t *testing.T) { - sbConfig := PodSandboxConfig("sandbox", "container-create") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - // Make sure the sandbox is cleaned up in any case. - runtimeService.StopPodSandbox(sb) - runtimeService.RemovePodSandbox(sb) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "container-create") config := &runtime.ContainerConfig{} - _, err = runtimeService.CreateContainer(sb, config, sbConfig) + _, err := runtimeService.CreateContainer(sb, config, sbConfig) require.Error(t, err) _, err = runtimeService.Status() require.NoError(t, err) diff --git a/integration/pod_dualstack_test.go b/integration/pod_dualstack_test.go index 95c22b5..14231f2 100644 --- a/integration/pod_dualstack_test.go +++ b/integration/pod_dualstack_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -19,49 +17,43 @@ package integration import ( - "io/ioutil" "net" "os" "path/filepath" "regexp" + goruntime "runtime" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestPodDualStack(t *testing.T) { - testPodLogDir, err := ioutil.TempDir("/tmp", "dualstack") - require.NoError(t, err) - defer os.RemoveAll(testPodLogDir) + testPodLogDir := t.TempDir() t.Log("Create a sandbox") - sbConfig := PodSandboxConfig("sandbox", "dualstack", WithPodLogDirectory(testPodLogDir)) - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "dualstack", WithPodLogDirectory(testPodLogDir)) var ( testImage = GetImage(BusyBox) containerName = "test-container" ) - t.Logf("Pull test image %q", testImage) - img, err := imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) - defer func() { - assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: img})) - }() + + EnsureImageExists(t, testImage) t.Log("Create a container to print env") + var command ContainerOpts + if goruntime.GOOS == "windows" { + command = WithCommand("ipconfig") + } else { + command = WithCommand("ip", "address", "show", "dev", "eth0") + } cnConfig := ContainerConfig( containerName, testImage, - WithCommand("ip", "address", "show", "dev", "eth0"), + command, WithLogPath(containerName), ) cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig) @@ -82,16 +74,25 @@ func TestPodDualStack(t *testing.T) { return false, nil }, time.Second, 30*time.Second)) - content, err := ioutil.ReadFile(filepath.Join(testPodLogDir, containerName)) + content, err := 
os.ReadFile(filepath.Join(testPodLogDir, containerName)) assert.NoError(t, err) status, err := runtimeService.PodSandboxStatus(sb) require.NoError(t, err) ip := status.GetNetwork().GetIp() additionalIps := status.GetNetwork().GetAdditionalIps() - ipv4Enabled, err := regexp.MatchString("inet .* scope global", string(content)) + var ipv4Regex, ipv6Regex string + if goruntime.GOOS == "windows" { + ipv4Regex = "^\\s*IPv4 Address" + ipv6Regex = "^\\s*IPv6 Address" + } else { + ipv4Regex = "inet .* scope global" + ipv6Regex = "inet6 .* scope global" + } + + ipv4Enabled, err := regexp.MatchString(ipv4Regex, string(content)) assert.NoError(t, err) - ipv6Enabled, err := regexp.MatchString("inet6 .* scope global", string(content)) + ipv6Enabled, err := regexp.MatchString(ipv6Regex, string(content)) assert.NoError(t, err) if ipv4Enabled && ipv6Enabled { diff --git a/integration/pod_hostname_test.go b/integration/pod_hostname_test.go index 38a02c4..c7a4732 100644 --- a/integration/pod_hostname_test.go +++ b/integration/pod_hostname_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -19,15 +17,16 @@ package integration import ( - "io/ioutil" "os" "path/filepath" + goruntime "runtime" + "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestPodHostname(t *testing.T) { @@ -37,6 +36,7 @@ func TestPodHostname(t *testing.T) { opts []PodSandboxOpts expectedHostname string expectErr bool + needsHostNetwork bool }{ "regular pod with custom hostname": { opts: []PodSandboxOpts{ @@ -49,19 +49,22 @@ func TestPodHostname(t *testing.T) { WithHostNetwork, }, expectedHostname: hostname, + needsHostNetwork: true, }, "host network pod with custom hostname should fail": { opts: []PodSandboxOpts{ WithHostNetwork, WithPodHostname("test-hostname"), }, - expectErr: true, + expectErr: true, + needsHostNetwork: true, }, } { t.Run(name, func(t *testing.T) { - testPodLogDir, err := ioutil.TempDir("/tmp", "hostname") - require.NoError(t, err) - defer os.RemoveAll(testPodLogDir) + if test.needsHostNetwork && goruntime.GOOS == "windows" { + t.Skip("Skipped on Windows.") + } + testPodLogDir := t.TempDir() opts := append(test.opts, WithPodLogDirectory(testPodLogDir)) t.Log("Create a sandbox with hostname") @@ -86,19 +89,15 @@ func TestPodHostname(t *testing.T) { testImage = GetImage(BusyBox) containerName = "test-container" ) - t.Logf("Pull test image %q", testImage) - img, err := imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) - defer func() { - assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: img})) - }() + + EnsureImageExists(t, testImage) t.Log("Create a container to print env") cnConfig := ContainerConfig( containerName, testImage, WithCommand("sh", "-c", - "echo -n /etc/hostname= && cat /etc/hostname && env"), + "echo -n /etc/hostname= && hostname && env"), WithLogPath(containerName), ) cn, err := runtimeService.CreateContainer(sb, 
cnConfig, sbConfig) @@ -119,11 +118,15 @@ func TestPodHostname(t *testing.T) { return false, nil }, time.Second, 30*time.Second)) - content, err := ioutil.ReadFile(filepath.Join(testPodLogDir, containerName)) + content, err := os.ReadFile(filepath.Join(testPodLogDir, containerName)) assert.NoError(t, err) t.Log("Search hostname env in container log") - assert.Contains(t, string(content), "HOSTNAME="+test.expectedHostname) + if goruntime.GOOS == "windows" { + assert.Contains(t, string(content), "COMPUTERNAME="+strings.ToUpper(test.expectedHostname)) + } else { + assert.Contains(t, string(content), "HOSTNAME="+test.expectedHostname) + } t.Log("Search /etc/hostname content in container log") assert.Contains(t, string(content), "/etc/hostname="+test.expectedHostname) diff --git a/integration/remote/remote_image.go b/integration/remote/remote_image.go index 1212277..16088a9 100644 --- a/integration/remote/remote_image.go +++ b/integration/remote/remote_image.go @@ -39,10 +39,11 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "k8s.io/klog/v2" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "github.com/containerd/containerd/integration/cri-api/pkg/apis" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/integration/remote/util" ) @@ -64,7 +65,11 @@ func NewImageService(endpoint string, connectionTimeout time.Duration) (internal ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout) defer cancel() - conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithContextDialer(dialer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) + conn, err := grpc.DialContext(ctx, addr, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)), + ) if err != nil { klog.Errorf("Connect remote image 
service %s failed: %v", addr, err) return nil, err @@ -77,13 +82,13 @@ func NewImageService(endpoint string, connectionTimeout time.Duration) (internal } // ListImages lists available images. -func (r *ImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { +func (r *ImageService) ListImages(filter *runtimeapi.ImageFilter, opts ...grpc.CallOption) ([]*runtimeapi.Image, error) { ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() resp, err := r.imageClient.ListImages(ctx, &runtimeapi.ListImagesRequest{ Filter: filter, - }) + }, opts...) if err != nil { klog.Errorf("ListImages with filter %+v from image service failed: %v", filter, err) return nil, err @@ -93,13 +98,13 @@ func (r *ImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi } // ImageStatus returns the status of the image. -func (r *ImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) { +func (r *ImageService) ImageStatus(image *runtimeapi.ImageSpec, opts ...grpc.CallOption) (*runtimeapi.Image, error) { ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() resp, err := r.imageClient.ImageStatus(ctx, &runtimeapi.ImageStatusRequest{ Image: image, - }) + }, opts...) if err != nil { klog.Errorf("ImageStatus %q from image service failed: %v", image.Image, err) return nil, err @@ -117,7 +122,7 @@ func (r *ImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Ima } // PullImage pulls an image with authentication config. 
-func (r *ImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (r *ImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig, opts ...grpc.CallOption) (string, error) { ctx, cancel := getContextWithCancel() defer cancel() @@ -125,7 +130,7 @@ func (r *ImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.A Image: image, Auth: auth, SandboxConfig: podSandboxConfig, - }) + }, opts...) if err != nil { klog.Errorf("PullImage %q from image service failed: %v", image.Image, err) return "", err @@ -141,13 +146,13 @@ func (r *ImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.A } // RemoveImage removes the image. -func (r *ImageService) RemoveImage(image *runtimeapi.ImageSpec) error { +func (r *ImageService) RemoveImage(image *runtimeapi.ImageSpec, opts ...grpc.CallOption) error { ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() _, err := r.imageClient.RemoveImage(ctx, &runtimeapi.RemoveImageRequest{ Image: image, - }) + }, opts...) if err != nil { klog.Errorf("RemoveImage %q from image service failed: %v", image.Image, err) return err @@ -157,13 +162,13 @@ func (r *ImageService) RemoveImage(image *runtimeapi.ImageSpec) error { } // ImageFsInfo returns information of the filesystem that is used to store images. -func (r *ImageService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { +func (r *ImageService) ImageFsInfo(opts ...grpc.CallOption) ([]*runtimeapi.FilesystemUsage, error) { // Do not set timeout, because `ImageFsInfo` takes time. // TODO(random-liu): Should we assume runtime should cache the result, and set timeout here? ctx, cancel := getContextWithCancel() defer cancel() - resp, err := r.imageClient.ImageFsInfo(ctx, &runtimeapi.ImageFsInfoRequest{}) + resp, err := r.imageClient.ImageFsInfo(ctx, &runtimeapi.ImageFsInfoRequest{}, opts...) 
if err != nil { klog.Errorf("ImageFsInfo from image service failed: %v", err) return nil, err diff --git a/integration/remote/remote_runtime.go b/integration/remote/remote_runtime.go index 8a1a271..b172b94 100644 --- a/integration/remote/remote_runtime.go +++ b/integration/remote/remote_runtime.go @@ -40,11 +40,12 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "k8s.io/klog/v2" + internalapi "github.com/containerd/containerd/integration/cri-api/pkg/apis" "k8s.io/component-base/logs/logreduction" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" utilexec "k8s.io/utils/exec" "github.com/containerd/containerd/integration/remote/util" @@ -73,7 +74,11 @@ func NewRuntimeService(endpoint string, connectionTimeout time.Duration) (intern ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout) defer cancel() - conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithContextDialer(dialer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) + conn, err := grpc.DialContext(ctx, addr, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)), + ) if err != nil { klog.Errorf("Connect remote runtime %s failed: %v", addr, err) return nil, err @@ -87,7 +92,7 @@ func NewRuntimeService(endpoint string, connectionTimeout time.Duration) (intern } // Version returns the runtime name, runtime version and runtime API version. 
-func (r *RuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) { +func (r *RuntimeService) Version(apiVersion string, opts ...grpc.CallOption) (*runtimeapi.VersionResponse, error) { klog.V(10).Infof("[RuntimeService] Version (apiVersion=%v, timeout=%v)", apiVersion, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) @@ -95,7 +100,7 @@ func (r *RuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse typedVersion, err := r.runtimeClient.Version(ctx, &runtimeapi.VersionRequest{ Version: apiVersion, - }) + }, opts...) if err != nil { klog.Errorf("Version from runtime service failed: %v", err) return nil, err @@ -112,7 +117,7 @@ func (r *RuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse // RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure // the sandbox is in ready state. -func (r *RuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { +func (r *RuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string, opts ...grpc.CallOption) (string, error) { // Use 2 times longer timeout for sandbox operation (4 mins by default) // TODO: Make the pod sandbox timeout configurable. timeout := r.timeout * 2 @@ -125,7 +130,7 @@ func (r *RuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runt resp, err := r.runtimeClient.RunPodSandbox(ctx, &runtimeapi.RunPodSandboxRequest{ Config: config, RuntimeHandler: runtimeHandler, - }) + }, opts...) if err != nil { klog.Errorf("RunPodSandbox from runtime service failed: %v", err) return "", err @@ -144,7 +149,7 @@ func (r *RuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runt // StopPodSandbox stops the sandbox. If there are any running containers in the // sandbox, they should be forced to termination. 
-func (r *RuntimeService) StopPodSandbox(podSandBoxID string) error { +func (r *RuntimeService) StopPodSandbox(podSandBoxID string, opts ...grpc.CallOption) error { klog.V(10).Infof("[RuntimeService] StopPodSandbox (podSandboxID=%v, timeout=%v)", podSandBoxID, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) @@ -152,7 +157,7 @@ func (r *RuntimeService) StopPodSandbox(podSandBoxID string) error { _, err := r.runtimeClient.StopPodSandbox(ctx, &runtimeapi.StopPodSandboxRequest{ PodSandboxId: podSandBoxID, - }) + }, opts...) if err != nil { klog.Errorf("StopPodSandbox %q from runtime service failed: %v", podSandBoxID, err) return err @@ -165,14 +170,14 @@ func (r *RuntimeService) StopPodSandbox(podSandBoxID string) error { // RemovePodSandbox removes the sandbox. If there are any containers in the // sandbox, they should be forcibly removed. -func (r *RuntimeService) RemovePodSandbox(podSandBoxID string) error { +func (r *RuntimeService) RemovePodSandbox(podSandBoxID string, opts ...grpc.CallOption) error { klog.V(10).Infof("[RuntimeService] RemovePodSandbox (podSandboxID=%v, timeout=%v)", podSandBoxID, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() _, err := r.runtimeClient.RemovePodSandbox(ctx, &runtimeapi.RemovePodSandboxRequest{ PodSandboxId: podSandBoxID, - }) + }, opts...) if err != nil { klog.Errorf("RemovePodSandbox %q from runtime service failed: %v", podSandBoxID, err) return err @@ -184,14 +189,14 @@ func (r *RuntimeService) RemovePodSandbox(podSandBoxID string) error { } // PodSandboxStatus returns the status of the PodSandbox. 
-func (r *RuntimeService) PodSandboxStatus(podSandBoxID string) (*runtimeapi.PodSandboxStatus, error) { +func (r *RuntimeService) PodSandboxStatus(podSandBoxID string, opts ...grpc.CallOption) (*runtimeapi.PodSandboxStatus, error) { klog.V(10).Infof("[RuntimeService] PodSandboxStatus (podSandboxID=%v, timeout=%v)", podSandBoxID, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() resp, err := r.runtimeClient.PodSandboxStatus(ctx, &runtimeapi.PodSandboxStatusRequest{ PodSandboxId: podSandBoxID, - }) + }, opts...) if err != nil { return nil, err } @@ -208,14 +213,14 @@ func (r *RuntimeService) PodSandboxStatus(podSandBoxID string) (*runtimeapi.PodS } // ListPodSandbox returns a list of PodSandboxes. -func (r *RuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { +func (r *RuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter, opts ...grpc.CallOption) ([]*runtimeapi.PodSandbox, error) { klog.V(10).Infof("[RuntimeService] ListPodSandbox (filter=%v, timeout=%v)", filter, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() resp, err := r.runtimeClient.ListPodSandbox(ctx, &runtimeapi.ListPodSandboxRequest{ Filter: filter, - }) + }, opts...) if err != nil { klog.Errorf("ListPodSandbox with filter %+v from runtime service failed: %v", filter, err) return nil, err @@ -227,7 +232,7 @@ func (r *RuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([] } // CreateContainer creates a new container in the specified PodSandbox. 
-func (r *RuntimeService) CreateContainer(podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (r *RuntimeService) CreateContainer(podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig, opts ...grpc.CallOption) (string, error) { klog.V(10).Infof("[RuntimeService] CreateContainer (podSandBoxID=%v, timeout=%v)", podSandBoxID, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() @@ -236,7 +241,7 @@ func (r *RuntimeService) CreateContainer(podSandBoxID string, config *runtimeapi PodSandboxId: podSandBoxID, Config: config, SandboxConfig: sandboxConfig, - }) + }, opts...) if err != nil { klog.Errorf("CreateContainer in sandbox %q from runtime service failed: %v", podSandBoxID, err) return "", err @@ -253,14 +258,14 @@ func (r *RuntimeService) CreateContainer(podSandBoxID string, config *runtimeapi } // StartContainer starts the container. -func (r *RuntimeService) StartContainer(containerID string) error { +func (r *RuntimeService) StartContainer(containerID string, opts ...grpc.CallOption) error { klog.V(10).Infof("[RuntimeService] StartContainer (containerID=%v, timeout=%v)", containerID, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() _, err := r.runtimeClient.StartContainer(ctx, &runtimeapi.StartContainerRequest{ ContainerId: containerID, - }) + }, opts...) if err != nil { klog.Errorf("StartContainer %q from runtime service failed: %v", containerID, err) return err @@ -271,7 +276,7 @@ func (r *RuntimeService) StartContainer(containerID string) error { } // StopContainer stops a running container with a grace period (i.e., timeout). 
-func (r *RuntimeService) StopContainer(containerID string, timeout int64) error { +func (r *RuntimeService) StopContainer(containerID string, timeout int64, opts ...grpc.CallOption) error { klog.V(10).Infof("[RuntimeService] StopContainer (containerID=%v, timeout=%v)", containerID, timeout) // Use timeout + default timeout (2 minutes) as timeout to leave extra time // for SIGKILL container and request latency. @@ -283,7 +288,7 @@ func (r *RuntimeService) StopContainer(containerID string, timeout int64) error _, err := r.runtimeClient.StopContainer(ctx, &runtimeapi.StopContainerRequest{ ContainerId: containerID, Timeout: timeout, - }) + }, opts...) if err != nil { klog.Errorf("StopContainer %q from runtime service failed: %v", containerID, err) return err @@ -295,7 +300,7 @@ func (r *RuntimeService) StopContainer(containerID string, timeout int64) error // RemoveContainer removes the container. If the container is running, the container // should be forced to removal. -func (r *RuntimeService) RemoveContainer(containerID string) error { +func (r *RuntimeService) RemoveContainer(containerID string, opts ...grpc.CallOption) error { klog.V(10).Infof("[RuntimeService] RemoveContainer (containerID=%v, timeout=%v)", containerID, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() @@ -303,7 +308,7 @@ func (r *RuntimeService) RemoveContainer(containerID string) error { r.logReduction.ClearID(containerID) _, err := r.runtimeClient.RemoveContainer(ctx, &runtimeapi.RemoveContainerRequest{ ContainerId: containerID, - }) + }, opts...) if err != nil { klog.Errorf("RemoveContainer %q from runtime service failed: %v", containerID, err) return err @@ -314,14 +319,14 @@ func (r *RuntimeService) RemoveContainer(containerID string) error { } // ListContainers lists containers by filters. 
-func (r *RuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { +func (r *RuntimeService) ListContainers(filter *runtimeapi.ContainerFilter, opts ...grpc.CallOption) ([]*runtimeapi.Container, error) { klog.V(10).Infof("[RuntimeService] ListContainers (filter=%v, timeout=%v)", filter, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() resp, err := r.runtimeClient.ListContainers(ctx, &runtimeapi.ListContainersRequest{ Filter: filter, - }) + }, opts...) if err != nil { klog.Errorf("ListContainers with filter %+v from runtime service failed: %v", filter, err) return nil, err @@ -332,14 +337,14 @@ func (r *RuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]* } // ContainerStatus returns the container status. -func (r *RuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) { +func (r *RuntimeService) ContainerStatus(containerID string, opts ...grpc.CallOption) (*runtimeapi.ContainerStatus, error) { klog.V(10).Infof("[RuntimeService] ContainerStatus (containerID=%v, timeout=%v)", containerID, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() resp, err := r.runtimeClient.ContainerStatus(ctx, &runtimeapi.ContainerStatusRequest{ ContainerId: containerID, - }) + }, opts...) if err != nil { // Don't spam the log with endless messages about the same failure. 
if r.logReduction.ShouldMessageBePrinted(err.Error(), containerID) { @@ -361,7 +366,7 @@ func (r *RuntimeService) ContainerStatus(containerID string) (*runtimeapi.Contai } // UpdateContainerResources updates a containers resource config -func (r *RuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources) error { +func (r *RuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, windowsResources *runtimeapi.WindowsContainerResources, opts ...grpc.CallOption) error { klog.V(10).Infof("[RuntimeService] UpdateContainerResources (containerID=%v, timeout=%v)", containerID, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() @@ -369,7 +374,8 @@ func (r *RuntimeService) UpdateContainerResources(containerID string, resources _, err := r.runtimeClient.UpdateContainerResources(ctx, &runtimeapi.UpdateContainerResourcesRequest{ ContainerId: containerID, Linux: resources, - }) + Windows: windowsResources, + }, opts...) if err != nil { klog.Errorf("UpdateContainerResources %q from runtime service failed: %v", containerID, err) return err @@ -381,7 +387,7 @@ func (r *RuntimeService) UpdateContainerResources(containerID string, resources // ExecSync executes a command in the container, and returns the stdout output. // If command exits with a non-zero exit code, an error is returned. -func (r *RuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) { +func (r *RuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration, opts ...grpc.CallOption) (stdout []byte, stderr []byte, err error) { klog.V(10).Infof("[RuntimeService] ExecSync (containerID=%v, timeout=%v)", containerID, timeout) // Do not set timeout when timeout is 0. 
var ctx context.Context @@ -401,7 +407,7 @@ func (r *RuntimeService) ExecSync(containerID string, cmd []string, timeout time Cmd: cmd, Timeout: timeoutSeconds, } - resp, err := r.runtimeClient.ExecSync(ctx, req) + resp, err := r.runtimeClient.ExecSync(ctx, req, opts...) if err != nil { klog.Errorf("ExecSync %s '%s' from runtime service failed: %v", containerID, strings.Join(cmd, " "), err) return nil, nil, err @@ -420,12 +426,12 @@ func (r *RuntimeService) ExecSync(containerID string, cmd []string, timeout time } // Exec prepares a streaming endpoint to execute a command in the container, and returns the address. -func (r *RuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { +func (r *RuntimeService) Exec(req *runtimeapi.ExecRequest, opts ...grpc.CallOption) (*runtimeapi.ExecResponse, error) { klog.V(10).Infof("[RuntimeService] Exec (timeout=%v)", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.Exec(ctx, req) + resp, err := r.runtimeClient.Exec(ctx, req, opts...) if err != nil { klog.Errorf("Exec %s '%s' from runtime service failed: %v", req.ContainerId, strings.Join(req.Cmd, " "), err) return nil, err @@ -442,12 +448,12 @@ func (r *RuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResp } // Attach prepares a streaming endpoint to attach to a running container, and returns the address. -func (r *RuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { +func (r *RuntimeService) Attach(req *runtimeapi.AttachRequest, opts ...grpc.CallOption) (*runtimeapi.AttachResponse, error) { klog.V(10).Infof("[RuntimeService] Attach (containerId=%v, timeout=%v)", req.ContainerId, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.Attach(ctx, req) + resp, err := r.runtimeClient.Attach(ctx, req, opts...) 
if err != nil { klog.Errorf("Attach %s from runtime service failed: %v", req.ContainerId, err) return nil, err @@ -463,12 +469,12 @@ func (r *RuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.Atta } // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address. -func (r *RuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { +func (r *RuntimeService) PortForward(req *runtimeapi.PortForwardRequest, opts ...grpc.CallOption) (*runtimeapi.PortForwardResponse, error) { klog.V(10).Infof("[RuntimeService] PortForward (podSandboxID=%v, port=%v, timeout=%v)", req.PodSandboxId, req.Port, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.PortForward(ctx, req) + resp, err := r.runtimeClient.PortForward(ctx, req, opts...) if err != nil { klog.Errorf("PortForward %s from runtime service failed: %v", req.PodSandboxId, err) return nil, err @@ -487,7 +493,7 @@ func (r *RuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runti // UpdateRuntimeConfig updates the config of a runtime service. The only // update payload currently supported is the pod CIDR assigned to a node, // and the runtime service just proxies it down to the network plugin. -func (r *RuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error { +func (r *RuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig, opts ...grpc.CallOption) error { klog.V(10).Infof("[RuntimeService] UpdateRuntimeConfig (runtimeConfig=%v, timeout=%v)", runtimeConfig, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() @@ -497,7 +503,7 @@ func (r *RuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeCo // really looking to surface destination unreachable. 
_, err := r.runtimeClient.UpdateRuntimeConfig(ctx, &runtimeapi.UpdateRuntimeConfigRequest{ RuntimeConfig: runtimeConfig, - }) + }, opts...) if err != nil { return err @@ -508,12 +514,12 @@ func (r *RuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeCo } // Status returns the status of the runtime. -func (r *RuntimeService) Status() (*runtimeapi.RuntimeStatus, error) { +func (r *RuntimeService) Status(opts ...grpc.CallOption) (*runtimeapi.RuntimeStatus, error) { klog.V(10).Infof("[RuntimeService] Status (timeout=%v)", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.Status(ctx, &runtimeapi.StatusRequest{}) + resp, err := r.runtimeClient.Status(ctx, &runtimeapi.StatusRequest{}, opts...) if err != nil { klog.Errorf("Status from runtime service failed: %v", err) return nil, err @@ -531,14 +537,14 @@ func (r *RuntimeService) Status() (*runtimeapi.RuntimeStatus, error) { } // ContainerStats returns the stats of the container. -func (r *RuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { +func (r *RuntimeService) ContainerStats(containerID string, opts ...grpc.CallOption) (*runtimeapi.ContainerStats, error) { klog.V(10).Infof("[RuntimeService] ContainerStats (containerID=%v, timeout=%v)", containerID, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() resp, err := r.runtimeClient.ContainerStats(ctx, &runtimeapi.ContainerStatsRequest{ ContainerId: containerID, - }) + }, opts...) 
if err != nil { if r.logReduction.ShouldMessageBePrinted(err.Error(), containerID) { klog.Errorf("ContainerStats %q from runtime service failed: %v", containerID, err) @@ -552,7 +558,7 @@ func (r *RuntimeService) ContainerStats(containerID string) (*runtimeapi.Contain } // ListContainerStats lists all container stats given the provided filter -func (r *RuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { +func (r *RuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter, opts ...grpc.CallOption) ([]*runtimeapi.ContainerStats, error) { klog.V(10).Infof("[RuntimeService] ListContainerStats (filter=%v)", filter) // Do not set timeout, because writable layer stats collection takes time. // TODO(random-liu): Should we assume runtime should cache the result, and set timeout here? @@ -561,7 +567,7 @@ func (r *RuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFil resp, err := r.runtimeClient.ListContainerStats(ctx, &runtimeapi.ListContainerStatsRequest{ Filter: filter, - }) + }, opts...) if err != nil { klog.Errorf("ListContainerStats with filter %+v from runtime service failed: %v", filter, err) return nil, err @@ -572,12 +578,14 @@ func (r *RuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFil } // ReopenContainerLog reopens the container log for the given container ID -func (r *RuntimeService) ReopenContainerLog(containerID string) error { +func (r *RuntimeService) ReopenContainerLog(containerID string, opts ...grpc.CallOption) error { klog.V(10).Infof("[RuntimeService] ReopenContainerLog (containerID=%v, timeout=%v)", containerID, r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - _, err := r.runtimeClient.ReopenContainerLog(ctx, &runtimeapi.ReopenContainerLogRequest{ContainerId: containerID}) + _, err := r.runtimeClient.ReopenContainerLog(ctx, &runtimeapi.ReopenContainerLogRequest{ + ContainerId: containerID, + }, opts...) 
if err != nil { klog.Errorf("ReopenContainerLog %q from runtime service failed: %v", containerID, err) return err diff --git a/integration/remote/util/util_unix.go b/integration/remote/util/util_unix.go index de6d2c8..2be67bd 100644 --- a/integration/remote/util/util_unix.go +++ b/integration/remote/util/util_unix.go @@ -1,3 +1,4 @@ +//go:build freebsd || linux || darwin // +build freebsd linux darwin /* @@ -37,7 +38,6 @@ package util import ( "context" "fmt" - "io/ioutil" "net" "net/url" "os" @@ -72,7 +72,7 @@ func CreateListener(endpoint string) (net.Listener, error) { } // Create the socket on a tempfile and move it to the destination socket to handle improprer cleanup - file, err := ioutil.TempFile(filepath.Dir(addr), "") + file, err := os.CreateTemp(filepath.Dir(addr), "") if err != nil { return nil, fmt.Errorf("failed to create temporary file: %v", err) } diff --git a/integration/remote/util/util_unsupported.go b/integration/remote/util/util_unsupported.go index 81f4121..99128fd 100644 --- a/integration/remote/util/util_unsupported.go +++ b/integration/remote/util/util_unsupported.go @@ -1,3 +1,4 @@ +//go:build !freebsd && !linux && !windows && !darwin // +build !freebsd,!linux,!windows,!darwin /* diff --git a/integration/remote/util/util_windows.go b/integration/remote/util/util_windows.go index 979ebf2..51d1b88 100644 --- a/integration/remote/util/util_windows.go +++ b/integration/remote/util/util_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. diff --git a/integration/remote/utils.go b/integration/remote/utils.go index a1390c5..4ed583b 100644 --- a/integration/remote/utils.go +++ b/integration/remote/utils.go @@ -37,7 +37,7 @@ import ( "fmt" "time" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" ) // maxMsgSize use 16MB as the default message size limit. 
diff --git a/integration/restart_test.go b/integration/restart_test.go index 2ba89fc..767752c 100644 --- a/integration/restart_test.go +++ b/integration/restart_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -19,15 +17,18 @@ package integration import ( + goruntime "runtime" "sort" + "syscall" "testing" + "time" "github.com/containerd/containerd" "github.com/containerd/containerd/errdefs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // Restart test must run sequentially. @@ -73,10 +74,6 @@ func TestContainerdRestart(t *testing.T) { name: "created-container", state: runtime.ContainerState_CONTAINER_CREATED, }, - { - name: "running-container", - state: runtime.ContainerState_CONTAINER_RUNNING, - }, { name: "exited-container", state: runtime.ContainerState_CONTAINER_EXITED, @@ -84,6 +81,16 @@ func TestContainerdRestart(t *testing.T) { }, }, } + // NOTE(claudiub): The test will set the container's Linux.SecurityContext.NamespaceOptions.Pid = NamespaceMode_CONTAINER, + // and the expectation is that the container will keep running even if the sandbox container dies. + // We do not have that option on Windows. 
+ if goruntime.GOOS != "windows" { + sandboxes[1].containers = append(sandboxes[1].containers, container{ + name: "running-container", + state: runtime.ContainerState_CONTAINER_RUNNING, + }) + } + t.Logf("Make sure no sandbox is running before test") existingSandboxes, err := runtimeService.ListPodSandbox(&runtime.PodSandboxFilter{}) require.NoError(t, err) @@ -100,6 +107,9 @@ func TestContainerdRestart(t *testing.T) { runtimeService.StopPodSandbox(sid) runtimeService.RemovePodSandbox(sid) }() + + EnsureImageExists(t, pauseImage) + s.id = sid for j := range s.containers { c := &s.containers[j] @@ -126,26 +136,48 @@ func TestContainerdRestart(t *testing.T) { require.NoError(t, err) task, err := cntr.Task(ctx, nil) require.NoError(t, err) - _, err = task.Delete(ctx, containerd.WithProcessKill) - if err != nil { - require.True(t, errdefs.IsNotFound(err)) + + waitCh, err := task.Wait(ctx) + require.NoError(t, err) + + err = task.Kill(ctx, syscall.SIGKILL, containerd.WithKillAll) + if goruntime.GOOS != "windows" { + // NOTE: CRI-plugin setups watcher for each container and + // cleanups container when the watcher returns exit event. + // We just need to kill that sandbox and wait for exit + // event from waitCh. If the sandbox container exits, + // the state of sandbox must be NOT_READY. + require.NoError(t, err) + } else { + // NOTE(gabriel-samfira): On Windows, the "notready-sandbox" array + // only has a container in the ContainerState_CONTAINER_CREATED + // state and a container in the ContainerState_CONTAINER_EXITED state. + // Sending a Kill() to a task that has already exited, or to a task that + // was never started (which is the case here), will always return an + // ErrorNotFound (at least on Windows). Given that in this sanbox, there + // will never be a running task, after we recover from a containerd restart + // we can expect an ErrorNotFound here every time. + // The waitCh channel should already be closed at this point. 
+ assert.True(t, errdefs.IsNotFound(err), err) + } + + select { + case <-waitCh: + case <-time.After(30 * time.Second): + t.Fatalf("expected to receive exit event in time, but timeout") } } } t.Logf("Pull test images") - for _, image := range []string{GetImage(BusyBox), GetImage(Alpine)} { - img, err := imageService.PullImage(&runtime.ImageSpec{Image: image}, nil, nil) - require.NoError(t, err) - defer func() { - assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: img})) - }() + for _, image := range []string{GetImage(BusyBox), GetImage(Pause)} { + EnsureImageExists(t, image) } imagesBeforeRestart, err := imageService.ListImages(nil) assert.NoError(t, err) t.Logf("Restart containerd") - RestartContainerd(t) + RestartContainerd(t, syscall.SIGTERM) t.Logf("Check sandbox and container state after restart") loadedSandboxes, err := runtimeService.ListPodSandbox(&runtime.PodSandboxFilter{}) @@ -153,17 +185,34 @@ func TestContainerdRestart(t *testing.T) { assert.Len(t, loadedSandboxes, len(sandboxes)) loadedContainers, err := runtimeService.ListContainers(&runtime.ContainerFilter{}) require.NoError(t, err) - assert.Len(t, loadedContainers, len(sandboxes)*3) + assert.Len(t, loadedContainers, len(sandboxes[0].containers)+len(sandboxes[1].containers)) for _, s := range sandboxes { for _, loaded := range loadedSandboxes { if s.id == loaded.Id { + t.Logf("Checking sandbox state for '%s'", s.name) assert.Equal(t, s.state, loaded.State) + + // See https://github.com/containerd/containerd/issues/7843 for details. + // Test that CNI result and sandbox IPs are still present after restart. + if loaded.State == runtime.PodSandboxState_SANDBOX_READY { + status, info, err := SandboxInfo(loaded.Id) + require.NoError(t, err) + + // Check that the NetNS didn't close on us, that we still have + // the CNI result, and that we still have the IP we were given + // for this pod. 
+ require.False(t, info.NetNSClosed) + require.NotNil(t, info.CNIResult) + require.NotNil(t, status.Network) + require.NotEmpty(t, status.Network.Ip) + } break } } for _, c := range s.containers { for _, loaded := range loadedContainers { if c.id == loaded.Id { + t.Logf("Checking container state for '%s' in sandbox '%s'", c.name, s.name) assert.Equal(t, c.state, loaded.State) break } diff --git a/integration/runtime_handler_test.go b/integration/runtime_handler_test.go index ccea308..52cb6c2 100644 --- a/integration/runtime_handler_test.go +++ b/integration/runtime_handler_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -23,26 +21,18 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // TODO(chrisfegly): add/update test(s) to allow testing of multiple runtimes at the same time func TestRuntimeHandler(t *testing.T) { t.Logf("Create a sandbox") - sbConfig := PodSandboxConfig("sandbox", "test-runtime-handler") - if *runtimeHandler == "" { t.Logf("The --runtime-handler flag value is empty which results internally to setting the default runtime") } else { t.Logf("The --runtime-handler flag value is %s", *runtimeHandler) } - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - // Make sure the sandbox is cleaned up in any case. 
- runtimeService.StopPodSandbox(sb) - runtimeService.RemovePodSandbox(sb) - }() + sb, _ := PodSandboxConfigWithCleanup(t, "sandbox", "test-runtime-handler") t.Logf("Verify runtimeService.PodSandboxStatus() returns previously set runtimeHandler") sbStatus, err := runtimeService.PodSandboxStatus(sb) diff --git a/integration/sandbox_clean_remove_test.go b/integration/sandbox_clean_remove_test.go index f74c145..95e5a49 100644 --- a/integration/sandbox_clean_remove_test.go +++ b/integration/sandbox_clean_remove_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -19,7 +20,6 @@ package integration import ( - "io/ioutil" "os" "path/filepath" "strings" @@ -30,7 +30,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sys/unix" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestSandboxRemoveWithoutIPLeakage(t *testing.T) { @@ -39,11 +39,11 @@ func TestSandboxRemoveWithoutIPLeakage(t *testing.T) { t.Logf("Make sure host-local ipam is in use") config, err := CRIConfig() require.NoError(t, err) - fs, err := ioutil.ReadDir(config.NetworkPluginConfDir) + fs, err := os.ReadDir(config.NetworkPluginConfDir) require.NoError(t, err) require.NotEmpty(t, fs) f := filepath.Join(config.NetworkPluginConfDir, fs[0].Name()) - cniConfig, err := ioutil.ReadFile(f) + cniConfig, err := os.ReadFile(f) require.NoError(t, err) if !strings.Contains(string(cniConfig), "host-local") { t.Skip("host-local ipam is not in use") diff --git a/integration/sandbox_clean_remove_windows_test.go b/integration/sandbox_clean_remove_windows_test.go new file mode 100644 index 0000000..af8950e --- /dev/null +++ b/integration/sandbox_clean_remove_windows_test.go @@ -0,0 +1,179 @@ +//go:build windows +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package integration + +import ( + "context" + "fmt" + "strconv" + "testing" + + "github.com/Microsoft/hcsshim/osversion" + "github.com/stretchr/testify/require" + "golang.org/x/sys/windows/registry" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" +) + +// Returns what nanoserver image version to use according to the build number +func getTestImage() (string, error) { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { + return "", err + } + defer k.Close() + + b, _, _ := k.GetStringValue("CurrentBuild") + buildNum, _ := strconv.Atoi(b) + + switch buildNum { + case osversion.RS1: + return "mcr.microsoft.com/windows/nanoserver:sac2016", nil + case osversion.RS3: + return "mcr.microsoft.com/windows/nanoserver:1709", nil + case osversion.RS4: + return "mcr.microsoft.com/windows/nanoserver:1803", nil + case osversion.RS5: + return "mcr.microsoft.com/windows/nanoserver:1809", nil + case osversion.V19H1: + return "mcr.microsoft.com/windows/nanoserver:1903", nil + case osversion.V19H2: + return "mcr.microsoft.com/windows/nanoserver:1909", nil + case osversion.V20H1: + return "mcr.microsoft.com/windows/nanoserver:2004", nil + case osversion.V20H2: + return "mcr.microsoft.com/windows/nanoserver:20H2", nil + case osversion.V21H2Server: + return "mcr.microsoft.com/windows/nanoserver:ltsc2022", nil + default: + // Due to some efforts in improving down-level compatibility for Windows containers (see + // 
https://techcommunity.microsoft.com/t5/containers/windows-server-2022-and-beyond-for-containers/ba-p/2712487) + // the ltsc2022 image should continue to work on builds ws2022 and onwards (Windows 11 for example). With this in mind, + // if there's no mapping for the host build just use the Windows Server 2022 image. + if buildNum > osversion.V21H2Server { + return "mcr.microsoft.com/windows/nanoserver:ltsc2022", nil + } + return "", fmt.Errorf("No test image defined for Windows build version: %s", b) + } +} + +func removePodSandbox(ctx context.Context, t *testing.T, client runtime.RuntimeServiceClient, podID string) { + t.Helper() + _, err := client.RemovePodSandbox(ctx, &runtime.RemovePodSandboxRequest{ + PodSandboxId: podID, + }) + require.NoError(t, err, "failed RemovePodSandbox for sandbox: %s", podID) +} + +func stopPodSandbox(ctx context.Context, t *testing.T, client runtime.RuntimeServiceClient, podID string) { + t.Helper() + _, err := client.StopPodSandbox(ctx, &runtime.StopPodSandboxRequest{ + PodSandboxId: podID, + }) + require.NoError(t, err, "failed StopPodSandbox for sandbox: %s", podID) +} + +func stopContainer(ctx context.Context, t *testing.T, client runtime.RuntimeServiceClient, containerID string) { + t.Helper() + _, err := client.StopContainer(ctx, &runtime.StopContainerRequest{ + ContainerId: containerID, + Timeout: 0, + }) + require.NoError(t, err, "failed StopContainer request for container: %s", containerID) +} + +func startContainer(ctx context.Context, t *testing.T, client runtime.RuntimeServiceClient, containerID string) { + t.Helper() + _, err := client.StartContainer(ctx, &runtime.StartContainerRequest{ + ContainerId: containerID, + }) + require.NoError(t, err, "failed StartContainer request for container: %s", containerID) +} + +func removeContainer(ctx context.Context, t *testing.T, client runtime.RuntimeServiceClient, containerID string) { + t.Helper() + _, err := client.RemoveContainer(ctx, &runtime.RemoveContainerRequest{ + 
ContainerId: containerID, + }) + require.NoError(t, err, "failed RemoveContainer request for container: %s", containerID) +} + +// This test checks if create/stop and remove pods and containers work as expected +func TestCreateContainer(t *testing.T) { + testImage, err := getTestImage() + if err != nil { + t.Skip("skipping test, error: ", err) + } + client, err := RawRuntimeClient() + require.NoError(t, err, "failed to get raw grpc runtime service client") + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(func() { cancel() }) + + t.Log("Create a pod sandbox") + sbConfig := &runtime.PodSandboxConfig{ + Metadata: &runtime.PodSandboxMetadata{ + Name: t.Name(), + }, + } + sandboxRequest := &runtime.RunPodSandboxRequest{ + Config: sbConfig, + RuntimeHandler: "runhcs-wcow-process", + } + sandBoxResponse, err := client.RunPodSandbox(ctx, sandboxRequest) + require.NoError(t, err, "failed RunPodSandbox request") + // Make sure the sandbox is cleaned up. + t.Cleanup(func() { removePodSandbox(ctx, t, client, sandBoxResponse.PodSandboxId) }) + t.Cleanup(func() { stopPodSandbox(ctx, t, client, sandBoxResponse.PodSandboxId) }) + + EnsureImageExists(t, testImage) + + t.Log("Create a container") + createCtrRequest := &runtime.CreateContainerRequest{ + Config: &runtime.ContainerConfig{ + Metadata: &runtime.ContainerMetadata{ + Name: t.Name() + "-CreateContainerTest", + }, + Image: &runtime.ImageSpec{ + Image: testImage, + }, + Command: []string{ + "cmd", + "/c", + "ping", + "-t", + "127.0.0.1", + }, + Windows: &runtime.WindowsContainerConfig{ + Resources: &runtime.WindowsContainerResources{ + CpuShares: 500, + }, + }, + }, + PodSandboxId: sandBoxResponse.PodSandboxId, + SandboxConfig: sandboxRequest.Config, + } + + createCtrResponse, err := client.CreateContainer(ctx, createCtrRequest) + require.NoError(t, err, "failed CreateContainer request in sandbox: %s", sandBoxResponse.PodSandboxId) + // Make sure the container is cleaned up. 
+ t.Cleanup(func() { removeContainer(ctx, t, client, createCtrResponse.ContainerId) }) + + startContainer(ctx, t, client, createCtrResponse.ContainerId) + stopContainer(ctx, t, client, createCtrResponse.ContainerId) +} diff --git a/integration/sandbox_run_rollback_test.go b/integration/sandbox_run_rollback_test.go new file mode 100644 index 0000000..c5cd75e --- /dev/null +++ b/integration/sandbox_run_rollback_test.go @@ -0,0 +1,396 @@ +//go:build linux +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package integration + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + "testing" + "time" + + runtimespec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + criapiv1 "k8s.io/cri-api/pkg/apis/runtime/v1" + + "github.com/containerd/containerd/pkg/cri/store/sandbox" + "github.com/containerd/containerd/pkg/failpoint" + "github.com/containerd/typeurl" +) + +const ( + failpointRuntimeHandler = "runc-fp" + failpointCNIBinary = "cni-bridge-fp" + + failpointShimPrefixKey = "io.containerd.runtime.v2.shim.failpoint." 
+ + failpointCNIConfPathKey = "failpoint.cni.containerd.io/confpath" +) + +func TestRunPodSandboxWithSetupCNIFailure(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip() + } + + t.Logf("Inject CNI failpoint") + conf := &failpointConf{ + Add: "1*error(you-shall-not-pass!)", + } + + sbConfig := PodSandboxConfig(t.Name(), "failpoint") + injectCNIFailpoint(t, sbConfig, conf) + + t.Logf("Create a sandbox") + _, err := runtimeService.RunPodSandbox(sbConfig, failpointRuntimeHandler) + require.Error(t, err) + require.Equal(t, true, strings.Contains(err.Error(), "you-shall-not-pass!")) + + t.Logf("Retry to create sandbox with same config") + sb, err := runtimeService.RunPodSandbox(sbConfig, failpointRuntimeHandler) + require.NoError(t, err) + + err = runtimeService.StopPodSandbox(sb) + require.NoError(t, err) + + err = runtimeService.RemovePodSandbox(sb) + require.NoError(t, err) +} + +func TestRunPodSandboxWithShimStartFailure(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip() + } + + t.Logf("Inject Shim failpoint") + + sbConfig := PodSandboxConfig(t.Name(), "failpoint") + injectShimFailpoint(t, sbConfig, map[string]string{ + "Start": "1*error(no hard feelings)", + }) + + t.Logf("Create a sandbox") + _, err := runtimeService.RunPodSandbox(sbConfig, failpointRuntimeHandler) + require.Error(t, err) + require.Equal(t, true, strings.Contains(err.Error(), "no hard feelings")) +} + +// TestRunPodSandboxWithShimDeleteFailure should keep the sandbox record if +// failed to rollback shim by shim.Delete API. 
+func TestRunPodSandboxWithShimDeleteFailure(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip() + } + if os.Getenv("ENABLE_CRI_SANDBOXES") != "" { + t.Skip() + } + + testCase := func(restart bool) func(*testing.T) { + return func(t *testing.T) { + t.Log("Init PodSandboxConfig with specific label") + labels := map[string]string{ + t.Name(): "true", + } + sbConfig := PodSandboxConfig(t.Name(), "failpoint", WithPodLabels(labels)) + + t.Log("Inject Shim failpoint") + injectShimFailpoint(t, sbConfig, map[string]string{ + "Start": "1*error(failed to start shim)", + "Delete": "1*error(please retry)", // inject failpoint during rollback shim + }) + + t.Log("Create a sandbox") + _, err := runtimeService.RunPodSandbox(sbConfig, failpointRuntimeHandler) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to start shim") + + t.Log("ListPodSandbox with the specific label") + l, err := runtimeService.ListPodSandbox(&criapiv1.PodSandboxFilter{LabelSelector: labels}) + require.NoError(t, err) + require.Len(t, l, 1) + + sb := l[0] + require.Equal(t, sb.State, criapiv1.PodSandboxState_SANDBOX_NOTREADY) + require.Equal(t, sb.Metadata.Name, sbConfig.Metadata.Name) + require.Equal(t, sb.Metadata.Namespace, sbConfig.Metadata.Namespace) + require.Equal(t, sb.Metadata.Uid, sbConfig.Metadata.Uid) + require.Equal(t, sb.Metadata.Attempt, sbConfig.Metadata.Attempt) + + t.Log("Check PodSandboxStatus") + sbStatus, err := runtimeService.PodSandboxStatus(sb.Id) + require.NoError(t, err) + require.Equal(t, sbStatus.State, criapiv1.PodSandboxState_SANDBOX_NOTREADY) + require.Greater(t, len(sbStatus.Network.Ip), 0) + + if restart { + t.Log("Restart containerd") + RestartContainerd(t, syscall.SIGTERM) + + t.Log("ListPodSandbox with the specific label") + l, err = runtimeService.ListPodSandbox(&criapiv1.PodSandboxFilter{Id: sb.Id}) + require.NoError(t, err) + require.Len(t, l, 1) + require.Equal(t, l[0].State, criapiv1.PodSandboxState_SANDBOX_NOTREADY) + + t.Log("Check 
PodSandboxStatus") + sbStatus, err := runtimeService.PodSandboxStatus(sb.Id) + require.NoError(t, err) + t.Log(sbStatus.Network) + require.Equal(t, sbStatus.State, criapiv1.PodSandboxState_SANDBOX_NOTREADY) + } + + t.Log("Cleanup leaky sandbox") + err = runtimeService.RemovePodSandbox(sb.Id) + require.NoError(t, err) + } + } + + t.Run("CleanupAfterRestart", testCase(true)) + t.Run("JustCleanup", testCase(false)) +} + +// TestRunPodSandboxWithShimStartAndTeardownCNIFailure should keep the sandbox +// record if failed to rollback CNI API. +func TestRunPodSandboxWithShimStartAndTeardownCNIFailure(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip() + } + if os.Getenv("ENABLE_CRI_SANDBOXES") != "" { + t.Skip() + } + + testCase := func(restart bool) func(*testing.T) { + return func(t *testing.T) { + t.Log("Init PodSandboxConfig with specific key") + labels := map[string]string{ + t.Name(): "true", + } + sbConfig := PodSandboxConfig(t.Name(), "failpoint", WithPodLabels(labels)) + + t.Log("Inject Shim failpoint") + injectShimFailpoint(t, sbConfig, map[string]string{ + "Start": "1*error(failed to start shim)", + }) + + t.Log("Inject CNI failpoint") + conf := &failpointConf{ + Del: "1*error(please retry)", + } + injectCNIFailpoint(t, sbConfig, conf) + + t.Log("Create a sandbox") + _, err := runtimeService.RunPodSandbox(sbConfig, failpointRuntimeHandler) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to start shim") + + t.Log("ListPodSandbox with the specific label") + l, err := runtimeService.ListPodSandbox(&criapiv1.PodSandboxFilter{LabelSelector: labels}) + require.NoError(t, err) + require.Len(t, l, 1) + + sb := l[0] + require.Equal(t, sb.State, criapiv1.PodSandboxState_SANDBOX_NOTREADY) + require.Equal(t, sb.Metadata.Name, sbConfig.Metadata.Name) + require.Equal(t, sb.Metadata.Namespace, sbConfig.Metadata.Namespace) + require.Equal(t, sb.Metadata.Uid, sbConfig.Metadata.Uid) + require.Equal(t, sb.Metadata.Attempt, sbConfig.Metadata.Attempt) + + 
if restart { + t.Log("Restart containerd") + RestartContainerd(t, syscall.SIGTERM) + + t.Log("ListPodSandbox with the specific label") + l, err = runtimeService.ListPodSandbox(&criapiv1.PodSandboxFilter{Id: sb.Id}) + require.NoError(t, err) + require.Len(t, l, 1) + require.Equal(t, l[0].State, criapiv1.PodSandboxState_SANDBOX_NOTREADY) + } + + t.Log("Cleanup leaky sandbox") + err = runtimeService.RemovePodSandbox(sb.Id) + require.NoError(t, err) + } + } + t.Run("CleanupAfterRestart", testCase(true)) + t.Run("JustCleanup", testCase(false)) +} + +// TestRunPodSandboxWithShimStartAndTeardownCNISlow should keep the sandbox +// record if failed to rollback CNI API. +func TestRunPodSandboxAndTeardownCNISlow(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip() + } + if os.Getenv("ENABLE_CRI_SANDBOXES") != "" { + t.Skip() + } + + t.Log("Init PodSandboxConfig with specific key") + sbName := t.Name() + labels := map[string]string{ + sbName: "true", + } + sbConfig := PodSandboxConfig(sbName, "failpoint", WithPodLabels(labels)) + + t.Log("Inject CNI failpoint") + conf := &failpointConf{ + // Delay 1 day + Add: "1*delay(86400000)", + } + injectCNIFailpoint(t, sbConfig, conf) + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer wg.Done() + t.Log("Create a sandbox") + _, err := runtimeService.RunPodSandbox(sbConfig, failpointRuntimeHandler) + require.Error(t, err) + require.Contains(t, err.Error(), "error reading from server: EOF") + }() + + assert.NoError(t, ensureCNIAddRunning(t, sbName), "check that failpoint CNI.Add is running") + + // Use SIGKILL to prevent containerd server gracefulshutdown which may cause indeterministic invocation of defer functions + t.Log("Restart containerd") + RestartContainerd(t, syscall.SIGKILL) + + wg.Wait() + + t.Log("ListPodSandbox with the specific label") + l, err := runtimeService.ListPodSandbox(&criapiv1.PodSandboxFilter{LabelSelector: labels}) + require.NoError(t, err) + require.Len(t, l, 1) + + sb := l[0] + + defer func() { + 
t.Log("Cleanup leaky sandbox") + err := runtimeService.StopPodSandbox(sb.Id) + assert.NoError(t, err) + err = runtimeService.RemovePodSandbox(sb.Id) + require.NoError(t, err) + }() + + assert.Equal(t, sb.State, criapiv1.PodSandboxState_SANDBOX_NOTREADY) + assert.Equal(t, sb.Metadata.Name, sbConfig.Metadata.Name) + assert.Equal(t, sb.Metadata.Namespace, sbConfig.Metadata.Namespace) + assert.Equal(t, sb.Metadata.Uid, sbConfig.Metadata.Uid) + assert.Equal(t, sb.Metadata.Attempt, sbConfig.Metadata.Attempt) + + t.Log("Get sandbox info") + _, info, err := SandboxInfo(sb.Id) + require.NoError(t, err) + require.False(t, info.NetNSClosed) + + var netNS string + for _, n := range info.RuntimeSpec.Linux.Namespaces { + if n.Type == runtimespec.NetworkNamespace { + netNS = n.Path + } + } + assert.NotEmpty(t, netNS, "network namespace should be set") + + t.Log("Get sandbox container") + c, err := GetContainer(sb.Id) + require.NoError(t, err) + any, ok := c.Extensions["io.cri-containerd.sandbox.metadata"] + require.True(t, ok, "sandbox metadata should exist in extension") + i, err := typeurl.UnmarshalAny(&any) + require.NoError(t, err) + require.IsType(t, &sandbox.Metadata{}, i) + metadata, ok := i.(*sandbox.Metadata) + require.True(t, ok) + assert.NotEmpty(t, metadata.NetNSPath) + assert.Equal(t, netNS, metadata.NetNSPath, "network namespace path should be the same in runtime spec and sandbox metadata") +} + +func ensureCNIAddRunning(t *testing.T, sbName string) error { + return Eventually(func() (bool, error) { + pids, err := PidsOf(failpointCNIBinary) + if err != nil || len(pids) == 0 { + return false, err + } + + for _, pid := range pids { + envs, err := PidEnvs(pid) + if err != nil { + t.Logf("failed to read environ of pid %v: %v: skip it", pid, err) + continue + } + + args, ok := envs["CNI_ARGS"] + if !ok { + t.Logf("expected CNI_ARGS env but got nothing, skip pid=%v", pid) + continue + } + + for _, arg := range strings.Split(args, ";") { + kv := strings.SplitN(arg, "=", 2) 
+ if len(kv) != 2 { + continue + } + + if kv[0] == "K8S_POD_NAME" && kv[1] == sbName { + return true, nil + } + } + } + return false, nil + }, time.Second, 30*time.Second) +} + +// failpointConf is used to describe cmdAdd/cmdDel/cmdCheck command's failpoint. +type failpointConf struct { + Add string `json:"cmdAdd"` + Del string `json:"cmdDel"` + Check string `json:"cmdCheck"` +} + +func injectCNIFailpoint(t *testing.T, sbConfig *criapiv1.PodSandboxConfig, conf *failpointConf) { + stateDir := t.TempDir() + + metadata := sbConfig.Metadata + fpFilename := filepath.Join(stateDir, + fmt.Sprintf("%s-%s.json", metadata.Namespace, strings.Replace(metadata.Name, "/", "-", -1))) + + data, err := json.Marshal(conf) + require.NoError(t, err) + + err = os.WriteFile(fpFilename, data, 0666) + require.NoError(t, err) + + sbConfig.Annotations[failpointCNIConfPathKey] = fpFilename +} + +func injectShimFailpoint(t *testing.T, sbConfig *criapiv1.PodSandboxConfig, methodFps map[string]string) { + for method, fp := range methodFps { + _, err := failpoint.NewFailpoint(method, fp) + require.NoError(t, err, "check failpoint %s for shim method %s", fp, method) + + sbConfig.Annotations[failpointShimPrefixKey+method] = fp + } +} diff --git a/integration/shim_dial_unix_test.go b/integration/shim_dial_unix_test.go new file mode 100644 index 0000000..981fe05 --- /dev/null +++ b/integration/shim_dial_unix_test.go @@ -0,0 +1,177 @@ +//go:build !windows +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package integration + +import ( + "context" + "errors" + "net" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + "testing" + "time" + + v1shimcli "github.com/containerd/containerd/runtime/v1/shim/client" + v2shimcli "github.com/containerd/containerd/runtime/v2/shim" + "github.com/containerd/ttrpc" +) + +const abstractSocketPrefix = "\x00" + +// TestFailFastWhenConnectShim is to test that the containerd task manager +// should not tolerate ENOENT during restarting. In linux, the containerd shim +// always listens on socket before task manager dial. If there is ENOENT or +// ECONNREFUSED error, the task manager should clean up because that socket file +// is gone or shim doesn't listen on the socket anymore. +func TestFailFastWhenConnectShim(t *testing.T) { + t.Parallel() + + // abstract Unix domain sockets are only for Linux. + if runtime.GOOS == "linux" { + t.Run("abstract-unix-socket-v1", testFailFastWhenConnectShim(true, v1shimcli.AnonDialer)) + t.Run("abstract-unix-socket-v2", testFailFastWhenConnectShim(true, v2shimcli.AnonDialer)) + } + t.Run("normal-unix-socket-v1", testFailFastWhenConnectShim(false, v1shimcli.AnonDialer)) + t.Run("normal-unix-socket-v2", testFailFastWhenConnectShim(false, v2shimcli.AnonDialer)) +} + +type dialFunc func(address string, timeout time.Duration) (net.Conn, error) + +func testFailFastWhenConnectShim(abstract bool, dialFn dialFunc) func(*testing.T) { + return func(t *testing.T) { + var ( + ctx = context.Background() + addr, listener, cleanup = newTestListener(t, abstract) + errCh = make(chan error, 1) + + checkDialErr = func(addr string, errCh chan error, expected error) { + go func() { + _, err := dialFn(addr, 1*time.Hour) + errCh <- err + }() + + select { + case <-time.After(10 * time.Second): + t.Fatalf("expected fail fast, but got timeout") + case err := <-errCh: + t.Helper() + if !errors.Is(err, expected) 
{ + t.Fatalf("expected error %v, but got %v", expected, err) + } + } + } + ) + defer cleanup() + defer listener.Close() + + ttrpcSrv, err := ttrpc.NewServer() + if err != nil { + t.Fatalf("failed to new ttrpc server: %v", err) + } + go func() { + ttrpcSrv.Serve(ctx, listener) + }() + + // ttrpcSrv starts in other goroutine so that we need to retry AnonDialer + // here until ttrpcSrv receives the request. + go func() { + to := time.After(10 * time.Second) + + for { + select { + case <-to: + errCh <- errors.New("timeout") + return + default: + } + + conn, err := dialFn(addr, 1*time.Hour) + if err != nil { + if errors.Is(err, syscall.ECONNREFUSED) { + time.Sleep(10 * time.Millisecond) + continue + } + errCh <- err + return + } + + conn.Close() + errCh <- nil + return + } + }() + + // it should be successful + if err := <-errCh; err != nil { + t.Fatalf("failed to dial: %v", err) + } + + // NOTE(fuweid): + // + // UnixListener will unlink that the socket file when call Close. + // Disable unlink when close to keep the socket file. + listener.(*net.UnixListener).SetUnlinkOnClose(false) + + listener.Close() + ttrpcSrv.Shutdown(ctx) + + checkDialErr(addr, errCh, syscall.ECONNREFUSED) + + // remove the socket file + cleanup() + + if abstract { + checkDialErr(addr, errCh, syscall.ECONNREFUSED) + } else { + // should not wait for the socket file show up again. + checkDialErr(addr, errCh, syscall.ENOENT) + } + } +} + +func newTestListener(t testing.TB, abstract bool) (string, net.Listener, func()) { + tmpDir := t.TempDir() + + // NOTE(fuweid): + // + // Before patch https://github.com/containerd/containerd/commit/bd908acabd1a31c8329570b5283e8fdca0b39906, + // The shim stores the abstract socket file without abstract socket + // prefix and `unix://`. For the existing shim, if the socket file + // only contains the path, it will indicate that it is abstract socket. + // Otherwise, it will be normal socket file formated in `unix:///xyz'. 
+ addr := filepath.Join(tmpDir, "uds.socket") + if abstract { + addr = abstractSocketPrefix + addr + } else { + addr = "unix://" + addr + } + + listener, err := net.Listen("unix", strings.TrimPrefix(addr, "unix://")) + if err != nil { + t.Fatalf("failed to listen on %s: %v", addr, err) + } + + return strings.TrimPrefix(addr, abstractSocketPrefix), listener, func() { + os.RemoveAll(tmpDir) + } +} diff --git a/integration/truncindex_test.go b/integration/truncindex_test.go index 67cddeb..684a220 100644 --- a/integration/truncindex_test.go +++ b/integration/truncindex_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -19,11 +17,12 @@ package integration import ( + goruntime "runtime" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" ) func genTruncIndex(normalName string) string { @@ -35,12 +34,9 @@ func TestTruncIndex(t *testing.T) { t.Logf("Pull an image") var appImage = GetImage(BusyBox) - imgID, err := imageService.PullImage(&runtimeapi.ImageSpec{Image: appImage}, nil, sbConfig) - require.NoError(t, err) + + imgID := EnsureImageExists(t, appImage) imgTruncID := genTruncIndex(imgID) - defer func() { - assert.NoError(t, imageService.RemoveImage(&runtimeapi.ImageSpec{Image: imgTruncID})) - }() t.Logf("Get image status by truncindex, truncID: %s", imgTruncID) res, err := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imgTruncID}) @@ -85,7 +81,7 @@ func TestTruncIndex(t *testing.T) { cnConfig := ContainerConfig( "containerTruncIndex", appImage, - WithCommand("top"), + WithCommand("sleep", "300"), ) cn, err := runtimeService.CreateContainer(sbTruncIndex, cnConfig, sbConfig) require.NoError(t, err) @@ -116,10 +112,17 @@ func TestTruncIndex(t *testing.T) { assert.Equal(t, cn, cStats.Attributes.Id) t.Logf("Update container memory limit after started") - err = 
runtimeService.UpdateContainerResources(cnTruncIndex, &runtimeapi.LinuxContainerResources{ - MemoryLimitInBytes: 50 * 1024 * 1024, - }) - assert.NoError(t, err) + if goruntime.GOOS != "windows" { + err = runtimeService.UpdateContainerResources(cnTruncIndex, &runtimeapi.LinuxContainerResources{ + MemoryLimitInBytes: 50 * 1024 * 1024, + }, nil) + assert.NoError(t, err) + } else { + err = runtimeService.UpdateContainerResources(cnTruncIndex, nil, &runtimeapi.WindowsContainerResources{ + MemoryLimitInBytes: 50 * 1024 * 1024, + }) + assert.NoError(t, err) + } t.Logf("Execute cmd in container") execReq := &runtimeapi.ExecRequest{ diff --git a/integration/util/boottime_util_darwin.go b/integration/util/boottime_util_darwin.go index 74abe9f..910632c 100644 --- a/integration/util/boottime_util_darwin.go +++ b/integration/util/boottime_util_darwin.go @@ -1,5 +1,3 @@ -// +build darwin - /* Copyright The containerd Authors. diff --git a/integration/util/boottime_util_linux.go b/integration/util/boottime_util_linux.go index 2699ae5..e435d8b 100644 --- a/integration/util/boottime_util_linux.go +++ b/integration/util/boottime_util_linux.go @@ -1,3 +1,4 @@ +//go:build freebsd || linux // +build freebsd linux /* diff --git a/integration/util/util_unix.go b/integration/util/util_unix.go index fcf36d9..3fa7cdd 100644 --- a/integration/util/util_unix.go +++ b/integration/util/util_unix.go @@ -1,3 +1,4 @@ +//go:build freebsd || linux || darwin // +build freebsd linux darwin /* @@ -37,7 +38,6 @@ package util import ( "context" "fmt" - "io/ioutil" "net" "net/url" "os" @@ -72,7 +72,7 @@ func CreateListener(endpoint string) (net.Listener, error) { } // Create the socket on a tempfile and move it to the destination socket to handle improprer cleanup - file, err := ioutil.TempFile(filepath.Dir(addr), "") + file, err := os.CreateTemp(filepath.Dir(addr), "") if err != nil { return nil, fmt.Errorf("failed to create temporary file: %v", err) } diff --git a/integration/util/util_unsupported.go 
b/integration/util/util_unsupported.go index 81f4121..99128fd 100644 --- a/integration/util/util_unsupported.go +++ b/integration/util/util_unsupported.go @@ -1,3 +1,4 @@ +//go:build !freebsd && !linux && !windows && !darwin // +build !freebsd,!linux,!windows,!darwin /* diff --git a/integration/util/util_windows.go b/integration/util/util_windows.go index 850a50f..183c6b8 100644 --- a/integration/util/util_windows.go +++ b/integration/util/util_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. diff --git a/integration/volume_copy_up_test.go b/integration/volume_copy_up_test.go index d6840a5..1e0e207 100644 --- a/integration/volume_copy_up_test.go +++ b/integration/volume_copy_up_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -20,13 +18,21 @@ package integration import ( "fmt" - "os/exec" + "os" + "path/filepath" + goruntime "runtime" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" +) + +const ( + containerUserName = "ContainerUser" + // containerUserSID is a well known SID that is set on the + // ContainerUser username inside a Windows container. 
+ containerUserSID = "S-1-5-93-2-2" ) func TestVolumeCopyUp(t *testing.T) { @@ -36,23 +42,15 @@ func TestVolumeCopyUp(t *testing.T) { ) t.Logf("Create a sandbox") - sbConfig := PodSandboxConfig("sandbox", "volume-copy-up") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "volume-copy-up") - t.Logf("Pull test image") - _, err = imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) + EnsureImageExists(t, testImage) t.Logf("Create a container with volume-copy-up test image") cnConfig := ContainerConfig( "container", testImage, - WithCommand("tail", "-f", "/dev/null"), + WithCommand("sleep", "150"), ) cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig) require.NoError(t, err) @@ -60,7 +58,7 @@ func TestVolumeCopyUp(t *testing.T) { t.Logf("Start the container") require.NoError(t, runtimeService.StartContainer(cn)) - // gcr.io/k8s-cri-containerd/volume-copy-up:2.0 contains a test_dir + // ghcr.io/containerd/volume-copy-up:2.1 contains a test_dir // volume, which contains a test_file with content "test_content". 
t.Logf("Check whether volume contains the test file") stdout, stderr, err := runtimeService.ExecSync(cn, []string{ @@ -72,10 +70,14 @@ func TestVolumeCopyUp(t *testing.T) { assert.Equal(t, "test_content\n", string(stdout)) t.Logf("Check host path of the volume") - hostCmd := fmt.Sprintf("find %s/containers/%s/volumes/*/test_file | xargs cat", *criRoot, cn) - output, err := exec.Command("sh", "-c", hostCmd).CombinedOutput() + volumePaths, err := getHostPathForVolumes(*criRoot, cn) require.NoError(t, err) - assert.Equal(t, "test_content\n", string(output)) + assert.Equal(t, len(volumePaths), 1, "expected exactly 1 volume") + + testFilePath := filepath.Join(volumePaths[0], "test_file") + contents, err := os.ReadFile(testFilePath) + require.NoError(t, err) + assert.Equal(t, "test_content\n", string(contents)) t.Logf("Update volume from inside the container") _, _, err = runtimeService.ExecSync(cn, []string{ @@ -86,9 +88,9 @@ func TestVolumeCopyUp(t *testing.T) { require.NoError(t, err) t.Logf("Check whether host path of the volume is updated") - output, err = exec.Command("sh", "-c", hostCmd).CombinedOutput() + contents, err = os.ReadFile(testFilePath) require.NoError(t, err) - assert.Equal(t, "new_content\n", string(output)) + assert.Equal(t, "new_content\n", string(contents)) } func TestVolumeOwnership(t *testing.T) { @@ -98,23 +100,15 @@ func TestVolumeOwnership(t *testing.T) { ) t.Logf("Create a sandbox") - sbConfig := PodSandboxConfig("sandbox", "volume-ownership") - sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) - require.NoError(t, err) - defer func() { - assert.NoError(t, runtimeService.StopPodSandbox(sb)) - assert.NoError(t, runtimeService.RemovePodSandbox(sb)) - }() + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "volume-ownership") - t.Logf("Pull test image") - _, err = imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil, sbConfig) - require.NoError(t, err) + EnsureImageExists(t, testImage) t.Logf("Create a 
container with volume-ownership test image") cnConfig := ContainerConfig( "container", testImage, - WithCommand("tail", "-f", "/dev/null"), + WithCommand("sleep", "150"), ) cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig) require.NoError(t, err) @@ -122,19 +116,61 @@ func TestVolumeOwnership(t *testing.T) { t.Logf("Start the container") require.NoError(t, runtimeService.StartContainer(cn)) - // gcr.io/k8s-cri-containerd/volume-ownership:2.0 contains a test_dir + // ghcr.io/containerd/volume-ownership:2.1 contains a test_dir // volume, which is owned by nobody:nogroup. + // On Windows, the folder is situated in C:\volumes\test_dir and is owned + // by ContainerUser (SID: S-1-5-93-2-2). A helper tool get_owner.exe should + // exist inside the container that returns the owner in the form of USERNAME:SID. t.Logf("Check ownership of test directory inside container") - stdout, stderr, err := runtimeService.ExecSync(cn, []string{ + + cmd := []string{ "stat", "-c", "%U:%G", "/test_dir", - }, execTimeout) + } + expectedContainerOutput := "nobody:nogroup\n" + expectedHostOutput := "nobody:nogroup\n" + if goruntime.GOOS == "windows" { + cmd = []string{ + "C:\\bin\\get_owner.exe", + "C:\\volumes\\test_dir", + } + expectedContainerOutput = fmt.Sprintf("%s:%s", containerUserName, containerUserSID) + // The username is unknown on the host, but we can still get the SID. 
+ expectedHostOutput = containerUserSID + } + stdout, stderr, err := runtimeService.ExecSync(cn, cmd, execTimeout) require.NoError(t, err) assert.Empty(t, stderr) - assert.Equal(t, "nobody:nogroup\n", string(stdout)) + assert.Equal(t, expectedContainerOutput, string(stdout)) t.Logf("Check ownership of test directory on the host") - hostCmd := fmt.Sprintf("find %s/containers/%s/volumes/* | xargs stat -c %%U:%%G", *criRoot, cn) - output, err := exec.Command("sh", "-c", hostCmd).CombinedOutput() + volumePaths, err := getHostPathForVolumes(*criRoot, cn) require.NoError(t, err) - assert.Equal(t, "nobody:nogroup\n", string(output)) + assert.Equal(t, len(volumePaths), 1, "expected exactly 1 volume") + + output, err := getOwnership(volumePaths[0]) + require.NoError(t, err) + assert.Equal(t, expectedHostOutput, output) +} + +func getHostPathForVolumes(criRoot, containerID string) ([]string, error) { + hostPath := filepath.Join(criRoot, "containers", containerID, "volumes") + if _, err := os.Stat(hostPath); err != nil { + return nil, err + } + + volumes, err := os.ReadDir(hostPath) + if err != nil { + return nil, err + } + + if len(volumes) == 0 { + return []string{}, nil + } + + volumePaths := make([]string, len(volumes)) + for idx, volume := range volumes { + volumePaths[idx] = filepath.Join(hostPath, volume.Name()) + } + + return volumePaths, nil } diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_unix.go b/integration/volume_copy_up_unix_test.go similarity index 67% rename from vendor/github.com/containerd/continuity/fs/hardlink_unix.go rename to integration/volume_copy_up_unix_test.go index f95f090..1dc76ab 100644 --- a/vendor/github.com/containerd/continuity/fs/hardlink_unix.go +++ b/integration/volume_copy_up_unix_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -16,19 +17,19 @@ limitations under the License. 
*/ -package fs +package integration import ( - "os" - "syscall" + "fmt" + + exec "golang.org/x/sys/execabs" ) -func getLinkInfo(fi os.FileInfo) (uint64, bool) { - s, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return 0, false +func getOwnership(path string) (string, error) { + hostCmd := fmt.Sprintf("stat -c %%U:%%G '%s'", path) + output, err := exec.Command("sh", "-c", hostCmd).CombinedOutput() + if err != nil { + return "", err } - - // Ino is uint32 on bsd, uint64 on darwin/linux/solaris - return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 // nolint: unconvert + return string(output), nil } diff --git a/vendor/github.com/containerd/continuity/driver/lchmod_unix.go b/integration/volume_copy_up_windows_test.go similarity index 61% rename from vendor/github.com/containerd/continuity/driver/lchmod_unix.go rename to integration/volume_copy_up_windows_test.go index b8877a8..3ea5b2b 100644 --- a/vendor/github.com/containerd/continuity/driver/lchmod_unix.go +++ b/integration/volume_copy_up_windows_test.go @@ -1,4 +1,5 @@ -// +build darwin freebsd solaris +//go:build windows +// +build windows /* Copyright The containerd Authors. @@ -16,19 +17,24 @@ limitations under the License. */ -package driver +package integration import ( - "os" - - "golang.org/x/sys/unix" + "golang.org/x/sys/windows" ) -// Lchmod changes the mode of a file not following symlinks. 
-func (d *driver) Lchmod(path string, mode os.FileMode) error { - err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) +func getOwnership(path string) (string, error) { + secInfo, err := windows.GetNamedSecurityInfo( + path, windows.SE_FILE_OBJECT, + windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION) + if err != nil { - err = &os.PathError{Op: "lchmod", Path: path, Err: err} + return "", err } - return err + + sid, _, err := secInfo.Owner() + if err != nil { + return "", err + } + return sid.String(), nil } diff --git a/integration/windows_hostprocess_test.go b/integration/windows_hostprocess_test.go new file mode 100644 index 0000000..390f2db --- /dev/null +++ b/integration/windows_hostprocess_test.go @@ -0,0 +1,191 @@ +//go:build windows +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package integration + +import ( + "fmt" + "os" + "strconv" + "testing" + "time" + + "github.com/Microsoft/hcsshim/osversion" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/sys/windows/registry" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + v1 "k8s.io/cri-api/pkg/apis/runtime/v1" +) + +type hpcAction func(*testing.T, string, *v1.ContainerConfig) + +var ( + defaultCommand = WithCommand("Powershell", "/c", "$env:CONTAINER_SANDBOX_MOUNT_POINT/pause.exe") + localServiceUsername = WithWindowsUsername("NT AUTHORITY\\Local service") + localSystemUsername = WithWindowsUsername("NT AUTHORITY\\System") + hpcContainerOpt = WithWindowsHostProcessContainer() + defaultAction hpcAction = func(t *testing.T, containerID string, containerConfig *v1.ContainerConfig) {} +) + +// Tests to verify the Windows HostProcess +func TestWindowsHostProcess(t *testing.T) { + EnsureImageExists(t, pauseImage) + + t.Run("run as Local Service", func(t *testing.T) { + runHostProcess(t, false, pauseImage, defaultAction, hpcContainerOpt, localServiceUsername, defaultCommand) + }) + t.Run("run as Local System", func(t *testing.T) { + runHostProcess(t, false, pauseImage, defaultAction, hpcContainerOpt, localSystemUsername, defaultCommand) + }) + t.Run("run as unacceptable user", func(t *testing.T) { + runHostProcess(t, true, pauseImage, defaultAction, hpcContainerOpt, WithWindowsUsername("Guest"), defaultCommand) + }) + t.Run("run command on host", func(t *testing.T) { + cmd := WithCommand("Powershell", "/c", "Get-Command containerd.exe") + runHostProcess(t, false, pauseImage, defaultAction, hpcContainerOpt, localServiceUsername, cmd) + }) + t.Run("run withHostNetwork", func(t *testing.T) { + hostname, err := os.Hostname() + require.NoError(t, err) + cmd := WithCommand("Powershell", "/c", fmt.Sprintf("if ($env:COMPUTERNAME -ne %s) { exit -1 }", hostname)) + runHostProcess(t, false, pauseImage, defaultAction, hpcContainerOpt, localServiceUsername, 
cmd) + }) + t.Run("run with a different os.version image", func(t *testing.T) { + image := "docker.io/e2eteam/busybox:1.29-windows-amd64-1909" + EnsureImageExists(t, image) + runHostProcess(t, false, image, defaultAction, hpcContainerOpt, localServiceUsername, defaultCommand) + }) + t.Run("run and check stats", func(t *testing.T) { + var stats = func(t *testing.T, containerID string, containerConfig *v1.ContainerConfig) { + t.Logf("Fetch stats for container") + var ( + s *runtime.ContainerStats + err error + ) + require.NoError(t, Eventually(func() (bool, error) { + s, err = runtimeService.ContainerStats(containerID) + if err != nil { + return false, err + } + if s.GetWritableLayer().GetUsedBytes().GetValue() != 0 { + return true, nil + } + return false, nil + }, time.Second, 30*time.Second)) + + t.Logf("Verify stats received for container %q", containerConfig) + testStats(t, s, containerConfig) + } + runHostProcess(t, false, pauseImage, stats, hpcContainerOpt, localServiceUsername, defaultCommand, WithTestAnnotations(), WithTestLabels()) + }) +} + +func runHostProcess(t *testing.T, expectErr bool, image string, action hpcAction, opts ...ContainerOpts) { + t.Logf("Create a pod config and run sandbox container") + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox1", "hostprocess", WithWindowsHostProcessPod) + + t.Logf("Create a container config and run container in a pod") + containerConfig := ContainerConfig( + "container1", + image, + opts..., + ) + cn, err := runtimeService.CreateContainer(sb, containerConfig, sbConfig) + require.NoError(t, err) + defer func() { + assert.NoError(t, runtimeService.RemoveContainer(cn)) + }() + err = runtimeService.StartContainer(cn) + if err != nil { + if !expectErr { + t.Fatalf("Unexpected error while starting Container: %v", err) + } + return + } + defer func() { + assert.NoError(t, runtimeService.StopContainer(cn, 10)) + }() + + action(t, cn, containerConfig) +} + +func runAndRemoveContainer(t *testing.T, sb string,
sbConfig *runtime.PodSandboxConfig, cnConfig *runtime.ContainerConfig) { + t.Log("Create the container") + cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig) + require.NoError(t, err) + t.Log("Start the container") + require.NoError(t, runtimeService.StartContainer(cn)) + // Wait few seconds for the container to be completely initialized + time.Sleep(5 * time.Second) + + t.Log("Stop the container") + require.NoError(t, runtimeService.StopContainer(cn, 0)) + t.Log("Remove the container") + require.NoError(t, runtimeService.RemoveContainer(cn)) +} + +func TestArgsEscapedImagesOnWindows(t *testing.T) { + // the ArgsEscaped test image is based on nanoserver:ltsc2022, so ensure we run on the correct OS version + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { + t.Skip("Error in getting OS version") + } + defer k.Close() + + b, _, _ := k.GetStringValue("CurrentBuild") + buildNum, _ := strconv.Atoi(b) + if buildNum < osversion.V21H2Server { + t.Skip() + } + + containerName := "test-container" + testImage := GetImage(ArgsEscaped) + sbConfig := &runtime.PodSandboxConfig{ + Metadata: &runtime.PodSandboxMetadata{ + Name: "sandbox", + Namespace: testImage, + }, + Windows: &runtime.WindowsPodSandboxConfig{}, + } + sb, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, runtimeService.StopPodSandbox(sb)) + assert.NoError(t, runtimeService.RemovePodSandbox(sb)) + }) + + EnsureImageExists(t, testImage) + + cnConfigWithCtrCmd := ContainerConfig( + containerName, + testImage, + WithCommand("ping", "-t", "127.0.0.1"), + localSystemUsername, + ) + + cnConfigNoCtrCmd := ContainerConfig( + containerName, + testImage, + localSystemUsername, + ) + + runAndRemoveContainer(t, sb, sbConfig, cnConfigWithCtrCmd) + runAndRemoveContainer(t, sb, sbConfig, cnConfigNoCtrCmd) +} diff --git a/labels/validate.go 
b/labels/validate.go index 0de4616..1fd527a 100644 --- a/labels/validate.go +++ b/labels/validate.go @@ -17,8 +17,9 @@ package labels import ( + "fmt" + "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" ) const ( @@ -31,7 +32,7 @@ func Validate(k, v string) error { if len(k) > 10 { k = k[:10] } - return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k) + return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s: %w", maxSize, k, errdefs.ErrInvalidArgument) } return nil } diff --git a/leases/id.go b/leases/id.go index 8781a1d..8f5dc93 100644 --- a/leases/id.go +++ b/leases/id.go @@ -17,9 +17,9 @@ package leases import ( + "crypto/rand" "encoding/base64" "fmt" - "math/rand" "time" ) diff --git a/leases/lease.go b/leases/lease.go index 058d065..fc0ca34 100644 --- a/leases/lease.go +++ b/leases/lease.go @@ -65,10 +65,15 @@ func SynchronousDelete(ctx context.Context, o *DeleteOptions) error { return nil } -// WithLabels sets labels on a lease +// WithLabels merges labels on a lease func WithLabels(labels map[string]string) Opt { return func(l *Lease) error { - l.Labels = labels + if l.Labels == nil { + l.Labels = map[string]string{} + } + for k, v := range labels { + l.Labels[k] = v + } return nil } } diff --git a/leases/lease_test.go b/leases/lease_test.go new file mode 100644 index 0000000..2c92e69 --- /dev/null +++ b/leases/lease_test.go @@ -0,0 +1,83 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package leases + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWithLabels(t *testing.T) { + type unitTest struct { + name string + uut *Lease + labels map[string]string + expected map[string]string + } + + addLabelsToEmptyMap := &unitTest{ + name: "AddLabelsToEmptyMap", + uut: &Lease{}, + labels: map[string]string{ + "containerd.io/gc.root": "2015-12-04T00:00:00Z", + }, + expected: map[string]string{ + "containerd.io/gc.root": "2015-12-04T00:00:00Z", + }, + } + + addLabelsToNonEmptyMap := &unitTest{ + name: "AddLabelsToNonEmptyMap", + uut: &Lease{ + Labels: map[string]string{ + "containerd.io/gc.expire": "2015-12-05T00:00:00Z", + }, + }, + labels: map[string]string{ + "containerd.io/gc.root": "2015-12-04T00:00:00Z", + "containerd.io/gc.ref.snapshot.overlayfs": "sha256:87806a591ce894ff5c699c28fe02093d6cdadd6b1ad86819acea05ccb212ff3d", + }, + expected: map[string]string{ + "containerd.io/gc.root": "2015-12-04T00:00:00Z", + "containerd.io/gc.ref.snapshot.overlayfs": "sha256:87806a591ce894ff5c699c28fe02093d6cdadd6b1ad86819acea05ccb212ff3d", + "containerd.io/gc.expire": "2015-12-05T00:00:00Z", + }, + } + + testcases := []*unitTest{ + addLabelsToEmptyMap, + addLabelsToNonEmptyMap, + } + + for _, testcase := range testcases { + testcase := testcase + + t.Run(testcase.name, func(t *testing.T) { + f := WithLabels(testcase.labels) + + err := f(testcase.uut) + require.NoError(t, err) + + for k, v := range testcase.expected { + assert.Contains(t, testcase.uut.Labels, k) + assert.Equal(t, v, testcase.uut.Labels[k]) + } + }) + } +} diff --git a/log/context.go b/log/context.go index 37b6a7d..2015306 100644 --- a/log/context.go +++ b/log/context.go @@ -14,55 +14,169 @@ limitations under the License. 
*/ +// Package log provides types and functions related to logging, passing +// loggers through a context, and attaching context to the logger. +// +// # Transitional types +// +// This package contains various types that are aliases for types in [logrus]. +// These aliases are intended for transitioning away from hard-coding logrus +// as logging implementation. Consumers of this package are encouraged to use +// the type-aliases from this package instead of directly using their logrus +// equivalent. +// +// The intent is to replace these aliases with locally defined types and +// interfaces once all consumers are no longer directly importing logrus +// types. +// +// IMPORTANT: due to the transitional purpose of this package, it is not +// guaranteed for the full logrus API to be provided in the future. As +// outlined, these aliases are provided as a step to transition away from +// a specific implementation which, as a result, exposes the full logrus API. +// While no decisions have been made on the ultimate design and interface +// provided by this package, we do not expect carrying "less common" features. package log import ( "context" + "fmt" "github.com/sirupsen/logrus" ) -var ( - // G is an alias for GetLogger. - // - // We may want to define this locally to a package to get package tagged log - // messages. - G = GetLogger +// G is a shorthand for [GetLogger]. +// +// We may want to define this locally to a package to get package tagged log +// messages. +var G = GetLogger - // L is an alias for the standard logger. - L = logrus.NewEntry(logrus.StandardLogger()) -) +// L is an alias for the standard logger. +var L = &Entry{ + Logger: logrus.StandardLogger(), + // Default is three fields plus a little extra room. + Data: make(Fields, 6), +} -type ( - loggerKey struct{} -) +type loggerKey struct{} +// Fields type to pass to "WithFields". +type Fields = map[string]any + +// Entry is a logging entry. 
It contains all the fields passed with +// [Entry.WithFields]. It's finally logged when Trace, Debug, Info, Warn, +// Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +// +// Entry is a transitional type, and currently an alias for [logrus.Entry]. +type Entry = logrus.Entry + +// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using +// zeros to ensure the formatted time is always the same number of +// characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// Level is a logging level. +type Level = logrus.Level + +// Supported log levels. const ( - // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to - // ensure the formatted time is always the same number of characters. - RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + // TraceLevel level. Designates finer-grained informational events + // than [DebugLevel]. + TraceLevel Level = logrus.TraceLevel - // TextFormat represents the text logging format - TextFormat = "text" + // DebugLevel level. Usually only enabled when debugging. Very verbose + // logging. + DebugLevel Level = logrus.DebugLevel - // JSONFormat represents the JSON logging format - JSONFormat = "json" + // InfoLevel level. General operational entries about what's going on + // inside the application. + InfoLevel Level = logrus.InfoLevel + + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel Level = logrus.WarnLevel + + // ErrorLevel level. Logs errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel Level = logrus.ErrorLevel + + // FatalLevel level. Logs and then calls "logger.Exit(1)". It exits + // even if the logging level is set to Panic. + FatalLevel Level = logrus.FatalLevel + + // PanicLevel level. This is the highest level of severity. 
Logs and + // then calls panic with the message passed to Debug, Info, ... + PanicLevel Level = logrus.PanicLevel ) +// SetLevel sets log level globally. It returns an error if the given +// level is not supported. +// +// level can be one of: +// +// - "trace" ([TraceLevel]) +// - "debug" ([DebugLevel]) +// - "info" ([InfoLevel]) +// - "warn" ([WarnLevel]) +// - "error" ([ErrorLevel]) +// - "fatal" ([FatalLevel]) +// - "panic" ([PanicLevel]) +func SetLevel(level string) error { + lvl, err := logrus.ParseLevel(level) + if err != nil { + return err + } + + L.Logger.SetLevel(lvl) + return nil +} + +// GetLevel returns the current log level. +func GetLevel() Level { + return L.Logger.GetLevel() +} + +// OutputFormat specifies a log output format. +type OutputFormat string + +// Supported log output formats. +const ( + // TextFormat represents the text logging format. + TextFormat OutputFormat = "text" + + // JSONFormat represents the JSON logging format. + JSONFormat OutputFormat = "json" +) + +// SetFormat sets the log output format ([TextFormat] or [JSONFormat]). +func SetFormat(format OutputFormat) error { + switch format { + case TextFormat: + L.Logger.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: RFC3339NanoFixed, + FullTimestamp: true, + }) + return nil + case JSONFormat: + L.Logger.SetFormatter(&logrus.JSONFormatter{ + TimestampFormat: RFC3339NanoFixed, + }) + return nil + default: + return fmt.Errorf("unknown log format: %s", format) + } +} + // WithLogger returns a new context with the provided logger. Use in // combination with logger.WithField(s) for great effect. -func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) +func WithLogger(ctx context.Context, logger *Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger.WithContext(ctx)) } // GetLogger retrieves the current logger from the context. 
If no logger is // available, the default logger is returned. -func GetLogger(ctx context.Context) *logrus.Entry { - logger := ctx.Value(loggerKey{}) - - if logger == nil { - return L +func GetLogger(ctx context.Context) *Entry { + if logger := ctx.Value(loggerKey{}); logger != nil { + return logger.(*Entry) } - - return logger.(*logrus.Entry) + return L.WithContext(ctx) } diff --git a/log/context_test.go b/log/context_test.go index 0d9827c..1b6d4a6 100644 --- a/log/context_test.go +++ b/log/context_test.go @@ -18,17 +18,46 @@ package log import ( "context" + "reflect" "testing" - "gotest.tools/v3/assert" + "github.com/sirupsen/logrus" ) func TestLoggerContext(t *testing.T) { + const expected = "one" ctx := context.Background() - assert.Equal(t, GetLogger(ctx), L) // should be same as L variable - assert.Equal(t, G(ctx), GetLogger(ctx)) // these should be the same. - - ctx = WithLogger(ctx, G(ctx).WithField("test", "one")) - assert.Equal(t, GetLogger(ctx).Data["test"], "one") - assert.Equal(t, G(ctx), GetLogger(ctx)) // these should be the same. 
+ ctx = WithLogger(ctx, G(ctx).WithField("test", expected)) + if actual := GetLogger(ctx).Data["test"]; actual != expected { + t.Errorf("expected: %v, got: %v", expected, actual) + } + a := G(ctx) + b := GetLogger(ctx) + if !reflect.DeepEqual(a, b) || a != b { + t.Errorf("should be the same: %+v, %+v", a, b) + } +} + +func TestCompat(t *testing.T) { + expected := Fields{ + "hello1": "world1", + "hello2": "world2", + "hello3": "world3", + } + + l := G(context.TODO()) + l = l.WithFields(logrus.Fields{"hello1": "world1"}) + l = l.WithFields(Fields{"hello2": "world2"}) + l = l.WithFields(map[string]any{"hello3": "world3"}) + if !reflect.DeepEqual(Fields(l.Data), expected) { + t.Errorf("expected: (%[1]T) %+[1]v, got: (%[2]T) %+[2]v", expected, l.Data) + } + + l2 := L + l2 = l2.WithFields(logrus.Fields{"hello1": "world1"}) + l2 = l2.WithFields(Fields{"hello2": "world2"}) + l2 = l2.WithFields(map[string]any{"hello3": "world3"}) + if !reflect.DeepEqual(Fields(l2.Data), expected) { + t.Errorf("expected: (%[1]T) %+[1]v, got: (%[2]T) %+[2]v", expected, l2.Data) + } } diff --git a/log/logtest/context.go b/log/logtest/context.go index 870c62a..218f525 100644 --- a/log/logtest/context.go +++ b/log/logtest/context.go @@ -19,7 +19,7 @@ package logtest import ( "context" "fmt" - "io/ioutil" + "io" "path/filepath" "runtime" "testing" @@ -37,7 +37,7 @@ func WithT(ctx context.Context, t testing.TB) context.Context { // Increase debug level for tests l.SetLevel(logrus.DebugLevel) - l.SetOutput(ioutil.Discard) + l.SetOutput(io.Discard) l.SetReportCaller(true) // Add testing hook diff --git a/metadata/bolt.go b/metadata/bolt.go index 6ea4608..9edfab5 100644 --- a/metadata/bolt.go +++ b/metadata/bolt.go @@ -18,8 +18,8 @@ package metadata import ( "context" + "fmt" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -55,7 +55,7 @@ func update(ctx context.Context, db transactor, fn func(*bolt.Tx) error) error { if !ok { return db.Update(fn) } else if !tx.Writable() { - return 
errors.Wrap(bolt.ErrTxNotWritable, "unable to use transaction from context") + return fmt.Errorf("unable to use transaction from context: %w", bolt.ErrTxNotWritable) } return fn(tx) } diff --git a/metadata/boltutil/helpers.go b/metadata/boltutil/helpers.go index 6995917..4201d7b 100644 --- a/metadata/boltutil/helpers.go +++ b/metadata/boltutil/helpers.go @@ -17,11 +17,11 @@ package boltutil import ( + "fmt" "time" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -99,7 +99,7 @@ func writeMap(bkt *bolt.Bucket, bucketName []byte, labels map[string]string) err } if err := lbkt.Put([]byte(k), []byte(v)); err != nil { - return errors.Wrapf(err, "failed to set label %q=%q", k, v) + return fmt.Errorf("failed to set label %q=%q: %w", k, v, err) } } @@ -162,6 +162,7 @@ func WriteExtensions(bkt *bolt.Bucket, extensions map[string]types.Any) error { } for name, ext := range extensions { + ext := ext p, err := proto.Marshal(&ext) if err != nil { return err @@ -228,7 +229,7 @@ func ReadAny(bkt *bolt.Bucket, name []byte) (*types.Any, error) { out := types.Any{} if err := proto.Unmarshal(bytes, &out); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal any") + return nil, fmt.Errorf("failed to unmarshal any: %w", err) } return &out, nil diff --git a/metadata/buckets.go b/metadata/buckets.go index fa947fb..516de1f 100644 --- a/metadata/buckets.go +++ b/metadata/buckets.go @@ -15,7 +15,7 @@ */ // Package metadata stores all labels and object specific metadata by namespace. -// This package also contains the main garbage collection logic for cleaning up +// This package also contains the main garbage collection logic for cleaning up // resources consistently and atomically. Resources used by backends will be // tracked in the metadata store to be exposed to consumers of this package. 
// @@ -26,7 +26,7 @@ // // Generically, we try to do the following: // -// /// -> +// /// -> // // version: Currently, this is "v1". Additions can be made to v1 in a backwards // compatible way. If the layout changes, a new version must be made, along @@ -46,72 +46,73 @@ // the structure is changed in addition to adding a migration and incrementing // the database version. Note that `╘══*...*` refers to maps with arbitrary // keys. -// ├──version : - Latest version, see migrations -// └──v1 - Schema version bucket -// ╘══*namespace* -// ├──labels -// │  ╘══*key* : - Label value -// ├──image -// │  ╘══*image name* -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │   ├──target -// │   │  ├──digest : - Descriptor digest -// │   │  ├──mediatype : - Descriptor media type -// │   │  └──size : - Descriptor size -// │   └──labels -// │   ╘══*key* : - Label value -// ├──containers -// │  ╘══*container id* -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │   ├──spec : - Proto marshaled spec -// │   ├──image : - Image name -// │   ├──snapshotter : - Snapshotter name -// │   ├──snapshotKey : - Snapshot key -// │   ├──runtime -// │   │  ├──name : - Runtime name -// │   │  ├──extensions -// │   │  │  ╘══*name* : - Proto marshaled extension -// │   │  └──options : - Proto marshaled options -// │   └──labels -// │   ╘══*key* : - Label value -// ├──snapshots -// │  ╘══*snapshotter* -// │   ╘══*snapshot key* -// │    ├──name : - Snapshot name in backend -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │    ├──parent : - Parent snapshot name -// │   ├──children -// │   │  ╘══*snapshot key* : - Child snapshot reference -// │   └──labels -// │   ╘══*key* : - Label value -// ├──content -// │  ├──blob -// │  │ ╘══*blob digest* -// │  │ ├──createdat : - Created at -// │  │ ├──updatedat : - Updated at -// │  │   ├──size : - Blob size -// │  │ └──labels -// │  │ ╘══*key* : - Label value -// │  └──ingests -// │  
 ╘══*ingest reference* -// │    ├──ref : - Ingest reference in backend -// │   ├──expireat : - Time to expire ingest -// │   └──expected : - Expected commit digest -// └──leases -// ╘══*lease id* -//   ├──createdat : - Created at -// ├──labels -// │ ╘══*key* : - Label value -//   ├──snapshots -// │  ╘══*snapshotter* -// │   ╘══*snapshot key* : - Snapshot reference -//   ├──content -// │  ╘══*blob digest* : - Content blob reference -// └──ingests -//   ╘══*ingest reference* : - Content ingest reference +// +// ├──version : - Latest version, see migrations +// └──v1 - Schema version bucket +// ╘══*namespace* +// ├──labels +// │  ╘══*key* : - Label value +// ├──image +// │  ╘══*image name* +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │   ├──target +// │   │  ├──digest : - Descriptor digest +// │   │  ├──mediatype : - Descriptor media type +// │   │  └──size : - Descriptor size +// │   └──labels +// │   ╘══*key* : - Label value +// ├──containers +// │  ╘══*container id* +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │   ├──spec : - Proto marshaled spec +// │   ├──image : - Image name +// │   ├──snapshotter : - Snapshotter name +// │   ├──snapshotKey : - Snapshot key +// │   ├──runtime +// │   │  ├──name : - Runtime name +// │   │  ├──extensions +// │   │  │  ╘══*name* : - Proto marshaled extension +// │   │  └──options : - Proto marshaled options +// │   └──labels +// │   ╘══*key* : - Label value +// ├──snapshots +// │  ╘══*snapshotter* +// │   ╘══*snapshot key* +// │    ├──name : - Snapshot name in backend +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │    ├──parent : - Parent snapshot name +// │   ├──children +// │   │  ╘══*snapshot key* : - Child snapshot reference +// │   └──labels +// │   ╘══*key* : - Label value +// ├──content +// │  ├──blob +// │  │ ╘══*blob digest* +// │  │ ├──createdat : - Created at +// │  │ ├──updatedat : - Updated at +// │  │   ├──size : - Blob size 
+// │  │ └──labels +// │  │ ╘══*key* : - Label value +// │  └──ingests +// │   ╘══*ingest reference* +// │    ├──ref : - Ingest reference in backend +// │   ├──expireat : - Time to expire ingest +// │   └──expected : - Expected commit digest +// └──leases +// ╘══*lease id* +//   ├──createdat : - Created at +// ├──labels +// │ ╘══*key* : - Label value +//   ├──snapshots +// │  ╘══*snapshotter* +// │   ╘══*snapshot key* : - Snapshot reference +//   ├──content +// │  ╘══*blob digest* : - Content blob reference +// └──ingests +//   ╘══*ingest reference* : - Content ingest reference package metadata import ( diff --git a/metadata/containers.go b/metadata/containers.go index 26e86d8..97002e5 100644 --- a/metadata/containers.go +++ b/metadata/containers.go @@ -18,6 +18,7 @@ package metadata import ( "context" + "fmt" "strings" "sync/atomic" "time" @@ -31,7 +32,6 @@ import ( "github.com/containerd/containerd/namespaces" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -57,11 +57,11 @@ func (s *containerStore) Get(ctx context.Context, id string) (containers.Contain if err := view(ctx, s.db, func(tx *bolt.Tx) error { bkt := getContainerBucket(tx, namespace, id) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace) + return fmt.Errorf("container %q in namespace %q: %w", id, namespace, errdefs.ErrNotFound) } if err := readContainer(&container, bkt); err != nil { - return errors.Wrapf(err, "failed to read container %q", id) + return fmt.Errorf("failed to read container %q: %w", id, err) } return nil @@ -80,7 +80,7 @@ func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.C filter, err := filters.ParseAll(fs...) 
if err != nil { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) + return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument) } var m []containers.Container @@ -99,7 +99,7 @@ func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.C container := containers.Container{ID: string(k)} if err := readContainer(&container, cbkt); err != nil { - return errors.Wrapf(err, "failed to read container %q", string(k)) + return fmt.Errorf("failed to read container %q: %w", string(k), err) } if filter.Match(adaptContainer(container)) { @@ -121,7 +121,7 @@ func (s *containerStore) Create(ctx context.Context, container containers.Contai } if err := validateContainer(&container); err != nil { - return containers.Container{}, errors.Wrap(err, "create container failed validation") + return containers.Container{}, fmt.Errorf("create container failed validation: %w", err) } if err := update(ctx, s.db, func(tx *bolt.Tx) error { @@ -133,7 +133,7 @@ func (s *containerStore) Create(ctx context.Context, container containers.Contai cbkt, err := bkt.CreateBucket([]byte(container.ID)) if err != nil { if err == bolt.ErrBucketExists { - err = errors.Wrapf(errdefs.ErrAlreadyExists, "container %q", container.ID) + err = fmt.Errorf("container %q: %w", container.ID, errdefs.ErrAlreadyExists) } return err } @@ -141,7 +141,7 @@ func (s *containerStore) Create(ctx context.Context, container containers.Contai container.CreatedAt = time.Now().UTC() container.UpdatedAt = container.CreatedAt if err := writeContainer(cbkt, &container); err != nil { - return errors.Wrapf(err, "failed to write container %q", container.ID) + return fmt.Errorf("failed to write container %q: %w", container.ID, err) } return nil @@ -159,23 +159,23 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai } if container.ID == "" { - return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "must specify a container id") + return 
containers.Container{}, fmt.Errorf("must specify a container id: %w", errdefs.ErrInvalidArgument) } var updated containers.Container if err := update(ctx, s.db, func(tx *bolt.Tx) error { bkt := getContainersBucket(tx, namespace) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace) + return fmt.Errorf("cannot update container %q in namespace %q: %w", container.ID, namespace, errdefs.ErrNotFound) } cbkt := bkt.Bucket([]byte(container.ID)) if cbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID) + return fmt.Errorf("container %q: %w", container.ID, errdefs.ErrNotFound) } if err := readContainer(&updated, cbkt); err != nil { - return errors.Wrapf(err, "failed to read container %q", container.ID) + return fmt.Errorf("failed to read container %q: %w", container.ID, err) } createdat := updated.CreatedAt updated.ID = container.ID @@ -188,11 +188,11 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai // are provided. This allows these fields to become mutable in the // future. 
if updated.Snapshotter != container.Snapshotter { - return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter field is immutable") + return fmt.Errorf("container.Snapshotter field is immutable: %w", errdefs.ErrInvalidArgument) } if updated.Runtime.Name != container.Runtime.Name { - return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name field is immutable") + return fmt.Errorf("container.Runtime.Name field is immutable: %w", errdefs.ErrInvalidArgument) } } @@ -230,18 +230,18 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai case "snapshotkey": updated.SnapshotKey = container.SnapshotKey default: - return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID) + return fmt.Errorf("cannot update %q field on %q: %w", path, container.ID, errdefs.ErrInvalidArgument) } } if err := validateContainer(&updated); err != nil { - return errors.Wrap(err, "update failed validation") + return fmt.Errorf("update failed validation: %w", err) } updated.CreatedAt = createdat updated.UpdatedAt = time.Now().UTC() if err := writeContainer(cbkt, &updated); err != nil { - return errors.Wrapf(err, "failed to write container %q", container.ID) + return fmt.Errorf("failed to write container %q: %w", container.ID, err) } return nil @@ -261,12 +261,12 @@ func (s *containerStore) Delete(ctx context.Context, id string) error { return update(ctx, s.db, func(tx *bolt.Tx) error { bkt := getContainersBucket(tx, namespace) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace) + return fmt.Errorf("cannot delete container %q in namespace %q: %w", id, namespace, errdefs.ErrNotFound) } if err := bkt.DeleteBucket([]byte(id)); err != nil { if err == bolt.ErrBucketNotFound { - err = errors.Wrapf(errdefs.ErrNotFound, "container %v", id) + err = fmt.Errorf("container %v: %w", id, errdefs.ErrNotFound) } return err } @@ -279,32 +279,32 @@ func (s 
*containerStore) Delete(ctx context.Context, id string) error { func validateContainer(container *containers.Container) error { if err := identifiers.Validate(container.ID); err != nil { - return errors.Wrap(err, "container.ID") + return fmt.Errorf("container.ID: %w", err) } for k := range container.Extensions { if k == "" { - return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Extension keys must not be zero-length") + return fmt.Errorf("container.Extension keys must not be zero-length: %w", errdefs.ErrInvalidArgument) } } // image has no validation for k, v := range container.Labels { if err := labels.Validate(k, v); err != nil { - return errors.Wrapf(err, "containers.Labels") + return fmt.Errorf("containers.Labels: %w", err) } } if container.Runtime.Name == "" { - return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name must be set") + return fmt.Errorf("container.Runtime.Name must be set: %w", errdefs.ErrInvalidArgument) } if container.Spec == nil { - return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Spec must be set") + return fmt.Errorf("container.Spec must be set: %w", errdefs.ErrInvalidArgument) } if container.SnapshotKey != "" && container.Snapshotter == "" { - return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set if container.SnapshotKey is set") + return fmt.Errorf("container.Snapshotter must be set if container.SnapshotKey is set: %w", errdefs.ErrInvalidArgument) } return nil diff --git a/metadata/containers_test.go b/metadata/containers_test.go index 5ccf483..b45d54c 100644 --- a/metadata/containers_test.go +++ b/metadata/containers_test.go @@ -18,12 +18,11 @@ package metadata import ( "context" + "errors" "fmt" - "io/ioutil" - "os" "path/filepath" "reflect" - "strings" + "runtime" "testing" "time" @@ -35,7 +34,6 @@ import ( "github.com/containerd/typeurl" "github.com/gogo/protobuf/types" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" 
) @@ -153,6 +151,7 @@ func TestContainersList(t *testing.T) { } for _, result := range results { + result := result checkContainersEqual(t, &result, testset[result.ID], "list results did not match") } }) @@ -640,7 +639,7 @@ func TestContainersCreateUpdateDelete(t *testing.T) { if testcase.createerr == nil { t.Fatalf("unexpected error: %v", err) } else { - t.Fatalf("cause of %v (cause: %v) != %v", err, errors.Cause(err), testcase.createerr) + t.Fatalf("cause of %v (cause: %v) != %v", err, errors.Unwrap(err), testcase.createerr) } } else if testcase.createerr != nil { return @@ -662,7 +661,7 @@ func TestContainersCreateUpdateDelete(t *testing.T) { if testcase.cause == nil { t.Fatalf("unexpected error: %v", err) } else { - t.Fatalf("cause of %v (cause: %v) != %v", err, errors.Cause(err), testcase.cause) + t.Fatalf("cause of %v (cause: %v) != %v", err, errors.Unwrap(err), testcase.cause) } } else if testcase.cause != nil { return @@ -695,7 +694,12 @@ func checkContainerTimestamps(t *testing.T, c *containers.Container, now time.Ti } else { // ensure that updatedat is always after createdat if !c.UpdatedAt.After(c.CreatedAt) { - t.Fatalf("timestamp for updatedat not after createdat: %v <= %v", c.UpdatedAt, c.CreatedAt) + if runtime.GOOS == "windows" && c.UpdatedAt == c.CreatedAt { + // Windows' time.Now resolution is lower than Linux, due to Go. 
+ // https://github.com/golang/go/issues/31160 + } else { + t.Fatalf("timestamp for updatedat not after createdat: %v <= %v", c.UpdatedAt, c.CreatedAt) + } } } @@ -715,10 +719,7 @@ func testEnv(t *testing.T) (context.Context, *bolt.DB, func()) { ctx = namespaces.WithNamespace(ctx, "testing") ctx = logtest.WithT(ctx, t) - dirname, err := ioutil.TempDir("", strings.Replace(t.Name(), "/", "_", -1)+"-") - if err != nil { - t.Fatal(err) - } + dirname := t.TempDir() db, err := bolt.Open(filepath.Join(dirname, "meta.db"), 0644, nil) if err != nil { @@ -727,9 +728,6 @@ func testEnv(t *testing.T) (context.Context, *bolt.DB, func()) { return ctx, db, func() { db.Close() - if err := os.RemoveAll(dirname); err != nil { - t.Log("failed removing temp dir", err) - } cancel() } } diff --git a/metadata/content.go b/metadata/content.go index ee68ccf..66d0ee2 100644 --- a/metadata/content.go +++ b/metadata/content.go @@ -19,6 +19,7 @@ package metadata import ( "context" "encoding/binary" + "fmt" "strings" "sync" "sync/atomic" @@ -33,7 +34,6 @@ import ( "github.com/containerd/containerd/namespaces" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -77,7 +77,7 @@ func (cs *contentStore) Info(ctx context.Context, dgst digest.Digest) (content.I if err := view(ctx, cs.db, func(tx *bolt.Tx) error { bkt := getBlobBucket(tx, ns, dgst) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst) + return fmt.Errorf("content digest %v: %w", dgst, errdefs.ErrNotFound) } info.Digest = dgst @@ -104,11 +104,11 @@ func (cs *contentStore) Update(ctx context.Context, info content.Info, fieldpath if err := update(ctx, cs.db, func(tx *bolt.Tx) error { bkt := getBlobBucket(tx, ns, info.Digest) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", info.Digest) + return fmt.Errorf("content digest %v: %w", info.Digest, errdefs.ErrNotFound) } if err 
:= readInfo(&updated, bkt); err != nil { - return errors.Wrapf(err, "info %q", info.Digest) + return fmt.Errorf("info %q: %w", info.Digest, err) } if len(fieldpaths) > 0 { @@ -127,7 +127,7 @@ func (cs *contentStore) Update(ctx context.Context, info content.Info, fieldpath case "labels": updated.Labels = info.Labels default: - return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest) + return fmt.Errorf("cannot update %q field on content info %q: %w", path, info.Digest, errdefs.ErrInvalidArgument) } } } else { @@ -211,7 +211,7 @@ func (cs *contentStore) Delete(ctx context.Context, dgst digest.Digest) error { return update(ctx, cs.db, func(tx *bolt.Tx) error { bkt := getBlobBucket(tx, ns, dgst) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst) + return fmt.Errorf("content digest %v: %w", dgst, errdefs.ErrNotFound) } if err := getBlobsBucket(tx, ns).DeleteBucket([]byte(dgst.String())); err != nil { @@ -300,7 +300,7 @@ func (cs *contentStore) Status(ctx context.Context, ref string) (content.Status, if err := view(ctx, cs.db, func(tx *bolt.Tx) error { bref = getRef(tx, ns, ref) if bref == "" { - return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref) + return fmt.Errorf("reference %v: %w", ref, errdefs.ErrNotFound) } return nil @@ -328,15 +328,15 @@ func (cs *contentStore) Abort(ctx context.Context, ref string) error { return update(ctx, cs.db, func(tx *bolt.Tx) error { ibkt := getIngestsBucket(tx, ns) if ibkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref) + return fmt.Errorf("reference %v: %w", ref, errdefs.ErrNotFound) } bkt := ibkt.Bucket([]byte(ref)) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref) + return fmt.Errorf("reference %v: %w", ref, errdefs.ErrNotFound) } bref := string(bkt.Get(bucketKeyRef)) if bref == "" { - return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref) + return fmt.Errorf("reference %v: 
%w", ref, errdefs.ErrNotFound) } expected := string(bkt.Get(bucketKeyExpected)) if err := ibkt.DeleteBucket([]byte(ref)); err != nil { @@ -367,7 +367,7 @@ func (cs *contentStore) Writer(ctx context.Context, opts ...content.WriterOpt) ( // TODO(AkihiroSuda): we could create a random string or one calculated based on the context // https://github.com/containerd/containerd/issues/2129#issuecomment-380255019 if wOpts.Ref == "" { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") + return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument) } ns, err := namespaces.NamespaceRequired(ctx) if err != nil { @@ -390,7 +390,7 @@ func (cs *contentStore) Writer(ctx context.Context, opts ...content.WriterOpt) ( // Add content to lease to prevent other reference removals // from effecting this object during a provided lease if err := addContentLease(ctx, tx, wOpts.Desc.Digest); err != nil { - return errors.Wrap(err, "unable to lease content") + return fmt.Errorf("unable to lease content: %w", err) } // Return error outside of transaction to ensure // commit succeeds with the lease. 
@@ -466,7 +466,7 @@ func (cs *contentStore) Writer(ctx context.Context, opts ...content.WriterOpt) ( return nil, err } if exists { - return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", wOpts.Desc.Digest) + return nil, fmt.Errorf("content %v: %w", wOpts.Desc.Digest, errdefs.ErrAlreadyExists) } return &namespacedWriter{ @@ -619,10 +619,10 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, var actual digest.Digest if nw.w == nil { if size != 0 && size != nw.desc.Size { - return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, nw.desc.Size, size) + return "", fmt.Errorf("%q failed size validation: %v != %v: %w", nw.ref, nw.desc.Size, size, errdefs.ErrFailedPrecondition) } if expected != "" && expected != nw.desc.Digest { - return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q unexpected digest", nw.ref) + return "", fmt.Errorf("%q unexpected digest: %w", nw.ref, errdefs.ErrFailedPrecondition) } size = nw.desc.Size actual = nw.desc.Digest @@ -634,7 +634,7 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, } if size != 0 && size != status.Offset { nw.w.Close() - return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, status.Offset, size) + return "", fmt.Errorf("%q failed size validation: %v != %v: %w", nw.ref, status.Offset, size, errdefs.ErrFailedPrecondition) } size = status.Offset @@ -647,7 +647,7 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, bkt, err := createBlobBucket(tx, nw.namespace, actual) if err != nil { if err == bolt.ErrBucketExists { - return actual, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", actual) + return actual, fmt.Errorf("content %v: %w", actual, errdefs.ErrAlreadyExists) } return "", err } @@ -700,7 +700,7 @@ func (cs *contentStore) checkAccess(ctx context.Context, dgst digest.Digest) err return view(ctx, cs.db, func(tx 
*bolt.Tx) error { bkt := getBlobBucket(tx, ns, dgst) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst) + return fmt.Errorf("content digest %v: %w", dgst, errdefs.ErrNotFound) } return nil }) @@ -709,7 +709,7 @@ func (cs *contentStore) checkAccess(ctx context.Context, dgst digest.Digest) err func validateInfo(info *content.Info) error { for k, v := range info.Labels { if err := labels.Validate(k, v); err != nil { - return errors.Wrapf(err, "info.Labels") + return fmt.Errorf("info.Labels: %w", err) } } @@ -740,7 +740,7 @@ func writeInfo(info *content.Info, bkt *bolt.Bucket) error { } if err := boltutil.WriteLabels(bkt, info.Labels); err != nil { - return errors.Wrapf(err, "writing labels for info %v", info.Digest) + return fmt.Errorf("writing labels for info %v: %w", info.Digest, err) } // Write size @@ -772,6 +772,7 @@ func writeExpireAt(expire time.Time, bkt *bolt.Bucket) error { return bkt.Put(bucketKeyExpireAt, expireAt) } +// garbageCollect removes all contents that are no longer used. 
func (cs *contentStore) garbageCollect(ctx context.Context) (d time.Duration, err error) { cs.l.Lock() t1 := time.Now() diff --git a/metadata/content_test.go b/metadata/content_test.go index a97ec4f..061095c 100644 --- a/metadata/content_test.go +++ b/metadata/content_test.go @@ -19,6 +19,7 @@ package metadata import ( "bytes" "context" + "errors" "fmt" "path/filepath" "sync/atomic" @@ -32,7 +33,6 @@ import ( "github.com/containerd/containerd/namespaces" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -190,11 +190,11 @@ func checkContentLeased(ctx context.Context, db *DB, dgst digest.Digest) error { return db.View(func(tx *bolt.Tx) error { bkt := getBucket(tx, bucketKeyVersion, []byte(ns), bucketKeyObjectLeases, []byte(lease), bucketKeyObjectContent) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "bucket not found %s", lease) + return fmt.Errorf("bucket not found %s: %w", lease, errdefs.ErrNotFound) } v := bkt.Get([]byte(dgst.String())) if v == nil { - return errors.Wrap(errdefs.ErrNotFound, "object not leased") + return fmt.Errorf("object not leased: %w", errdefs.ErrNotFound) } return nil @@ -214,11 +214,11 @@ func checkIngestLeased(ctx context.Context, db *DB, ref string) error { return db.View(func(tx *bolt.Tx) error { bkt := getBucket(tx, bucketKeyVersion, []byte(ns), bucketKeyObjectLeases, []byte(lease), bucketKeyObjectIngests) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "bucket not found %s", lease) + return fmt.Errorf("bucket not found %s: %w", lease, errdefs.ErrNotFound) } v := bkt.Get([]byte(ref)) if v == nil { - return errors.Wrap(errdefs.ErrNotFound, "object not leased") + return fmt.Errorf("object not leased: %w", errdefs.ErrNotFound) } return nil diff --git a/metadata/db.go b/metadata/db.go index 40d045f..2d9cbf3 100644 --- a/metadata/db.go +++ b/metadata/db.go @@ -19,6 +19,8 @@ package metadata import ( "context" 
"encoding/binary" + "errors" + "fmt" "strings" "sync" "sync/atomic" @@ -28,7 +30,6 @@ import ( "github.com/containerd/containerd/gc" "github.com/containerd/containerd/log" "github.com/containerd/containerd/snapshots" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -181,7 +182,7 @@ func (m *DB) Init(ctx context.Context) error { for _, m := range updates { t0 := time.Now() if err := m.migrate(tx); err != nil { - return errors.Wrapf(err, "failed to migrate to %s.%d", m.schema, m.version) + return fmt.Errorf("failed to migrate to %s.%d: %w", m.schema, m.version, err) } log.G(ctx).WithField("d", time.Since(t0)).Debugf("finished database migration to %s.%d", m.schema, m.version) } @@ -276,7 +277,7 @@ func (s GCStats) Elapsed() time.Duration { return s.MetaD } -// GarbageCollect starts garbage collection +// GarbageCollect removes resources (snapshots, contents, ...) that are no longer used. func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { m.wlock.Lock() t1 := time.Now() @@ -307,7 +308,7 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { } if err := scanAll(ctx, tx, rm); err != nil { - return errors.Wrap(err, "failed to scan and remove") + return fmt.Errorf("failed to scan and remove: %w", err) } return nil @@ -362,6 +363,7 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { return stats, err } +// getMarked returns all resources that are used. 
func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { var marked map[gc.Node]struct{} if err := m.db.View(func(tx *bolt.Tx) error { diff --git a/metadata/db_test.go b/metadata/db_test.go index cceef22..707d9a9 100644 --- a/metadata/db_test.go +++ b/metadata/db_test.go @@ -19,11 +19,10 @@ package metadata import ( "context" "encoding/binary" + "errors" "fmt" "io" - "io/ioutil" "math/rand" - "os" "path/filepath" "runtime/pprof" "strings" @@ -44,7 +43,6 @@ import ( "github.com/gogo/protobuf/types" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -74,10 +72,7 @@ func testDB(t *testing.T, opt ...testOpt) (context.Context, *DB, func()) { o(&topts) } - dirname, err := ioutil.TempDir("", strings.Replace(t.Name(), "/", "_", -1)+"-") - if err != nil { - t.Fatal(err) - } + dirname := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(dirname, "native")) if err != nil { @@ -113,9 +108,6 @@ func testDB(t *testing.T, opt ...testOpt) (context.Context, *DB, func()) { return ctx, db, func() { bdb.Close() - if err := os.RemoveAll(dirname); err != nil { - t.Log("failed removing temp dir", err) - } cancel() } } @@ -209,7 +201,7 @@ func TestMigrations(t *testing.T) { check: func(tx *bolt.Tx) error { bkt := getSnapshotterBucket(tx, "testing", "testing") if bkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "snapshots bucket not found") + return fmt.Errorf("snapshots bucket not found: %w", errdefs.ErrNotFound) } snapshots := []struct { key string @@ -236,7 +228,7 @@ func TestMigrations(t *testing.T) { for _, s := range snapshots { sbkt := bkt.Bucket([]byte(s.key)) if sbkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "key does not exist") + return fmt.Errorf("key does not exist: %w", errdefs.ErrNotFound) } cbkt := sbkt.Bucket(bucketKeyChildren) @@ -246,12 +238,12 @@ func TestMigrations(t *testing.T) { } if cn != len(s.children) { - 
return errors.Errorf("unexpected number of children %d, expected %d", cn, len(s.children)) + return fmt.Errorf("unexpected number of children %d, expected %d", cn, len(s.children)) } for _, ch := range s.children { if v := cbkt.Get([]byte(ch)); v == nil { - return errors.Errorf("missing child record for %s", ch) + return fmt.Errorf("missing child record for %s", ch) } } } @@ -278,18 +270,18 @@ func TestMigrations(t *testing.T) { check: func(tx *bolt.Tx) error { bkt := getIngestsBucket(tx, "testing") if bkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "ingests bucket not found") + return fmt.Errorf("ingests bucket not found: %w", errdefs.ErrNotFound) } for _, s := range testRefs { sbkt := bkt.Bucket([]byte(s.ref)) if sbkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "ref does not exist") + return fmt.Errorf("ref does not exist: %w", errdefs.ErrNotFound) } bref := string(sbkt.Get(bucketKeyRef)) if bref != s.bref { - return errors.Errorf("unexpected reference key %q, expected %q", bref, s.bref) + return fmt.Errorf("unexpected reference key %q, expected %q", bref, s.bref) } } @@ -346,11 +338,11 @@ func readDBVersion(db *bolt.DB, schema []byte) (int, error) { if err := db.View(func(tx *bolt.Tx) error { bkt := tx.Bucket(schema) if bkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "no version bucket") + return fmt.Errorf("no version bucket: %w", errdefs.ErrNotFound) } vb := bkt.Get(bucketKeyDBVersion) if vb == nil { - return errors.Wrap(errdefs.ErrNotFound, "no version value") + return fmt.Errorf("no version value: %w", errdefs.ErrNotFound) } v, _ := binary.Varint(vb) version = int(v) @@ -589,13 +581,13 @@ func create(obj object, tx *bolt.Tx, db *DB, cs content.Store, sn snapshots.Snap content.WithRef("test-ref"), content.WithDescriptor(ocispec.Descriptor{Size: int64(len(v.data)), Digest: expected})) if err != nil { - return nil, errors.Wrap(err, "failed to create writer") + return nil, fmt.Errorf("failed to create writer: %w", err) } if _, err := 
w.Write(v.data); err != nil { - return nil, errors.Wrap(err, "write blob failed") + return nil, fmt.Errorf("write blob failed: %w", err) } if err := w.Commit(ctx, int64(len(v.data)), expected, content.WithLabels(obj.labels)); err != nil { - return nil, errors.Wrap(err, "failed to commit blob") + return nil, fmt.Errorf("failed to commit blob: %w", err) } if !obj.removed { node = &gc.Node{ @@ -636,7 +628,7 @@ func create(obj object, tx *bolt.Tx, db *DB, cs content.Store, sn snapshots.Snap _, err := NewImageStore(db).Create(ctx, image) if err != nil { - return nil, errors.Wrap(err, "failed to create image") + return nil, fmt.Errorf("failed to create image: %w", err) } case testContainer: container := containers.Container{ @@ -765,10 +757,7 @@ type testLease struct { } func newStores(t testing.TB) (*DB, content.Store, snapshots.Snapshotter, func()) { - td, err := ioutil.TempDir("", "gc-test-") - if err != nil { - t.Fatal(err) - } + td := t.TempDir() db, err := bolt.Open(filepath.Join(td, "meta.db"), 0644, nil) if err != nil { t.Fatal(err) } @@ -787,6 +776,7 @@ func newStores(t testing.TB) (*DB, content.Store, snapshots.Snapshotter, func()) mdb := NewDB(db, lcs, map[string]snapshots.Snapshotter{"native": nsn}) return mdb, mdb.ContentStore(), mdb.Snapshotter("native"), func() { - os.RemoveAll(td) + nsn.Close() + db.Close() } } diff --git a/metadata/gc.go b/metadata/gc.go index afe16c9..60bf410 100644 --- a/metadata/gc.go +++ b/metadata/gc.go @@ -25,7 +25,6 @@ import ( "github.com/containerd/containerd/gc" "github.com/containerd/containerd/log" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -59,6 +58,8 @@ var ( labelGCFlat = []byte("containerd.io/gc.flat") ) +// scanRoots sends to the given channel the "root" resources that are certainly used. +// The caller can look up the references of those resources to find all resources that are used. 
func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { v1bkt := tx.Bucket(bucketKeyVersion) if v1bkt == nil { @@ -277,6 +278,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { return cerr } +// references finds the resources that are reachable from the given node. func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error { switch node.Type { case ResourceContent: @@ -290,7 +292,7 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) case ResourceSnapshot, resourceSnapshotFlat: parts := strings.SplitN(node.Key, "/", 2) if len(parts) != 2 { - return errors.Errorf("invalid snapshot gc key %s", node.Key) + return fmt.Errorf("invalid snapshot gc key %s", node.Key) } ss := parts[0] name := parts[1] @@ -329,6 +331,7 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) return nil } +// scanAll finds all resources regardless of whether the resources are used or not. func scanAll(ctx context.Context, tx *bolt.Tx, fn func(ctx context.Context, n gc.Node) error) error { v1bkt := tx.Bucket(bucketKeyVersion) if v1bkt == nil { @@ -409,6 +412,7 @@ func scanAll(ctx context.Context, tx *bolt.Tx, fn func(ctx context.Context, n gc return nil } +// remove all buckets for the given node. 
func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error { v1bkt := tx.Bucket(bucketKeyVersion) if v1bkt == nil { @@ -435,7 +439,7 @@ func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error { if sbkt != nil { parts := strings.SplitN(node.Key, "/", 2) if len(parts) != 2 { - return errors.Errorf("invalid snapshot gc key %s", node.Key) + return fmt.Errorf("invalid snapshot gc key %s", node.Key) } ssbkt := sbkt.Bucket([]byte(parts[0])) if ssbkt != nil { diff --git a/metadata/gc_test.go b/metadata/gc_test.go index f8cd999..9a514e9 100644 --- a/metadata/gc_test.go +++ b/metadata/gc_test.go @@ -19,9 +19,7 @@ package metadata import ( "context" "io" - "io/ioutil" "math/rand" - "os" "path/filepath" "sort" "testing" @@ -43,7 +41,7 @@ func TestResourceMax(t *testing.T) { } func TestGCRoots(t *testing.T) { - db, cleanup, err := newDatabase() + db, cleanup, err := newDatabase(t) if err != nil { t.Fatal(err) } @@ -164,7 +162,7 @@ func TestGCRoots(t *testing.T) { } func TestGCRemove(t *testing.T) { - db, cleanup, err := newDatabase() + db, cleanup, err := newDatabase(t) if err != nil { t.Fatal(err) } @@ -257,7 +255,7 @@ func TestGCRemove(t *testing.T) { } func TestGCRefs(t *testing.T) { - db, cleanup, err := newDatabase() + db, cleanup, err := newDatabase(t) if err != nil { t.Fatal(err) } @@ -388,21 +386,16 @@ func TestGCRefs(t *testing.T) { } } -func newDatabase() (*bolt.DB, func(), error) { - td, err := ioutil.TempDir("", "gc-roots-") - if err != nil { - return nil, nil, err - } +func newDatabase(t testing.TB) (*bolt.DB, func(), error) { + td := t.TempDir() db, err := bolt.Open(filepath.Join(td, "test.db"), 0777, nil) if err != nil { - os.RemoveAll(td) return nil, nil, err } return db, func() { db.Close() - os.RemoveAll(td) }, nil } diff --git a/metadata/images.go b/metadata/images.go index cace4e1..8355b71 100644 --- a/metadata/images.go +++ b/metadata/images.go @@ -19,6 +19,7 @@ package metadata import ( "context" "encoding/binary" + "errors" "fmt" 
"strings" "sync/atomic" @@ -32,7 +33,6 @@ import ( "github.com/containerd/containerd/namespaces" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -56,17 +56,17 @@ func (s *imageStore) Get(ctx context.Context, name string) (images.Image, error) if err := view(ctx, s.db, func(tx *bolt.Tx) error { bkt := getImagesBucket(tx, namespace) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + return fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound) } ibkt := bkt.Bucket([]byte(name)) if ibkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + return fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound) } image.Name = name if err := readImage(&image, ibkt); err != nil { - return errors.Wrapf(err, "image %q", name) + return fmt.Errorf("image %q: %w", name, err) } return nil @@ -85,7 +85,7 @@ func (s *imageStore) List(ctx context.Context, fs ...string) ([]images.Image, er filter, err := filters.ParseAll(fs...) 
if err != nil { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) + return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument) } var m []images.Image @@ -141,7 +141,7 @@ func (s *imageStore) Create(ctx context.Context, image images.Image) (images.Ima return err } - return errors.Wrapf(errdefs.ErrAlreadyExists, "image %q", image.Name) + return fmt.Errorf("image %q: %w", image.Name, errdefs.ErrAlreadyExists) } image.CreatedAt = time.Now().UTC() @@ -161,7 +161,7 @@ func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths } if image.Name == "" { - return images.Image{}, errors.Wrapf(errdefs.ErrInvalidArgument, "image name is required for update") + return images.Image{}, fmt.Errorf("image name is required for update: %w", errdefs.ErrInvalidArgument) } var updated images.Image @@ -174,11 +174,11 @@ func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths ibkt := bkt.Bucket([]byte(image.Name)) if ibkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "image %q", image.Name) + return fmt.Errorf("image %q: %w", image.Name, errdefs.ErrNotFound) } if err := readImage(&updated, ibkt); err != nil { - return errors.Wrapf(err, "image %q", image.Name) + return fmt.Errorf("image %q: %w", image.Name, err) } createdat := updated.CreatedAt updated.Name = image.Name @@ -216,7 +216,7 @@ func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths case "annotations": updated.Target.Annotations = image.Target.Annotations default: - return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on image %q", path, image.Name) + return fmt.Errorf("cannot update %q field on image %q: %w", path, image.Name, errdefs.ErrInvalidArgument) } } } else { @@ -247,12 +247,12 @@ func (s *imageStore) Delete(ctx context.Context, name string, opts ...images.Del return update(ctx, s.db, func(tx *bolt.Tx) error { bkt := getImagesBucket(tx, namespace) if bkt == nil { - return 
errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + return fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound) } if err = bkt.DeleteBucket([]byte(name)); err != nil { if err == bolt.ErrBucketNotFound { - err = errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + err = fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound) } return err } @@ -265,12 +265,12 @@ func (s *imageStore) Delete(ctx context.Context, name string, opts ...images.Del func validateImage(image *images.Image) error { if image.Name == "" { - return errors.Wrapf(errdefs.ErrInvalidArgument, "image name must not be empty") + return fmt.Errorf("image name must not be empty: %w", errdefs.ErrInvalidArgument) } for k, v := range image.Labels { if err := labels.Validate(k, v); err != nil { - return errors.Wrapf(err, "image.Labels") + return fmt.Errorf("image.Labels: %w", err) } } @@ -281,15 +281,15 @@ func validateTarget(target *ocispec.Descriptor) error { // NOTE(stevvooe): Only validate fields we actually store. if err := target.Digest.Validate(); err != nil { - return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.Digest %q invalid: %v", target.Digest, err) + return fmt.Errorf("Target.Digest %q invalid: %v: %w", target.Digest, err, errdefs.ErrInvalidArgument) } if target.Size <= 0 { - return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.Size must be greater than zero") + return fmt.Errorf("Target.Size must be greater than zero: %w", errdefs.ErrInvalidArgument) } if target.MediaType == "" { - return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.MediaType must be set") + return fmt.Errorf("Target.MediaType must be set: %w", errdefs.ErrInvalidArgument) } return nil @@ -341,11 +341,11 @@ func writeImage(bkt *bolt.Bucket, image *images.Image) error { } if err := boltutil.WriteLabels(bkt, image.Labels); err != nil { - return errors.Wrapf(err, "writing labels for image %v", image.Name) + return fmt.Errorf("writing labels for image %v: %w", image.Name, err) } if err := boltutil.WriteAnnotations(bkt, 
image.Target.Annotations); err != nil { - return errors.Wrapf(err, "writing Annotations for image %v", image.Name) + return fmt.Errorf("writing Annotations for image %v: %w", image.Name, err) } // write the target bucket diff --git a/metadata/images_test.go b/metadata/images_test.go index 6d87588..323426d 100644 --- a/metadata/images_test.go +++ b/metadata/images_test.go @@ -17,6 +17,7 @@ package metadata import ( + "errors" "fmt" "reflect" "testing" @@ -27,7 +28,6 @@ import ( "github.com/containerd/containerd/images" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) func TestImagesList(t *testing.T) { @@ -129,6 +129,7 @@ func TestImagesList(t *testing.T) { } for _, result := range results { + result := result checkImagesEqual(t, &result, testset[result.Name], "list results did not match") } }) @@ -500,7 +501,7 @@ func TestImagesCreateUpdateDelete(t *testing.T) { if testcase.createerr == nil { t.Fatalf("unexpected error: %v", err) } else { - t.Fatalf("cause of %v (cause: %v) != %v", err, errors.Cause(err), testcase.createerr) + t.Fatalf("cause of %v (cause: %v) != %v", err, errors.Unwrap(err), testcase.createerr) } } else if testcase.createerr != nil { return @@ -522,7 +523,7 @@ func TestImagesCreateUpdateDelete(t *testing.T) { if testcase.cause == nil { t.Fatalf("unexpected error: %v", err) } else { - t.Fatalf("cause of %v (cause: %v) != %v", err, errors.Cause(err), testcase.cause) + t.Fatalf("cause of %v (cause: %v) != %v", err, errors.Unwrap(err), testcase.cause) } } else if testcase.cause != nil { return diff --git a/metadata/leases.go b/metadata/leases.go index 60da06b..03fa75a 100644 --- a/metadata/leases.go +++ b/metadata/leases.go @@ -18,6 +18,7 @@ package metadata import ( "context" + "errors" "fmt" "strings" "sync/atomic" @@ -29,26 +30,25 @@ import ( "github.com/containerd/containerd/metadata/boltutil" "github.com/containerd/containerd/namespaces" digest 
"github.com/opencontainers/go-digest" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) -// LeaseManager manages the create/delete lifecycle of leases +// leaseManager manages the create/delete lifecycle of leases // and also returns existing leases -type LeaseManager struct { +type leaseManager struct { db *DB } // NewLeaseManager creates a new lease manager for managing leases using // the provided database transaction. -func NewLeaseManager(db *DB) *LeaseManager { - return &LeaseManager{ +func NewLeaseManager(db *DB) leases.Manager { + return &leaseManager{ db: db, } } // Create creates a new lease using the provided lease -func (lm *LeaseManager) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) { +func (lm *leaseManager) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) { var l leases.Lease for _, opt := range opts { if err := opt(&l); err != nil { @@ -75,7 +75,7 @@ func (lm *LeaseManager) Create(ctx context.Context, opts ...leases.Opt) (leases. if err == bolt.ErrBucketExists { err = errdefs.ErrAlreadyExists } - return errors.Wrapf(err, "lease %q", l.ID) + return fmt.Errorf("lease %q: %w", l.ID, err) } t := time.Now().UTC() @@ -102,7 +102,7 @@ func (lm *LeaseManager) Create(ctx context.Context, opts ...leases.Opt) (leases. 
} // Delete deletes the lease with the provided lease ID -func (lm *LeaseManager) Delete(ctx context.Context, lease leases.Lease, _ ...leases.DeleteOpt) error { +func (lm *leaseManager) Delete(ctx context.Context, lease leases.Lease, _ ...leases.DeleteOpt) error { namespace, err := namespaces.NamespaceRequired(ctx) if err != nil { return err @@ -111,11 +111,11 @@ func (lm *LeaseManager) Delete(ctx context.Context, lease leases.Lease, _ ...lea return update(ctx, lm.db, func(tx *bolt.Tx) error { topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases) if topbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + return fmt.Errorf("lease %q: %w", lease.ID, errdefs.ErrNotFound) } if err := topbkt.DeleteBucket([]byte(lease.ID)); err != nil { if err == bolt.ErrBucketNotFound { - err = errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + err = fmt.Errorf("lease %q: %w", lease.ID, errdefs.ErrNotFound) } return err } @@ -127,7 +127,7 @@ func (lm *LeaseManager) Delete(ctx context.Context, lease leases.Lease, _ ...lea } // List lists all active leases -func (lm *LeaseManager) List(ctx context.Context, fs ...string) ([]leases.Lease, error) { +func (lm *leaseManager) List(ctx context.Context, fs ...string) ([]leases.Lease, error) { namespace, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -135,7 +135,7 @@ func (lm *LeaseManager) List(ctx context.Context, fs ...string) ([]leases.Lease, filter, err := filters.ParseAll(fs...) if err != nil { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) + return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument) } var ll []leases.Lease @@ -183,7 +183,7 @@ func (lm *LeaseManager) List(ctx context.Context, fs ...string) ([]leases.Lease, } // AddResource references the resource by the provided lease. 
-func (lm *LeaseManager) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { +func (lm *leaseManager) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { namespace, err := namespaces.NamespaceRequired(ctx) if err != nil { return err @@ -192,7 +192,7 @@ func (lm *LeaseManager) AddResource(ctx context.Context, lease leases.Lease, r l return update(ctx, lm.db, func(tx *bolt.Tx) error { topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) if topbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + return fmt.Errorf("lease %q: %w", lease.ID, errdefs.ErrNotFound) } keys, ref, err := parseLeaseResource(r) @@ -212,7 +212,7 @@ func (lm *LeaseManager) AddResource(ctx context.Context, lease leases.Lease, r l } // DeleteResource dereferences the resource by the provided lease. -func (lm *LeaseManager) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { +func (lm *leaseManager) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { namespace, err := namespaces.NamespaceRequired(ctx) if err != nil { return err @@ -221,7 +221,7 @@ func (lm *LeaseManager) DeleteResource(ctx context.Context, lease leases.Lease, return update(ctx, lm.db, func(tx *bolt.Tx) error { topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) if topbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + return fmt.Errorf("lease %q: %w", lease.ID, errdefs.ErrNotFound) } keys, ref, err := parseLeaseResource(r) @@ -250,7 +250,7 @@ func (lm *LeaseManager) DeleteResource(ctx context.Context, lease leases.Lease, } // ListResources lists all the resources referenced by the lease. 
-func (lm *LeaseManager) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { +func (lm *leaseManager) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { namespace, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -262,7 +262,7 @@ func (lm *LeaseManager) ListResources(ctx context.Context, lease leases.Lease) ( topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) if topbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + return fmt.Errorf("lease %q: %w", lease.ID, errdefs.ErrNotFound) } // content resources @@ -333,7 +333,7 @@ func addSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid)) if bkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "lease does not exist") + return fmt.Errorf("lease does not exist: %w", errdefs.ErrNotFound) } bkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectSnapshots) @@ -382,7 +382,7 @@ func addContentLease(ctx context.Context, tx *bolt.Tx, dgst digest.Digest) error bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid)) if bkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "lease does not exist") + return fmt.Errorf("lease does not exist: %w", errdefs.ErrNotFound) } bkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectContent) @@ -426,7 +426,7 @@ func addIngestLease(ctx context.Context, tx *bolt.Tx, ref string) (bool, error) bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid)) if bkt == nil { - return false, errors.Wrap(errdefs.ErrNotFound, "lease does not exist") + return false, fmt.Errorf("lease does not exist: %w", errdefs.ErrNotFound) } bkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectIngests) @@ -473,22 +473,22 @@ func parseLeaseResource(r leases.Resource) 
([]string, string, error) { string(bucketKeyObjectIngests): if len(keys) != 1 { - return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid resource type %s", typ) + return nil, "", fmt.Errorf("invalid resource type %s: %w", typ, errdefs.ErrInvalidArgument) } if k == string(bucketKeyObjectContent) { dgst, err := digest.Parse(ref) if err != nil { - return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid content resource id %s: %v", ref, err) + return nil, "", fmt.Errorf("invalid content resource id %s: %v: %w", ref, err, errdefs.ErrInvalidArgument) } ref = dgst.String() } case string(bucketKeyObjectSnapshots): if len(keys) != 2 { - return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid snapshot resource type %s", typ) + return nil, "", fmt.Errorf("invalid snapshot resource type %s: %w", typ, errdefs.ErrInvalidArgument) } default: - return nil, "", errors.Wrapf(errdefs.ErrNotImplemented, "resource type %s not supported yet", typ) + return nil, "", fmt.Errorf("resource type %s not supported yet: %w", typ, errdefs.ErrNotImplemented) } return keys, ref, nil diff --git a/metadata/leases_test.go b/metadata/leases_test.go index 383c842..be120ce 100644 --- a/metadata/leases_test.go +++ b/metadata/leases_test.go @@ -18,11 +18,12 @@ package metadata import ( _ "crypto/sha256" + "errors" + "fmt" "testing" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/leases" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -365,7 +366,7 @@ func TestLeaseResource(t *testing.T) { if err := db.Update(func(tx *bolt.Tx) error { err0 := lm.AddResource(WithTransactionContext(ctx, tx), tc.lease, tc.resource) if !errors.Is(err0, tc.err) { - return errors.Errorf("expect error (%v), but got (%v)", tc.err, err0) + return fmt.Errorf("expect error (%v), but got (%v)", tc.err, err0) } if err0 == nil { diff --git a/metadata/namespaces.go b/metadata/namespaces.go index 165c09f..84eb83f 100644 --- a/metadata/namespaces.go +++ 
b/metadata/namespaces.go @@ -18,13 +18,13 @@ package metadata import ( "context" + "fmt" "strings" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/identifiers" l "github.com/containerd/containerd/labels" "github.com/containerd/containerd/namespaces" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -49,7 +49,7 @@ func (s *namespaceStore) Create(ctx context.Context, namespace string, labels ma for k, v := range labels { if err := l.Validate(k, v); err != nil { - return errors.Wrapf(err, "namespace.Labels") + return fmt.Errorf("namespace.Labels: %w", err) } } @@ -57,7 +57,7 @@ func (s *namespaceStore) Create(ctx context.Context, namespace string, labels ma bkt, err := topbkt.CreateBucket([]byte(namespace)) if err != nil { if err == bolt.ErrBucketExists { - return errors.Wrapf(errdefs.ErrAlreadyExists, "namespace %q", namespace) + return fmt.Errorf("namespace %q: %w", namespace, errdefs.ErrAlreadyExists) } return err @@ -97,7 +97,7 @@ func (s *namespaceStore) Labels(ctx context.Context, namespace string) (map[stri func (s *namespaceStore) SetLabel(ctx context.Context, namespace, key, value string) error { if err := l.Validate(key, value); err != nil { - return errors.Wrapf(err, "namespace.Labels") + return fmt.Errorf("namespace.Labels: %w", err) } return withNamespacesLabelsBucket(s.tx, namespace, func(bkt *bolt.Bucket) error { @@ -147,16 +147,16 @@ func (s *namespaceStore) Delete(ctx context.Context, namespace string, opts ...n } if len(types) > 0 { - return errors.Wrapf( - errdefs.ErrFailedPrecondition, - "namespace %q must be empty, but it still has %s", + return fmt.Errorf( + "namespace %q must be empty, but it still has %s: %w", namespace, strings.Join(types, ", "), + errdefs.ErrFailedPrecondition, ) } if err := bkt.DeleteBucket([]byte(namespace)); err != nil { if err == bolt.ErrBucketNotFound { - return errors.Wrapf(errdefs.ErrNotFound, "namespace %q", namespace) + return fmt.Errorf("namespace %q: %w", namespace, 
errdefs.ErrNotFound) } return err @@ -184,7 +184,7 @@ func (s *namespaceStore) listNs(namespace string) ([]string, error) { if err := snbkt.ForEach(func(k, v []byte) error { if v == nil { if !isBucketEmpty(snbkt.Bucket(k)) { - out = append(out, "snapshot-"+string(k)) + out = append(out, fmt.Sprintf("snapshots on %q snapshotter", k)) } } return nil diff --git a/metadata/namespaces_test.go b/metadata/namespaces_test.go index f434ddf..31c1bd8 100644 --- a/metadata/namespaces_test.go +++ b/metadata/namespaces_test.go @@ -54,10 +54,22 @@ func TestCreateDelete(t *testing.T) { Spec: &types.Any{}, }) require.NoError(t, err) + + db.Update(func(tx *bbolt.Tx) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + bucket, err := createSnapshotterBucket(tx, ns, "testss") + if err != nil { + return err + } + return bucket.Put([]byte("key"), []byte("value")) + }) }, validate: func(t *testing.T, err error) { require.Error(t, err) - assert.Contains(t, err.Error(), "still has containers") + assert.Contains(t, err.Error(), `still has containers, snapshots on "testss" snapshotter`) }, }, } diff --git a/metadata/snapshot.go b/metadata/snapshot.go index 389aeda..3486020 100644 --- a/metadata/snapshot.go +++ b/metadata/snapshot.go @@ -32,7 +32,6 @@ import ( "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/snapshots" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -88,7 +87,7 @@ func (s *snapshotter) resolveKey(ctx context.Context, key string) (string, error if err := view(ctx, s.db, func(tx *bolt.Tx) error { id = getKey(tx, ns, s.name, key) if id == "" { - return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + return fmt.Errorf("snapshot %v does not exist: %w", key, errdefs.ErrNotFound) } return nil }); err != nil { @@ -113,18 +112,18 @@ func (s *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, err if err := view(ctx, s.db, func(tx 
*bolt.Tx) error { bkt := getSnapshotterBucket(tx, ns, s.name) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + return fmt.Errorf("snapshot %v does not exist: %w", key, errdefs.ErrNotFound) } sbkt := bkt.Bucket([]byte(key)) if sbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + return fmt.Errorf("snapshot %v does not exist: %w", key, errdefs.ErrNotFound) } local.Labels, err = boltutil.ReadLabels(sbkt) if err != nil { - return errors.Wrap(err, "failed to read labels") + return fmt.Errorf("failed to read labels: %w", err) } if err := boltutil.ReadTimestamps(sbkt, &local.Created, &local.Updated); err != nil { - return errors.Wrap(err, "failed to read timestamps") + return fmt.Errorf("failed to read timestamps: %w", err) } bkey = string(sbkt.Get(bucketKeyName)) local.Parent = string(sbkt.Get(bucketKeyParent)) @@ -152,7 +151,7 @@ func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath } if info.Name == "" { - return snapshots.Info{}, errors.Wrap(errdefs.ErrInvalidArgument, "") + return snapshots.Info{}, errdefs.ErrInvalidArgument } var ( @@ -165,19 +164,19 @@ func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath if err := update(ctx, s.db, func(tx *bolt.Tx) error { bkt := getSnapshotterBucket(tx, ns, s.name) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", info.Name) + return fmt.Errorf("snapshot %v does not exist: %w", info.Name, errdefs.ErrNotFound) } sbkt := bkt.Bucket([]byte(info.Name)) if sbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", info.Name) + return fmt.Errorf("snapshot %v does not exist: %w", info.Name, errdefs.ErrNotFound) } local.Labels, err = boltutil.ReadLabels(sbkt) if err != nil { - return errors.Wrap(err, "failed to read labels") + return fmt.Errorf("failed to read labels: %w", err) } if err := boltutil.ReadTimestamps(sbkt, 
&local.Created, &local.Updated); err != nil { - return errors.Wrap(err, "failed to read timestamps") + return fmt.Errorf("failed to read timestamps: %w", err) } // Handle field updates @@ -197,7 +196,7 @@ func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath case "labels": local.Labels = info.Labels default: - return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on snapshot %q", path, info.Name) + return fmt.Errorf("cannot update %q field on snapshot %q: %w", path, info.Name, errdefs.ErrInvalidArgument) } } } else { @@ -209,10 +208,10 @@ func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath local.Updated = time.Now().UTC() if err := boltutil.WriteTimestamps(sbkt, local.Created, local.Updated); err != nil { - return errors.Wrap(err, "failed to read timestamps") + return fmt.Errorf("failed to read timestamps: %w", err) } if err := boltutil.WriteLabels(sbkt, local.Labels); err != nil { - return errors.Wrap(err, "failed to read labels") + return fmt.Errorf("failed to read labels: %w", err) } bkey = string(sbkt.Get(bucketKeyName)) local.Parent = string(sbkt.Get(bucketKeyParent)) @@ -319,18 +318,18 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re // Check if target exists, if so, return already exists if target != "" { if tbkt := bkt.Bucket([]byte(target)); tbkt != nil { - return errors.Wrapf(errdefs.ErrAlreadyExists, "target snapshot %q", target) + return fmt.Errorf("target snapshot %q: %w", target, errdefs.ErrAlreadyExists) } } if bbkt := bkt.Bucket([]byte(key)); bbkt != nil { - return errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", key) + return fmt.Errorf("snapshot %q: %w", key, errdefs.ErrAlreadyExists) } if parent != "" { pbkt := bkt.Bucket([]byte(parent)) if pbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", parent) + return fmt.Errorf("parent snapshot %v does not exist: %w", parent, errdefs.ErrNotFound) } 
bparent = string(pbkt.Get(bucketKeyName)) } @@ -378,11 +377,11 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re return nil }, filter); err != nil { - return nil, errors.Wrap(err, "failed walking backend snapshots") + return nil, fmt.Errorf("failed walking backend snapshots: %w", err) } if tinfo == nil { - return nil, errors.Wrapf(errdefs.ErrNotFound, "target snapshot %q in backend", target) + return nil, fmt.Errorf("target snapshot %q in backend: %w", target, errdefs.ErrNotFound) } key = target @@ -401,12 +400,12 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re } // Propagate this error after the final update - rerr = errors.Wrapf(errdefs.ErrAlreadyExists, "target snapshot %q from snapshotter", target) + rerr = fmt.Errorf("target snapshot %q from snapshotter: %w", target, errdefs.ErrAlreadyExists) } else { // This condition is unexpected as the key provided is expected // to be new and unique, return as unknown response from backend // to avoid confusing callers handling already exists. 
- return nil, errors.Wrapf(errdefs.ErrUnknown, "unexpected error from snapshotter: %v", err) + return nil, fmt.Errorf("unexpected error from snapshotter: %v: %w", err, errdefs.ErrUnknown) } } else if err != nil { return nil, err @@ -420,7 +419,7 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re if txerr := update(ctx, s.db, func(tx *bolt.Tx) error { bkt := getSnapshotterBucket(tx, ns, s.name) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "can not find snapshotter %q", s.name) + return fmt.Errorf("can not find snapshotter %q: %w", s.name, errdefs.ErrNotFound) } if err := addSnapshotLease(ctx, tx, s.name, key); err != nil { @@ -433,7 +432,7 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re return err } if rerr == nil { - rerr = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", key) + rerr = fmt.Errorf("snapshot %q: %w", key, errdefs.ErrAlreadyExists) } return nil } @@ -441,7 +440,7 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re if parent != "" { pbkt := bkt.Bucket([]byte(parent)) if pbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", parent) + return fmt.Errorf("parent snapshot %v does not exist: %w", parent, errdefs.ErrNotFound) } // Ensure the backend's parent matches the metadata store's parent @@ -451,7 +450,7 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re // uniqueness of the reference relationships, the metadata store // can only error out to prevent inconsistent data. 
if bparent != string(pbkt.Get(bucketKeyName)) { - return errors.Wrapf(errdefs.ErrInvalidArgument, "mismatched parent %s from target %s", parent, target) + return fmt.Errorf("mismatched parent %s from target %s: %w", parent, target, errdefs.ErrInvalidArgument) } cbkt, err := pbkt.CreateBucketIfNotExists(bucketKeyChildren) @@ -516,14 +515,14 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap if err := update(ctx, s.db, func(tx *bolt.Tx) error { bkt := getSnapshotterBucket(tx, ns, s.name) if bkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, - "can not find snapshotter %q", s.name) + return fmt.Errorf("can not find snapshotter %q: %w", + s.name, errdefs.ErrNotFound) } bbkt, err := bkt.CreateBucket([]byte(name)) if err != nil { if err == bolt.ErrBucketExists { - err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", name) + err = fmt.Errorf("snapshot %q: %w", name, errdefs.ErrAlreadyExists) } return err } @@ -533,7 +532,7 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap obkt := bkt.Bucket([]byte(key)) if obkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + return fmt.Errorf("snapshot %v does not exist: %w", key, errdefs.ErrNotFound) } bkey := string(obkt.Get(bucketKeyName)) @@ -553,7 +552,7 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap if len(parent) > 0 { pbkt := bkt.Bucket(parent) if pbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", string(parent)) + return fmt.Errorf("parent snapshot %v does not exist: %w", string(parent), errdefs.ErrNotFound) } cbkt, err := pbkt.CreateBucketIfNotExists(bucketKeyChildren) @@ -639,13 +638,13 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { sbkt = bkt.Bucket([]byte(key)) } if sbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + return fmt.Errorf("snapshot %v does not exist: 
%w", key, errdefs.ErrNotFound) } cbkt := sbkt.Bucket(bucketKeyChildren) if cbkt != nil { if child, _ := cbkt.Cursor().First(); child != nil { - return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot remove snapshot with child") + return fmt.Errorf("cannot remove snapshot with child: %w", errdefs.ErrFailedPrecondition) } } @@ -653,12 +652,12 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { if len(parent) > 0 { pbkt := bkt.Bucket(parent) if pbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", string(parent)) + return fmt.Errorf("parent snapshot %v does not exist: %w", string(parent), errdefs.ErrNotFound) } cbkt := pbkt.Bucket(bucketKeyChildren) if cbkt != nil { if err := cbkt.Delete([]byte(key)); err != nil { - return errors.Wrap(err, "failed to remove child link") + return fmt.Errorf("failed to remove child link: %w", err) } } } @@ -784,13 +783,14 @@ func (s *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...str func validateSnapshot(info *snapshots.Info) error { for k, v := range info.Labels { if err := labels.Validate(k, v); err != nil { - return errors.Wrapf(err, "info.Labels") + return fmt.Errorf("info.Labels: %w", err) } } return nil } +// garbageCollect removes all snapshots that are no longer used. 
func (s *snapshotter) garbageCollect(ctx context.Context) (d time.Duration, err error) { s.l.Lock() t1 := time.Now() diff --git a/metadata/snapshot_test.go b/metadata/snapshot_test.go index 93c3c9d..9cfb205 100644 --- a/metadata/snapshot_test.go +++ b/metadata/snapshot_test.go @@ -18,6 +18,7 @@ package metadata import ( "context" + "fmt" "os" "path/filepath" "reflect" @@ -34,7 +35,6 @@ import ( "github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots/native" "github.com/containerd/containerd/snapshots/testsuite" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -319,7 +319,7 @@ func (s *tmpSnapshotter) create(ctx context.Context, key, parent string, kind sn if target != "" { for _, name := range s.targets[target] { if s.snapshots[name].Parent == parent { - return nil, errors.Wrap(errdefs.ErrAlreadyExists, "found target") + return nil, fmt.Errorf("found target: %w", errdefs.ErrAlreadyExists) } } } @@ -355,7 +355,7 @@ func (s *tmpSnapshotter) Commit(ctx context.Context, name, key string, opts ...s base.Kind = snapshots.KindCommitted if _, ok := s.snapshots[name]; ok { - return errors.Wrap(errdefs.ErrAlreadyExists, "found name") + return fmt.Errorf("found name: %w", errdefs.ErrAlreadyExists) } src, ok := s.snapshots[key] diff --git a/metrics/cgroups/cgroups.go b/metrics/cgroups/cgroups.go index 6807b19..4d2e128 100644 --- a/metrics/cgroups/cgroups.go +++ b/metrics/cgroups/cgroups.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,6 +21,7 @@ package cgroups import ( "github.com/containerd/cgroups" + "github.com/containerd/containerd/events" v1 "github.com/containerd/containerd/metrics/cgroups/v1" v2 "github.com/containerd/containerd/metrics/cgroups/v2" "github.com/containerd/containerd/platforms" @@ -38,6 +40,9 @@ func init() { Type: plugin.TaskMonitorPlugin, ID: "cgroups", InitFn: New, + Requires: []plugin.Type{ + plugin.EventPlugin, + }, Config: &Config{}, }) } @@ -53,10 +58,16 @@ func New(ic *plugin.InitContext) 
(interface{}, error) { tm runtime.TaskMonitor err error ) + + ep, err := ic.Get(plugin.EventPlugin) + if err != nil { + return nil, err + } + if cgroups.Mode() == cgroups.Unified { - tm, err = v2.NewTaskMonitor(ic.Context, ic.Events, ns) + tm, err = v2.NewTaskMonitor(ic.Context, ep.(events.Publisher), ns) } else { - tm, err = v1.NewTaskMonitor(ic.Context, ic.Events, ns) + tm, err = v1.NewTaskMonitor(ic.Context, ep.(events.Publisher), ns) } if err != nil { return nil, err diff --git a/pkg/cri/platforms/default_windows.go b/metrics/cgroups/common/type.go similarity index 71% rename from pkg/cri/platforms/default_windows.go rename to metrics/cgroups/common/type.go index 20792d0..b192356 100644 --- a/pkg/cri/platforms/default_windows.go +++ b/metrics/cgroups/common/type.go @@ -1,4 +1,5 @@ -// +build windows +//go:build linux +// +build linux /* Copyright The containerd Authors. @@ -16,13 +17,17 @@ limitations under the License. */ -package platforms +package common import ( - "github.com/containerd/containerd/platforms" + "context" + + "github.com/gogo/protobuf/types" ) -// Default returns the current platform's default platform specification. -func Default() platforms.MatchComparer { - return platforms.Default() +// Statable type that returns cgroup metrics +type Statable interface { + ID() string + Namespace() string + Stats(context.Context) (*types.Any, error) } diff --git a/metrics/cgroups/metrics_test.go b/metrics/cgroups/metrics_test.go new file mode 100644 index 0000000..c71ea60 --- /dev/null +++ b/metrics/cgroups/metrics_test.go @@ -0,0 +1,158 @@ +//go:build linux +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "context" + "strconv" + "sync" + "testing" + "time" + + "github.com/containerd/cgroups" + "github.com/containerd/containerd/metrics/cgroups/common" + v1 "github.com/containerd/containerd/metrics/cgroups/v1" + v2 "github.com/containerd/containerd/metrics/cgroups/v2" + v1types "github.com/containerd/containerd/metrics/types/v1" + v2types "github.com/containerd/containerd/metrics/types/v2" + "github.com/containerd/typeurl" + "github.com/prometheus/client_golang/prometheus" + + metrics "github.com/docker/go-metrics" + "github.com/gogo/protobuf/types" +) + +// TestRegressionIssue6772 should not have dead-lock when Collect and Add run +// in the same time. +// +// Issue: https://github.com/containerd/containerd/issues/6772. 
+func TestRegressionIssue6772(t *testing.T) { + ns := metrics.NewNamespace("test-container", "", nil) + isV1 := true + + var collecter Collecter + if cgroups.Mode() == cgroups.Unified { + isV1 = false + collecter = v2.NewCollector(ns) + } else { + collecter = v1.NewCollector(ns) + } + + doneCh := make(chan struct{}) + defer close(doneCh) + + maxItem := 100 + startCh := make(chan struct{}) + + metricCh := make(chan prometheus.Metric, maxItem) + + go func() { + for { + select { + case <-doneCh: + return + case <-metricCh: + } + } + }() + + go func() { + // pulling the metrics to trigger dead-lock + ns.Collect(metricCh) + close(startCh) + + for { + select { + case <-doneCh: + return + default: + } + + ns.Collect(metricCh) + } + }() + <-startCh + + labels := map[string]string{"issue": "6772"} + errCh := make(chan error, 1) + + var wg sync.WaitGroup + for i := 0; i < maxItem; i++ { + id := i + wg.Add(1) + + go func() { + defer wg.Done() + + err := collecter.Add( + &mockStatT{ + id: strconv.Itoa(id), + namespace: "issue6772", + isV1: isV1, + }, + labels, + ) + if err != nil { + errCh <- err + } + }() + } + + finishedCh := make(chan struct{}) + go func() { + defer close(finishedCh) + + wg.Wait() + }() + + select { + case err := <-errCh: + t.Fatalf("unexpected error: %v", err) + case <-finishedCh: + case <-time.After(30 * time.Second): + t.Fatal("should finish the Add in time") + } +} + +type Collecter interface { + Collect(ch chan<- prometheus.Metric) + + Add(t common.Statable, labels map[string]string) error +} + +type mockStatT struct { + id, namespace string + isV1 bool +} + +func (t *mockStatT) ID() string { + return t.id +} + +func (t *mockStatT) Namespace() string { + return t.namespace +} + +func (t *mockStatT) Stats(context.Context) (*types.Any, error) { + if t.isV1 { + return typeurl.MarshalAny(&v1types.Metrics{}) + } + return typeurl.MarshalAny(&v2types.Metrics{}) +} diff --git a/metrics/cgroups/v1/blkio.go b/metrics/cgroups/v1/blkio.go index d532bc7..2348f37 
100644 --- a/metrics/cgroups/v1/blkio.go +++ b/metrics/cgroups/v1/blkio.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/cgroups/v1/cgroups.go b/metrics/cgroups/v1/cgroups.go index 80cf20a..d80d13e 100644 --- a/metrics/cgroups/v1/cgroups.go +++ b/metrics/cgroups/v1/cgroups.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -29,7 +30,7 @@ import ( "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/v1/linux" - metrics "github.com/docker/go-metrics" + "github.com/docker/go-metrics" "github.com/sirupsen/logrus" ) @@ -55,8 +56,8 @@ type cgroupsMonitor struct { publisher events.Publisher } -func (m *cgroupsMonitor) Monitor(c runtime.Task) error { - if err := m.collector.Add(c); err != nil { +func (m *cgroupsMonitor) Monitor(c runtime.Task, labels map[string]string) error { + if err := m.collector.Add(c, labels); err != nil { return err } t, ok := c.(*linux.Task) diff --git a/metrics/cgroups/v1/cpu.go b/metrics/cgroups/v1/cpu.go index 0abe0fb..77996d9 100644 --- a/metrics/cgroups/v1/cpu.go +++ b/metrics/cgroups/v1/cpu.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/cgroups/v1/hugetlb.go b/metrics/cgroups/v1/hugetlb.go index 97ca890..e29b057 100644 --- a/metrics/cgroups/v1/hugetlb.go +++ b/metrics/cgroups/v1/hugetlb.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/cgroups/v1/memory.go b/metrics/cgroups/v1/memory.go index 915a36c..86f6196 100644 --- a/metrics/cgroups/v1/memory.go +++ b/metrics/cgroups/v1/memory.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/cgroups/v1/metric.go b/metrics/cgroups/v1/metric.go index dd9c303..c4f2947 100644 --- a/metrics/cgroups/v1/metric.go +++ b/metrics/cgroups/v1/metric.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/cgroups/v1/metrics.go b/metrics/cgroups/v1/metrics.go index eac3e26..f73b060 100644 --- 
a/metrics/cgroups/v1/metrics.go +++ b/metrics/cgroups/v1/metrics.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -25,21 +26,14 @@ import ( "github.com/containerd/cgroups" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metrics/cgroups/common" v1 "github.com/containerd/containerd/metrics/types/v1" "github.com/containerd/containerd/namespaces" "github.com/containerd/typeurl" - metrics "github.com/docker/go-metrics" - "github.com/gogo/protobuf/types" + "github.com/docker/go-metrics" "github.com/prometheus/client_golang/prometheus" ) -// Statable type that returns cgroup metrics -type Statable interface { - ID() string - Namespace() string - Stats(context.Context) (*types.Any, error) -} - // Trigger will be called when an event happens and provides the cgroup // where the event originated from type Trigger func(string, string, cgroups.Cgroup) @@ -53,7 +47,7 @@ func NewCollector(ns *metrics.Namespace) *Collector { // add machine cpus and memory info c := &Collector{ ns: ns, - tasks: make(map[string]Statable), + tasks: make(map[string]entry), } c.metrics = append(c.metrics, pidMetrics...) c.metrics = append(c.metrics, cpuMetrics...) @@ -69,15 +63,44 @@ func taskID(id, namespace string) string { return fmt.Sprintf("%s-%s", id, namespace) } +type entry struct { + task common.Statable + // ns is an optional child namespace that contains additional to parent labels. + // This can be used to append task specific labels to be able to differentiate the different containerd metrics. + ns *metrics.Namespace +} + // Collector provides the ability to collect container stats and export // them in the prometheus format type Collector struct { - mu sync.RWMutex - - tasks map[string]Statable ns *metrics.Namespace - metrics []*metric storedMetrics chan prometheus.Metric + + // TODO(fuweid): + // + // The Collector.Collect will be the field ns'Collect's callback, + // which be invoked periodically with internal lock. 
And Collector.Add + // might also invoke ns.Lock if the labels is not nil, which is easy to + // cause dead-lock. + // + // Goroutine X: + // + // ns.Collect + // ns.Lock + // Collector.Collect + // Collector.RLock + // + // + // Goroutine Y: + // + // Collector.Add + // ...(RLock/Lock) + // ns.Lock + // + // I think we should seek the way to decouple ns from Collector. + mu sync.RWMutex + tasks map[string]entry + metrics []*metric } // Describe prometheus metrics @@ -109,10 +132,11 @@ storedLoop: wg.Wait() } -func (c *Collector) collect(t Statable, ch chan<- prometheus.Metric, block bool, wg *sync.WaitGroup) { +func (c *Collector) collect(entry entry, ch chan<- prometheus.Metric, block bool, wg *sync.WaitGroup) { if wg != nil { defer wg.Done() } + t := entry.task ctx := namespaces.WithNamespace(context.Background(), t.Namespace()) stats, err := t.Stats(ctx) if err != nil { @@ -129,28 +153,41 @@ func (c *Collector) collect(t Statable, ch chan<- prometheus.Metric, block bool, log.L.WithError(err).Errorf("invalid metric type for %s", t.ID()) return } + ns := entry.ns + if ns == nil { + ns = c.ns + } for _, m := range c.metrics { - m.collect(t.ID(), t.Namespace(), s, c.ns, ch, block) + m.collect(t.ID(), t.Namespace(), s, ns, ch, block) } } // Add adds the provided cgroup and id so that metrics are collected and exported -func (c *Collector) Add(t Statable) error { +func (c *Collector) Add(t common.Statable, labels map[string]string) error { if c.ns == nil { return nil } - c.mu.Lock() - defer c.mu.Unlock() + c.mu.RLock() id := taskID(t.ID(), t.Namespace()) - if _, ok := c.tasks[id]; ok { + _, ok := c.tasks[id] + c.mu.RUnlock() + if ok { return nil // requests to collect metrics should be idempotent } - c.tasks[id] = t + + entry := entry{task: t} + if labels != nil { + entry.ns = c.ns.WithConstLabels(labels) + } + + c.mu.Lock() + c.tasks[id] = entry + c.mu.Unlock() return nil } // Remove removes the provided cgroup by id from the collector -func (c *Collector) Remove(t 
Statable) { +func (c *Collector) Remove(t common.Statable) { if c.ns == nil { return } @@ -165,6 +202,6 @@ func (c *Collector) RemoveAll() { return } c.mu.Lock() - c.tasks = make(map[string]Statable) + c.tasks = make(map[string]entry) c.mu.Unlock() } diff --git a/metrics/cgroups/v1/oom.go b/metrics/cgroups/v1/oom.go index a31b813..a7fa265 100644 --- a/metrics/cgroups/v1/oom.go +++ b/metrics/cgroups/v1/oom.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/cgroups/v1/pids.go b/metrics/cgroups/v1/pids.go index 6bb1876..3d60cd4 100644 --- a/metrics/cgroups/v1/pids.go +++ b/metrics/cgroups/v1/pids.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/cgroups/v2/cgroups.go b/metrics/cgroups/v2/cgroups.go index 4159190..f8a6aa9 100644 --- a/metrics/cgroups/v2/cgroups.go +++ b/metrics/cgroups/v2/cgroups.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -23,7 +24,7 @@ import ( "github.com/containerd/containerd/events" "github.com/containerd/containerd/runtime" - metrics "github.com/docker/go-metrics" + "github.com/docker/go-metrics" ) // NewTaskMonitor returns a new cgroups monitor @@ -42,8 +43,8 @@ type cgroupsMonitor struct { publisher events.Publisher } -func (m *cgroupsMonitor) Monitor(c runtime.Task) error { - if err := m.collector.Add(c); err != nil { +func (m *cgroupsMonitor) Monitor(c runtime.Task, labels map[string]string) error { + if err := m.collector.Add(c, labels); err != nil { return err } return nil diff --git a/metrics/cgroups/v2/cpu.go b/metrics/cgroups/v2/cpu.go index c9a48ad..29f3dd3 100644 --- a/metrics/cgroups/v2/cpu.go +++ b/metrics/cgroups/v2/cpu.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/cgroups/v2/io.go b/metrics/cgroups/v2/io.go index 79980a5..b626737 100644 --- a/metrics/cgroups/v2/io.go +++ b/metrics/cgroups/v2/io.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/cgroups/v2/memory.go b/metrics/cgroups/v2/memory.go index 
7d0332a..41813a3 100644 --- a/metrics/cgroups/v2/memory.go +++ b/metrics/cgroups/v2/memory.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/cgroups/v2/metric.go b/metrics/cgroups/v2/metric.go index ea995d2..f42b70d 100644 --- a/metrics/cgroups/v2/metric.go +++ b/metrics/cgroups/v2/metric.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/cgroups/v2/metrics.go b/metrics/cgroups/v2/metrics.go index e8bade1..00c498c 100644 --- a/metrics/cgroups/v2/metrics.go +++ b/metrics/cgroups/v2/metrics.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -24,21 +25,14 @@ import ( "sync" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metrics/cgroups/common" v2 "github.com/containerd/containerd/metrics/types/v2" "github.com/containerd/containerd/namespaces" "github.com/containerd/typeurl" - metrics "github.com/docker/go-metrics" - "github.com/gogo/protobuf/types" + "github.com/docker/go-metrics" "github.com/prometheus/client_golang/prometheus" ) -// Statable type that returns cgroup metrics -type Statable interface { - ID() string - Namespace() string - Stats(context.Context) (*types.Any, error) -} - // NewCollector registers the collector with the provided namespace and returns it so // that cgroups can be added for collection func NewCollector(ns *metrics.Namespace) *Collector { @@ -47,7 +41,7 @@ func NewCollector(ns *metrics.Namespace) *Collector { } c := &Collector{ ns: ns, - tasks: make(map[string]Statable), + tasks: make(map[string]entry), } c.metrics = append(c.metrics, pidMetrics...) c.metrics = append(c.metrics, cpuMetrics...) @@ -62,15 +56,44 @@ func taskID(id, namespace string) string { return fmt.Sprintf("%s-%s", id, namespace) } +type entry struct { + task common.Statable + // ns is an optional child namespace that contains additional to parent labels. + // This can be used to append task specific labels to be able to differentiate the different containerd metrics. 
+ ns *metrics.Namespace +} + // Collector provides the ability to collect container stats and export // them in the prometheus format type Collector struct { - mu sync.RWMutex - - tasks map[string]Statable ns *metrics.Namespace - metrics []*metric storedMetrics chan prometheus.Metric + + // TODO(fuweid): + // + // The Collector.Collect will be the field ns'Collect's callback, + // which be invoked periodically with internal lock. And Collector.Add + // might also invoke ns.Lock if the labels is not nil, which is easy to + // cause dead-lock. + // + // Goroutine X: + // + // ns.Collect + // ns.Lock + // Collector.Collect + // Collector.RLock + // + // + // Goroutine Y: + // + // Collector.Add + // ...(RLock/Lock) + // ns.Lock + // + // I think we should seek the way to decouple ns from Collector. + mu sync.RWMutex + tasks map[string]entry + metrics []*metric } // Describe prometheus metrics @@ -102,10 +125,11 @@ storedLoop: wg.Wait() } -func (c *Collector) collect(t Statable, ch chan<- prometheus.Metric, block bool, wg *sync.WaitGroup) { +func (c *Collector) collect(entry entry, ch chan<- prometheus.Metric, block bool, wg *sync.WaitGroup) { if wg != nil { defer wg.Done() } + t := entry.task ctx := namespaces.WithNamespace(context.Background(), t.Namespace()) stats, err := t.Stats(ctx) if err != nil { @@ -122,28 +146,39 @@ func (c *Collector) collect(t Statable, ch chan<- prometheus.Metric, block bool, log.L.WithError(err).Errorf("invalid metric type for %s", t.ID()) return } + ns := entry.ns + if ns == nil { + ns = c.ns + } for _, m := range c.metrics { - m.collect(t.ID(), t.Namespace(), s, c.ns, ch, block) + m.collect(t.ID(), t.Namespace(), s, ns, ch, block) } } // Add adds the provided cgroup and id so that metrics are collected and exported -func (c *Collector) Add(t Statable) error { +func (c *Collector) Add(t common.Statable, labels map[string]string) error { if c.ns == nil { return nil } - c.mu.Lock() - defer c.mu.Unlock() + c.mu.RLock() id := taskID(t.ID(), 
t.Namespace()) - if _, ok := c.tasks[id]; ok { + _, ok := c.tasks[id] + c.mu.RUnlock() + if ok { return nil // requests to collect metrics should be idempotent } - c.tasks[id] = t + entry := entry{task: t} + if labels != nil { + entry.ns = c.ns.WithConstLabels(labels) + } + c.mu.Lock() + c.tasks[id] = entry + c.mu.Unlock() return nil } // Remove removes the provided cgroup by id from the collector -func (c *Collector) Remove(t Statable) { +func (c *Collector) Remove(t common.Statable) { if c.ns == nil { return } @@ -158,6 +193,6 @@ func (c *Collector) RemoveAll() { return } c.mu.Lock() - c.tasks = make(map[string]Statable) + c.tasks = make(map[string]entry) c.mu.Unlock() } diff --git a/metrics/cgroups/v2/pids.go b/metrics/cgroups/v2/pids.go index f1028b0..78f90ca 100644 --- a/metrics/cgroups/v2/pids.go +++ b/metrics/cgroups/v2/pids.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/metrics.go b/metrics/metrics.go new file mode 100644 index 0000000..1debd87 --- /dev/null +++ b/metrics/metrics.go @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package metrics + +import ( + "github.com/containerd/containerd/version" + goMetrics "github.com/docker/go-metrics" +) + +func init() { + ns := goMetrics.NewNamespace("containerd", "", nil) + c := ns.NewLabeledCounter("build_info", "containerd build information", "version", "revision") + c.WithValues(version.Version, version.Revision).Inc() + goMetrics.Register(ns) +} diff --git a/metrics/types/v1/types.go b/metrics/types/v1/types.go index 025c1f4..69b1b65 100644 --- a/metrics/types/v1/types.go +++ b/metrics/types/v1/types.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/metrics/types/v2/types.go b/metrics/types/v2/types.go index eac8d46..8b5b596 100644 --- a/metrics/types/v2/types.go +++ b/metrics/types/v2/types.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/sys/mount_linux.go b/mount/fmountat_linux.go similarity index 90% rename from sys/mount_linux.go rename to mount/fmountat_linux.go index a210455..850a92a 100644 --- a/sys/mount_linux.go +++ b/mount/fmountat_linux.go @@ -14,20 +14,20 @@ limitations under the License. */ -package sys +package mount import ( + "fmt" "runtime" "syscall" "unsafe" "github.com/containerd/containerd/log" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) -// FMountat performs mount from the provided directory. -func FMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data string) error { +// fMountat performs mount from the provided directory. 
+func fMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data string) error { var ( sourceP, targetP, fstypeP, dataP *byte pid uintptr @@ -62,7 +62,7 @@ func FMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data var pipefds [2]int if err := syscall.Pipe2(pipefds[:], syscall.O_CLOEXEC); err != nil { - return errors.Wrap(err, "failed to open pipe") + return fmt.Errorf("failed to open pipe: %w", err) } defer func() { @@ -82,7 +82,7 @@ func FMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data ) if errno != 0 { - return errors.Wrap(errno, "failed to fork thread") + return fmt.Errorf("failed to fork thread: %w", errno) } defer func() { @@ -101,11 +101,11 @@ func FMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data uintptr(unsafe.Pointer(&status)), unsafe.Sizeof(status)) if errno != 0 { - return errors.Wrap(errno, "failed to read pipe") + return fmt.Errorf("failed to read pipe: %w", errno) } if status != 0 { - return errors.Wrap(status, "failed to mount") + return fmt.Errorf("failed to mount: %w", status) } return nil diff --git a/sys/mount_linux_test.go b/mount/fmountat_linux_test.go similarity index 89% rename from sys/mount_linux_test.go rename to mount/fmountat_linux_test.go index 38745f4..154165c 100644 --- a/sys/mount_linux_test.go +++ b/mount/fmountat_linux_test.go @@ -14,10 +14,10 @@ limitations under the License. 
*/ -package sys +package mount import ( - "io/ioutil" + "errors" "os" "path/filepath" "syscall" @@ -25,14 +25,13 @@ import ( "time" "github.com/containerd/continuity/fs/fstest" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) type fMountatCaseFunc func(t *testing.T, root string) func TestFMountat(t *testing.T) { - if !runningPrivileged() { + if unix.Geteuid() != 0 { t.Skip("Needs to be run as root") return } @@ -46,11 +45,7 @@ func makeTestForFMountat(fn fMountatCaseFunc) func(t *testing.T) { return func(t *testing.T) { t.Parallel() - suiteDir, err := ioutil.TempDir("", "fmountat-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(suiteDir) + suiteDir := t.TempDir() fn(t, suiteDir) } @@ -88,13 +83,13 @@ func testFMountatNormal(t *testing.T, root string) { defer f.Close() // mount work to fs - if err = FMountat(f.Fd(), workdir, "fs", "bind", unix.MS_BIND|unix.MS_RDONLY, ""); err != nil { + if err = fMountat(f.Fd(), workdir, "fs", "bind", unix.MS_BIND|unix.MS_RDONLY, ""); err != nil { t.Fatalf("expected no error here, but got error: %+v", err) } defer umount(t, fsdir) // check hi file - content, err := ioutil.ReadFile(filepath.Join(fsdir, "hi")) + content, err := os.ReadFile(filepath.Join(fsdir, "hi")) if err != nil { t.Fatalf("failed to read file: %+v", err) } @@ -124,9 +119,9 @@ func testFMountatWithFileFd(t *testing.T, root string) { } defer f.Close() - err = FMountat(f.Fd(), filepath.Join(root, "empty"), filepath.Join(root, "work"), "", 0, "") + err = fMountat(f.Fd(), filepath.Join(root, "empty"), filepath.Join(root, "work"), "", 0, "") if !errors.Is(err, expectedErr) { - t.Fatalf("expected error %v, but got %v", expectedErr, errors.Cause(err)) + t.Fatalf("expected error %v, but got %v", expectedErr, errors.Unwrap(err)) } } @@ -145,7 +140,7 @@ func testFMountatWithInvalidSource(t *testing.T, root string) { } defer f.Close() - err = FMountat(f.Fd(), filepath.Join(root, "oops"), "at", "bind", unix.MS_BIND, "") + err = fMountat(f.Fd(), 
filepath.Join(root, "oops"), "at", "bind", unix.MS_BIND, "") if !errors.Is(err, expectedErr) { t.Fatalf("expected error %v, but got %v", expectedErr, err) } diff --git a/mount/lookup_linux_test.go b/mount/lookup_linux_test.go index f2ca5fd..2e31070 100644 --- a/mount/lookup_linux_test.go +++ b/mount/lookup_linux_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -20,9 +18,7 @@ package mount import ( "fmt" - "io/ioutil" "os" - "os/exec" "path/filepath" "strings" "testing" @@ -31,6 +27,7 @@ import ( // so we use continuity/testutil instead. "github.com/containerd/continuity/testutil" "github.com/containerd/continuity/testutil/loopback" + exec "golang.org/x/sys/execabs" "gotest.tools/v3/assert" ) @@ -44,11 +41,7 @@ func checkLookup(t *testing.T, fsType, mntPoint, dir string) { func testLookup(t *testing.T, fsType string) { testutil.RequiresRoot(t) - mnt, err := ioutil.TempDir("", "containerd-mountinfo-test-lookup") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(mnt) + mnt := t.TempDir() loop, err := loopback.New(100 << 20) // 100 MB if err != nil { @@ -71,11 +64,7 @@ func testLookup(t *testing.T, fsType string) { assert.Check(t, strings.HasPrefix(loop.Device, "/dev/loop")) checkLookup(t, fsType, mnt, mnt) - newMnt, err := ioutil.TempDir("", "containerd-mountinfo-test-newMnt") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(newMnt) + newMnt := t.TempDir() if out, err := exec.Command("mount", "--bind", mnt, newMnt).CombinedOutput(); err != nil { t.Fatalf("could not mount %s to %s: %v (out: %q)", mnt, newMnt, err, string(out)) @@ -102,21 +91,10 @@ func TestLookupWithXFS(t *testing.T) { } func TestLookupWithOverlay(t *testing.T) { - lower, err := ioutil.TempDir("", "containerd-mountinfo-test-lower") - assert.NilError(t, err) - defer os.RemoveAll(lower) - - upper, err := ioutil.TempDir("", "containerd-mountinfo-test-upper") - assert.NilError(t, err) - defer os.RemoveAll(upper) - - work, err := ioutil.TempDir("", 
"containerd-mountinfo-test-work") - assert.NilError(t, err) - defer os.RemoveAll(work) - - overlay, err := ioutil.TempDir("", "containerd-mountinfo-test-overlay") - assert.NilError(t, err) - defer os.RemoveAll(overlay) + lower := t.TempDir() + upper := t.TempDir() + work := t.TempDir() + overlay := t.TempDir() if out, err := exec.Command("mount", "-t", "overlay", "overlay", "-o", fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work), overlay).CombinedOutput(); err != nil { @@ -126,7 +104,7 @@ func TestLookupWithOverlay(t *testing.T) { defer testutil.Unmount(t, overlay) testdir := filepath.Join(overlay, "testdir") - err = os.Mkdir(testdir, 0777) + err := os.Mkdir(testdir, 0777) assert.NilError(t, err) testfile := filepath.Join(overlay, "testfile") diff --git a/mount/lookup_unix.go b/mount/lookup_unix.go index a2d51ce..4488175 100644 --- a/mount/lookup_unix.go +++ b/mount/lookup_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,10 +20,10 @@ package mount import ( + "fmt" "path/filepath" "github.com/moby/sys/mountinfo" - "github.com/pkg/errors" ) // Lookup returns the mount info corresponds to the path. 
@@ -31,10 +32,10 @@ func Lookup(dir string) (Info, error) { m, err := mountinfo.GetMounts(mountinfo.ParentsFilter(dir)) if err != nil { - return Info{}, errors.Wrapf(err, "failed to find the mount info for %q", dir) + return Info{}, fmt.Errorf("failed to find the mount info for %q: %w", dir, err) } if len(m) == 0 { - return Info{}, errors.Errorf("failed to find the mount info for %q", dir) + return Info{}, fmt.Errorf("failed to find the mount info for %q", dir) } // find the longest matching mount point diff --git a/mount/lookup_unsupported.go b/mount/lookup_unsupported.go index 46ec66a..2e954b1 100644 --- a/mount/lookup_unsupported.go +++ b/mount/lookup_unsupported.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* diff --git a/mount/losetup_linux.go b/mount/losetup_linux.go index e99e962..5bfd7f1 100644 --- a/mount/losetup_linux.go +++ b/mount/losetup_linux.go @@ -17,15 +17,15 @@ package mount import ( + "errors" "fmt" - "math/rand" "os" "strings" "syscall" "time" "unsafe" - "github.com/pkg/errors" + "github.com/containerd/containerd/pkg/randutil" "golang.org/x/sys/unix" ) @@ -59,12 +59,12 @@ func ioctl(fd, req, args uintptr) (uintptr, uintptr, error) { func getFreeLoopDev() (uint32, error) { ctrl, err := os.OpenFile(loopControlPath, os.O_RDWR, 0) if err != nil { - return 0, errors.Errorf("could not open %v: %v", loopControlPath, err) + return 0, fmt.Errorf("could not open %v: %v", loopControlPath, err) } defer ctrl.Close() num, _, err := ioctl(ctrl.Fd(), unix.LOOP_CTL_GET_FREE, 0) if err != nil { - return 0, errors.Wrap(err, "could not get free loop device") + return 0, fmt.Errorf("could not get free loop device: %w", err) } return uint32(num), nil } @@ -81,13 +81,13 @@ func setupLoopDev(backingFile, loopDev string, param LoopParams) (_ *os.File, re back, err := os.OpenFile(backingFile, flags, 0) if err != nil { - return nil, errors.Wrapf(err, "could not open backing file: %s", backingFile) + return nil, fmt.Errorf("could not open backing file: %s: %w", 
backingFile, err) } defer back.Close() loop, err := os.OpenFile(loopDev, flags, 0) if err != nil { - return nil, errors.Wrapf(err, "could not open loop device: %s", loopDev) + return nil, fmt.Errorf("could not open loop device: %s: %w", loopDev, err) } defer func() { if retErr != nil { @@ -97,7 +97,7 @@ func setupLoopDev(backingFile, loopDev string, param LoopParams) (_ *os.File, re // 2. Set FD if _, _, err = ioctl(loop.Fd(), unix.LOOP_SET_FD, back.Fd()); err != nil { - return nil, errors.Wrapf(err, "could not set loop fd for device: %s", loopDev) + return nil, fmt.Errorf("could not set loop fd for device: %s: %w", loopDev, err) } // 3. Set Info @@ -131,7 +131,7 @@ func setupLoopDev(backingFile, loopDev string, param LoopParams) (_ *os.File, re } _, _, _ = ioctl(loop.Fd(), unix.LOOP_CLR_FD, 0) - return nil, errors.Errorf("failed to set loop device info: %v", err) + return nil, fmt.Errorf("failed to set loop device info: %v", err) } // setupLoop looks for (and possibly creates) a free loop device, and @@ -163,7 +163,7 @@ func setupLoop(backingFile string, param LoopParams) (*os.File, error) { // with EBUSY when trying to set it up. if strings.Contains(err.Error(), ebusyString) { // Fallback a bit to avoid live lock - time.Sleep(time.Millisecond * time.Duration(rand.Intn(retry*10))) + time.Sleep(time.Millisecond * time.Duration(randutil.Intn(retry*10))) continue } return nil, err @@ -200,7 +200,7 @@ func AttachLoopDevice(backingFile string) (string, error) { func DetachLoopDevice(devices ...string) error { for _, dev := range devices { if err := removeLoop(dev); err != nil { - return errors.Wrapf(err, "failed to remove loop device: %s", dev) + return fmt.Errorf("failed to remove loop device: %s: %w", dev, err) } } diff --git a/mount/losetup_linux_test.go b/mount/losetup_linux_test.go index 75bc2b9..938950f 100644 --- a/mount/losetup_linux_test.go +++ b/mount/losetup_linux_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -19,7 +17,6 @@ package mount import ( - "io/ioutil" "os" "testing" @@ -31,7 +28,7 @@ var randomData = []byte("randomdata") func createTempFile(t *testing.T) string { t.Helper() - f, err := ioutil.TempFile("", "losetup") + f, err := os.CreateTemp("", "losetup") if err != nil { t.Fatal(err) } diff --git a/mount/mount.go b/mount/mount.go index b25556b..9dd4f32 100644 --- a/mount/mount.go +++ b/mount/mount.go @@ -16,6 +16,10 @@ package mount +import ( + "strings" +) + // Mount is the lingua franca of containerd. A mount represents a // serialized mount syscall. Components either emit or consume mounts. type Mount struct { @@ -38,3 +42,46 @@ func All(mounts []Mount, target string) error { } return nil } + +// readonlyMounts modifies the received mount options +// to make them readonly +func readonlyMounts(mounts []Mount) []Mount { + for i, m := range mounts { + if m.Type == "overlay" { + mounts[i].Options = readonlyOverlay(m.Options) + continue + } + opts := make([]string, 0, len(m.Options)) + for _, opt := range m.Options { + if opt != "rw" && opt != "ro" { // skip `ro` too so we don't append it twice + opts = append(opts, opt) + } + } + opts = append(opts, "ro") + mounts[i].Options = opts + } + return mounts +} + +// readonlyOverlay takes mount options for overlay mounts and makes them readonly by +// removing workdir and upperdir (and appending the upperdir layer to lowerdir) - see: +// https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html#multiple-lower-layers +func readonlyOverlay(opt []string) []string { + out := make([]string, 0, len(opt)) + upper := "" + for _, o := range opt { + if strings.HasPrefix(o, "upperdir=") { + upper = strings.TrimPrefix(o, "upperdir=") + } else if !strings.HasPrefix(o, "workdir=") { + out = append(out, o) + } + } + if upper != "" { + for i, o := range out { + if strings.HasPrefix(o, "lowerdir=") { + out[i] = "lowerdir=" + upper + ":" + strings.TrimPrefix(o, "lowerdir=") + } + } + } + return out +} diff --git 
a/mount/mount_freebsd.go b/mount/mount_freebsd.go index d524500..3711383 100644 --- a/mount/mount_freebsd.go +++ b/mount/mount_freebsd.go @@ -1,5 +1,3 @@ -// +build freebsd - /* Copyright The containerd Authors. @@ -19,11 +17,12 @@ package mount import ( + "errors" + "fmt" "os" - "os/exec" "time" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) @@ -66,7 +65,7 @@ func (m *Mount) mountWithHelper(target string) error { return nil } if !errors.Is(err, unix.ECHILD) { - return errors.Wrapf(err, "mount [%v] failed: %q", args, string(out)) + return fmt.Errorf("mount [%v] failed: %q: %w", args, string(out), err) } // We got ECHILD, we are not sure whether the mount was successful. // If the mount ID has changed, we are sure we got some new mount, but still not sure it is fully completed. @@ -79,7 +78,7 @@ func (m *Mount) mountWithHelper(target string) error { _ = unmount(target, 0) } } - return errors.Errorf("mount [%v] failed with ECHILD (retired %d times)", args, retriesOnECHILD) + return fmt.Errorf("mount [%v] failed with ECHILD (retired %d times)", args, retriesOnECHILD) } // Unmount the provided mount path with the flags @@ -103,7 +102,7 @@ func unmount(target string, flags int) error { } return nil } - return errors.Wrapf(unix.EBUSY, "failed to unmount target %s", target) + return fmt.Errorf("failed to unmount target %s: %w", target, unix.EBUSY) } // UnmountAll repeatedly unmounts the given mount point until there diff --git a/mount/mount_linux.go b/mount/mount_linux.go index 3f05ebc..a69f65c 100644 --- a/mount/mount_linux.go +++ b/mount/mount_linux.go @@ -17,15 +17,14 @@ package mount import ( + "errors" "fmt" "os" - "os/exec" "path" "strings" "time" - "github.com/containerd/containerd/sys" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) @@ -64,7 +63,7 @@ func (m *Mount) Mount(target string) (err error) { flags, data, losetup := parseMountOptions(options) if len(data) > pagesize { - return 
errors.Errorf("mount options is too long") + return errors.New("mount options is too long") } // propagation types. @@ -164,7 +163,7 @@ func unmount(target string, flags int) error { } return nil } - return errors.Wrapf(unix.EBUSY, "failed to unmount target %s", target) + return fmt.Errorf("failed to unmount target %s: %w", target, unix.EBUSY) } // UnmountAll repeatedly unmounts the given mount point until there @@ -366,19 +365,22 @@ func mountAt(chdir string, source, target, fstype string, flags uintptr, data st f, err := os.Open(chdir) if err != nil { - return errors.Wrap(err, "failed to mountat") + return fmt.Errorf("failed to mountat: %w", err) } defer f.Close() fs, err := f.Stat() if err != nil { - return errors.Wrap(err, "failed to mountat") + return fmt.Errorf("failed to mountat: %w", err) } if !fs.IsDir() { - return errors.Wrap(errors.Errorf("%s is not dir", chdir), "failed to mountat") + return fmt.Errorf("failed to mountat: %s is not dir", chdir) } - return errors.Wrap(sys.FMountat(f.Fd(), source, target, fstype, flags, data), "failed to mountat") + if err := fMountat(f.Fd(), source, target, fstype, flags, data); err != nil { + return fmt.Errorf("failed to mountat: %w", err) + } + return nil } func (m *Mount) mountWithHelper(helperBinary, typePrefix, target string) error { @@ -407,7 +409,7 @@ func (m *Mount) mountWithHelper(helperBinary, typePrefix, target string) error { return nil } if !errors.Is(err, unix.ECHILD) { - return errors.Wrapf(err, "mount helper [%s %v] failed: %q", helperBinary, args, string(out)) + return fmt.Errorf("mount helper [%s %v] failed: %q: %w", helperBinary, args, string(out), err) } // We got ECHILD, we are not sure whether the mount was successful. // If the mount ID has changed, we are sure we got some new mount, but still not sure it is fully completed. 
@@ -420,5 +422,5 @@ func (m *Mount) mountWithHelper(helperBinary, typePrefix, target string) error { _ = unmount(target, 0) } } - return errors.Errorf("mount helper [%s %v] failed with ECHILD (retired %d times)", helperBinary, args, retriesOnECHILD) + return fmt.Errorf("mount helper [%s %v] failed with ECHILD (retired %d times)", helperBinary, args, retriesOnECHILD) } diff --git a/mount/mount_linux_test.go b/mount/mount_linux_test.go index 131294d..84e7085 100644 --- a/mount/mount_linux_test.go +++ b/mount/mount_linux_test.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -20,14 +18,13 @@ package mount import ( "fmt" - "io/ioutil" "os" - "os/exec" "path/filepath" "reflect" "testing" "github.com/containerd/continuity/testutil" + exec "golang.org/x/sys/execabs" ) func TestLongestCommonPrefix(t *testing.T) { @@ -107,15 +104,7 @@ func TestFUSEHelper(t *testing.T) { if err != nil { t.Skip("fuse-overlayfs not installed") } - td, err := ioutil.TempDir("", "fuse") - if err != nil { - t.Fatal(err) - } - defer func() { - if err := os.RemoveAll(td); err != nil { - t.Fatal(err) - } - }() + td := t.TempDir() for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} { if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil { diff --git a/mount/mount_test.go b/mount/mount_test.go new file mode 100644 index 0000000..5d0c7f6 --- /dev/null +++ b/mount/mount_test.go @@ -0,0 +1,150 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package mount + +import ( + "reflect" + "testing" + + // required for `-test.root` flag not to fail + _ "github.com/containerd/continuity/testutil" +) + +func TestReadonlyMounts(t *testing.T) { + testCases := []struct { + desc string + input []Mount + expected []Mount + }{ + { + desc: "empty slice", + input: []Mount{}, + expected: []Mount{}, + }, + { + desc: "removes `upperdir` and `workdir` from overlay mounts, appends upper layer to lower", + input: []Mount{ + { + Type: "overlay", + Source: "overlay", + Options: []string{ + "index=off", + "workdir=/path/to/snapshots/4/work", + "upperdir=/path/to/snapshots/4/fs", + "lowerdir=/path/to/snapshots/1/fs", + }, + }, + { + Type: "overlay", + Source: "overlay", + Options: []string{ + "index=on", + "lowerdir=/another/path/to/snapshots/2/fs", + }, + }, + }, + expected: []Mount{ + { + Type: "overlay", + Source: "overlay", + Options: []string{ + "index=off", + "lowerdir=/path/to/snapshots/4/fs:/path/to/snapshots/1/fs", + }, + }, + { + Type: "overlay", + Source: "overlay", + Options: []string{ + "index=on", + "lowerdir=/another/path/to/snapshots/2/fs", + }, + }, + }, + }, + { + desc: "removes `rw` and appends `ro` (once) to other mount types", + input: []Mount{ + { + Type: "mount-without-rw", + Source: "", + Options: []string{ + "index=off", + "workdir=/path/to/other/snapshots/work", + "upperdir=/path/to/other/snapshots/2", + "lowerdir=/path/to/other/snapshots/1", + }, + }, + { + Type: "mount-with-rw", + Source: "", + Options: []string{ + "an-option=a-value", + "another_opt=/another/value", + "rw", + }, + }, + { + Type: "mount-with-ro", + Source: "", + Options: []string{ + "an-option=a-value", + "another_opt=/another/value", + "ro", + }, + }, + }, + expected: []Mount{ + { + Type: "mount-without-rw", + Source: "", + Options: []string{ + "index=off", + "workdir=/path/to/other/snapshots/work", + 
"upperdir=/path/to/other/snapshots/2", + "lowerdir=/path/to/other/snapshots/1", + "ro", + }, + }, + { + Type: "mount-with-rw", + Source: "", + Options: []string{ + "an-option=a-value", + "another_opt=/another/value", + "ro", + }, + }, + { + Type: "mount-with-ro", + Source: "", + Options: []string{ + "an-option=a-value", + "another_opt=/another/value", + "ro", + }, + }, + }, + }, + } + + for _, tc := range testCases { + if !reflect.DeepEqual(readonlyMounts(tc.input), tc.expected) { + t.Fatalf("incorrectly modified mounts: %s", tc.desc) + } + } +} diff --git a/mount/mount_unix.go b/mount/mount_unix.go index ecf569d..795bb4b 100644 --- a/mount/mount_unix.go +++ b/mount/mount_unix.go @@ -1,3 +1,4 @@ +//go:build darwin || openbsd // +build darwin openbsd /* @@ -18,7 +19,7 @@ package mount -import "github.com/pkg/errors" +import "errors" var ( // ErrNotImplementOnUnix is returned for methods that are not implemented diff --git a/mount/mount_windows.go b/mount/mount_windows.go index 5de25c4..87fed82 100644 --- a/mount/mount_windows.go +++ b/mount/mount_windows.go @@ -18,11 +18,13 @@ package mount import ( "encoding/json" + "errors" + "fmt" + "os" "path/filepath" "strings" "github.com/Microsoft/hcsshim" - "github.com/pkg/errors" ) var ( @@ -33,7 +35,7 @@ var ( // Mount to the provided target func (m *Mount) Mount(target string) error { if m.Type != "windows-layer" { - return errors.Errorf("invalid windows mount type: '%s'", m.Type) + return fmt.Errorf("invalid windows mount type: '%s'", m.Type) } home, layerID := filepath.Split(m.Source) @@ -48,16 +50,22 @@ func (m *Mount) Mount(target string) error { } if err = hcsshim.ActivateLayer(di, layerID); err != nil { - return errors.Wrapf(err, "failed to activate layer %s", m.Source) + return fmt.Errorf("failed to activate layer %s: %w", m.Source, err) } - defer func() { - if err != nil { - hcsshim.DeactivateLayer(di, layerID) - } - }() if err = hcsshim.PrepareLayer(di, layerID, parentLayerPaths); err != nil { - return 
errors.Wrapf(err, "failed to prepare layer %s", m.Source) + return fmt.Errorf("failed to prepare layer %s: %w", m.Source, err) + } + + // We can link the layer mount path to the given target. It is an UNC path, and it needs + // a trailing backslash. + mountPath, err := hcsshim.GetLayerMountPath(di, layerID) + if err != nil { + return fmt.Errorf("failed to get layer mount path for %s: %w", m.Source, err) + } + mountPath = mountPath + `\` + if err = os.Symlink(mountPath, target); err != nil { + return fmt.Errorf("failed to link mount to taget %s: %w", target, err) } return nil } @@ -73,7 +81,7 @@ func (m *Mount) GetParentPaths() ([]string, error) { if strings.HasPrefix(option, ParentLayerPathsFlag) { err := json.Unmarshal([]byte(option[len(ParentLayerPathsFlag):]), &parentLayerPaths) if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal parent layer paths from mount") + return nil, fmt.Errorf("failed to unmarshal parent layer paths from mount: %w", err) } } } @@ -90,10 +98,10 @@ func Unmount(mount string, flags int) error { ) if err := hcsshim.UnprepareLayer(di, layerID); err != nil { - return errors.Wrapf(err, "failed to unprepare layer %s", mount) + return fmt.Errorf("failed to unprepare layer %s: %w", mount, err) } if err := hcsshim.DeactivateLayer(di, layerID); err != nil { - return errors.Wrapf(err, "failed to deactivate layer %s", mount) + return fmt.Errorf("failed to deactivate layer %s: %w", mount, err) } return nil diff --git a/sys/subprocess_unsafe_linux.go b/mount/subprocess_unsafe_linux.go similarity index 98% rename from sys/subprocess_unsafe_linux.go rename to mount/subprocess_unsafe_linux.go index 6e40a9c..c7cb0c0 100644 --- a/sys/subprocess_unsafe_linux.go +++ b/mount/subprocess_unsafe_linux.go @@ -14,7 +14,7 @@ limitations under the License. */ -package sys +package mount import ( _ "unsafe" // required for go:linkname. 
diff --git a/sys/subprocess_unsafe_linux.s b/mount/subprocess_unsafe_linux.s similarity index 100% rename from sys/subprocess_unsafe_linux.s rename to mount/subprocess_unsafe_linux.s diff --git a/mount/temp.go b/mount/temp.go index 9dc4010..889d49c 100644 --- a/mount/temp.go +++ b/mount/temp.go @@ -18,11 +18,10 @@ package mount import ( "context" - "io/ioutil" + "fmt" "os" "github.com/containerd/containerd/log" - "github.com/pkg/errors" ) var tempMountLocation = getTempDir() @@ -31,9 +30,9 @@ var tempMountLocation = getTempDir() // The mounts are valid during the call to the f. // Finally we will unmount and remove the temp dir regardless of the result of f. func WithTempMount(ctx context.Context, mounts []Mount, f func(root string) error) (err error) { - root, uerr := ioutil.TempDir(tempMountLocation, "containerd-mount") + root, uerr := os.MkdirTemp(tempMountLocation, "containerd-mount") if uerr != nil { - return errors.Wrapf(uerr, "failed to create temp dir") + return fmt.Errorf("failed to create temp dir: %w", uerr) } // We use Remove here instead of RemoveAll. // The RemoveAll will delete the temp dir and all children it contains. @@ -44,25 +43,35 @@ func WithTempMount(ctx context.Context, mounts []Mount, f func(root string) erro // For details, please refer to #1868 #1785. defer func() { if uerr = os.Remove(root); uerr != nil { - log.G(ctx).WithError(uerr).WithField("dir", root).Errorf("failed to remove mount temp dir") + log.G(ctx).WithError(uerr).WithField("dir", root).Error("failed to remove mount temp dir") } }() // We should do defer first, if not we will not do Unmount when only a part of Mounts are failed. 
defer func() { if uerr = UnmountAll(root, 0); uerr != nil { - uerr = errors.Wrapf(uerr, "failed to unmount %s", root) + uerr = fmt.Errorf("failed to unmount %s: %w", root, uerr) if err == nil { err = uerr } else { - err = errors.Wrap(err, uerr.Error()) + err = fmt.Errorf("%s: %w", uerr.Error(), err) } } }() if uerr = All(mounts, root); uerr != nil { - return errors.Wrapf(uerr, "failed to mount %s", root) + return fmt.Errorf("failed to mount %s: %w", root, uerr) } - return errors.Wrapf(f(root), "mount callback failed on %s", root) + if err := f(root); err != nil { + return fmt.Errorf("mount callback failed on %s: %w", root, err) + } + return nil +} + +// WithReadonlyTempMount mounts the provided mounts to a temp dir as readonly, +// and pass the temp dir to f. The mounts are valid during the call to the f. +// Finally we will unmount and remove the temp dir regardless of the result of f. +func WithReadonlyTempMount(ctx context.Context, mounts []Mount, f func(root string) error) (err error) { + return WithTempMount(ctx, readonlyMounts(mounts), f) } func getTempDir() string { diff --git a/mount/temp_unix.go b/mount/temp_unix.go index ed190b8..e969700 100644 --- a/mount/temp_unix.go +++ b/mount/temp_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright The containerd Authors. diff --git a/mount/temp_unsupported.go b/mount/temp_unsupported.go index 942be41..feec90a 100644 --- a/mount/temp_unsupported.go +++ b/mount/temp_unsupported.go @@ -1,4 +1,5 @@ -// +build windows +//go:build windows || darwin +// +build windows darwin /* Copyright The containerd Authors. 
diff --git a/namespaces/context.go b/namespaces/context.go index b53c901..e5e23fe 100644 --- a/namespaces/context.go +++ b/namespaces/context.go @@ -18,11 +18,11 @@ package namespaces import ( "context" + "fmt" "os" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/identifiers" - "github.com/pkg/errors" ) const ( @@ -69,10 +69,10 @@ func Namespace(ctx context.Context) (string, bool) { func NamespaceRequired(ctx context.Context) (string, error) { namespace, ok := Namespace(ctx) if !ok || namespace == "" { - return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required") + return "", fmt.Errorf("namespace is required: %w", errdefs.ErrFailedPrecondition) } if err := identifiers.Validate(namespace); err != nil { - return "", errors.Wrap(err, "namespace validation") + return "", fmt.Errorf("namespace validation: %w", err) } return namespace, nil } diff --git a/namespaces/context_test.go b/namespaces/context_test.go index 41d965c..2954c82 100644 --- a/namespaces/context_test.go +++ b/namespaces/context_test.go @@ -18,7 +18,6 @@ package namespaces import ( "context" - "os" "testing" ) @@ -47,9 +46,6 @@ func TestContext(t *testing.T) { } func TestNamespaceFromEnv(t *testing.T) { - oldenv := os.Getenv(NamespaceEnvVar) - defer os.Setenv(NamespaceEnvVar, oldenv) // restore old env var - ctx := context.Background() namespace, ok := Namespace(ctx) if ok { @@ -61,7 +57,7 @@ func TestNamespaceFromEnv(t *testing.T) { } expected := "test-namespace" - os.Setenv(NamespaceEnvVar, expected) + t.Setenv(NamespaceEnvVar, expected) nctx := NamespaceFromEnv(ctx) namespace, ok = Namespace(nctx) diff --git a/namespaces/store.go b/namespaces/store.go index 5936772..a1b2571 100644 --- a/namespaces/store.go +++ b/namespaces/store.go @@ -24,8 +24,6 @@ import "context" // oriented. A namespace is really just a name and a set of labels. Objects // that belong to a namespace are returned when the namespace is assigned to a // given context. 
-// -// type Store interface { Create(ctx context.Context, namespace string, labels map[string]string) error Labels(ctx context.Context, namespace string) (map[string]string, error) diff --git a/oci/mounts.go b/oci/mounts.go new file mode 100644 index 0000000..83dd0d0 --- /dev/null +++ b/oci/mounts.go @@ -0,0 +1,71 @@ +//go:build !freebsd +// +build !freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package oci + +import ( + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func defaultMounts() []specs.Mount { + return []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/dev/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/run", + Type: "tmpfs", + Source: "tmpfs", + 
Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, + } +} diff --git a/vendor/github.com/containerd/go-runc/command_other.go b/oci/mounts_freebsd.go similarity index 62% rename from vendor/github.com/containerd/go-runc/command_other.go rename to oci/mounts_freebsd.go index b8fd4b8..42b9d7a 100644 --- a/vendor/github.com/containerd/go-runc/command_other.go +++ b/oci/mounts_freebsd.go @@ -1,5 +1,3 @@ -// +build !linux - /* Copyright The containerd Authors. @@ -16,20 +14,25 @@ limitations under the License. */ -package runc +package oci import ( - "context" - "os" - "os/exec" + specs "github.com/opencontainers/runtime-spec/specs-go" ) -func (r *Runc) command(context context.Context, args ...string) *exec.Cmd { - command := r.Command - if command == "" { - command = DefaultCommand +func defaultMounts() []specs.Mount { + return []specs.Mount{ + { + Destination: "/dev", + Type: "devfs", + Source: "devfs", + Options: []string{"ruleset=4"}, + }, + { + Destination: "/dev/fd", + Type: "fdescfs", + Source: "fdescfs", + Options: []string{}, + }, } - cmd := exec.CommandContext(context, command, append(r.args(), args...)...) 
- cmd.Env = os.Environ() - return cmd } diff --git a/oci/spec.go b/oci/spec.go index ff25ddf..a1c98dd 100644 --- a/oci/spec.go +++ b/oci/spec.go @@ -160,50 +160,6 @@ func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error { }, }, }, - Mounts: []specs.Mount{ - { - Destination: "/proc", - Type: "proc", - Source: "proc", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, - }, - { - Destination: "/dev/pts", - Type: "devpts", - Source: "devpts", - Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, - }, - { - Destination: "/dev/shm", - Type: "tmpfs", - Source: "shm", - Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, - }, - { - Destination: "/dev/mqueue", - Type: "mqueue", - Source: "mqueue", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/sys", - Type: "sysfs", - Source: "sysfs", - Options: []string{"nosuid", "noexec", "nodev", "ro"}, - }, - { - Destination: "/run", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, - }, - }, Linux: &specs.Linux{ MaskedPaths: []string{ "/proc/acpi", @@ -236,6 +192,7 @@ func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error { Namespaces: defaultUnixNamespaces(), }, } + s.Mounts = defaultMounts() return nil } diff --git a/oci/spec_opts.go b/oci/spec_opts.go index 7985387..65811fc 100644 --- a/oci/spec_opts.go +++ b/oci/spec_opts.go @@ -20,10 +20,11 @@ import ( "bufio" "context" "encoding/json" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" + "runtime" "strconv" "strings" @@ -36,8 +37,7 @@ import ( "github.com/containerd/continuity/fs" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runc/libcontainer/user" - specs "github.com/opencontainers/runtime-spec/specs-go" - 
"github.com/pkg/errors" + "github.com/opencontainers/runtime-spec/specs-go" ) // SpecOpts sets spec specific information to a newly generated OCI spec @@ -76,7 +76,6 @@ func setLinux(s *Spec) { } } -// nolint func setResources(s *Spec) { if s.Linux != nil { if s.Linux.Resources == nil { @@ -90,7 +89,7 @@ func setResources(s *Spec) { } } -// nolint +//nolint:nolintlint,unused // not used on all platforms func setCPU(s *Spec) { setResources(s) if s.Linux != nil { @@ -113,6 +112,17 @@ func setCapabilities(s *Spec) { } } +// ensureAdditionalGids ensures that the primary GID is also included in the additional GID list. +func ensureAdditionalGids(s *Spec) { + setProcess(s) + for _, f := range s.Process.User.AdditionalGids { + if f == s.Process.User.GID { + return + } + } + s.Process.User.AdditionalGids = append([]uint32{s.Process.User.GID}, s.Process.User.AdditionalGids...) +} + // WithDefaultSpec returns a SpecOpts that will populate the spec with default // values. // @@ -138,7 +148,7 @@ func WithSpecFromBytes(p []byte) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { *s = Spec{} // make sure spec is cleared. if err := json.Unmarshal(p, s); err != nil { - return errors.Wrapf(err, "decoding spec config file failed, current supported OCI runtime-spec : v%s", specs.Version) + return fmt.Errorf("decoding spec config file failed, current supported OCI runtime-spec : v%s: %w", specs.Version, err) } return nil } @@ -147,9 +157,9 @@ func WithSpecFromBytes(p []byte) SpecOpts { // WithSpecFromFile loads the specification from the provided filename. 
func WithSpecFromFile(filename string) SpecOpts { return func(ctx context.Context, c Client, container *containers.Container, s *Spec) error { - p, err := ioutil.ReadFile(filename) + p, err := os.ReadFile(filename) if err != nil { - return errors.Wrap(err, "cannot load spec config file") + return fmt.Errorf("cannot load spec config file: %w", err) } return WithSpecFromBytes(p)(ctx, c, container, s) } @@ -218,6 +228,7 @@ func WithProcessArgs(args ...string) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { setProcess(s) s.Process.Args = args + s.Process.CommandLine = "" return nil } } @@ -347,17 +358,19 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { return err } var ( - ociimage v1.Image - config v1.ImageConfig + imageConfigBytes []byte + ociimage v1.Image + config v1.ImageConfig ) switch ic.MediaType { case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config: - p, err := content.ReadBlob(ctx, image.ContentStore(), ic) + var err error + imageConfigBytes, err = content.ReadBlob(ctx, image.ContentStore(), ic) if err != nil { return err } - if err := json.Unmarshal(p, &ociimage); err != nil { + if err := json.Unmarshal(imageConfigBytes, &ociimage); err != nil { return err } config = ociimage.Config @@ -394,11 +407,55 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { return WithAdditionalGIDs("root")(ctx, client, c, s) } else if s.Windows != nil { s.Process.Env = replaceOrAppendEnvValues(config.Env, s.Process.Env) + + // To support Docker ArgsEscaped on Windows we need to combine the + // image Entrypoint & (Cmd Or User Args) while taking into account + // if Docker has already escaped them in the image config. When + // Docker sets `ArgsEscaped==true` in the config it has pre-escaped + // either Entrypoint or Cmd or both. Cmd should always be treated as + // arguments appended to Entrypoint unless: + // + // 1. 
Entrypoint does not exist, in which case Cmd[0] is the + // executable. + // + // 2. The user overrides the Cmd with User Args when activating the + // container in which case those args should be appended to the + // Entrypoint if it exists. + // + // To effectively do this we need to know if the arguments came from + // the user or if the arguments came from the image config when + // ArgsEscaped==true. In this case we only want to escape the + // additional user args when forming the complete CommandLine. This + // is safe in both cases of Entrypoint or Cmd being set because + // Docker will always escape them to an array of length one. Thus in + // both cases it is the "executable" portion of the command. + // + // In the case ArgsEscaped==false, Entrypoint or Cmd will contain + // any number of entries that are all unescaped and can simply be + // combined (potentially overwriting Cmd with User Args if present) + // and forwarded the container start as an Args array. cmd := config.Cmd + cmdFromImage := true if len(args) > 0 { cmd = args + cmdFromImage = false + } + + cmd = append(config.Entrypoint, cmd...) + if len(cmd) == 0 { + return errors.New("no arguments specified") + } + + if config.ArgsEscaped && (len(config.Entrypoint) > 0 || cmdFromImage) { + s.Process.Args = nil + s.Process.CommandLine = cmd[0] + if len(cmd) > 1 { + s.Process.CommandLine += " " + escapeAndCombineArgs(cmd[1:]) + } + } else { + s.Process.Args = cmd + s.Process.CommandLine = "" } - s.Process.Args = append(config.Entrypoint, cmd...) s.Process.Cwd = config.WorkingDir s.Process.User = specs.User{ @@ -518,10 +575,25 @@ func WithNamespacedCgroup() SpecOpts { // WithUser sets the user to be used within the container. 
// It accepts a valid user string in OCI Image Spec v1.0.0: -// user, uid, user:group, uid:gid, uid:group, user:gid +// +// user, uid, user:group, uid:gid, uid:group, user:gid func WithUser(userstr string) SpecOpts { return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { + defer ensureAdditionalGids(s) setProcess(s) + s.Process.User.AdditionalGids = nil + + // For LCOW it's a bit harder to confirm that the user actually exists on the host as a rootfs isn't + // mounted on the host and shared into the guest, but rather the rootfs is constructed entirely in the + // guest itself. To accommodate this, a spot to place the user string provided by a client as-is is needed. + // The `Username` field on the runtime spec is marked by Platform as only for Windows, and in this case it + // *is* being set on a Windows host at least, but will be used as a temporary holding spot until the guest + // can use the string to perform these same operations to grab the uid:gid inside. + if s.Windows != nil && s.Linux != nil { + s.Process.User.Username = userstr + return nil + } + parts := strings.Split(userstr, ":") switch len(parts) { case 1: @@ -590,7 +662,12 @@ func WithUser(userstr string) SpecOpts { if err != nil { return err } - return mount.WithTempMount(ctx, mounts, f) + + // Use a read-only mount when trying to get user/group information + // from the container's rootfs. Since the option does read operation + // only, we append ReadOnly mount option to prevent the Linux kernel + // from syncing whole filesystem in umount syscall. 
+ return mount.WithReadonlyTempMount(ctx, mounts, f) default: return fmt.Errorf("invalid USER value %s", userstr) } @@ -600,7 +677,9 @@ func WithUser(userstr string) SpecOpts { // WithUIDGID allows the UID and GID for the Process to be set func WithUIDGID(uid, gid uint32) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + defer ensureAdditionalGids(s) setProcess(s) + s.Process.User.AdditionalGids = nil s.Process.User.UID = uid s.Process.User.GID = gid return nil @@ -613,37 +692,10 @@ func WithUIDGID(uid, gid uint32) SpecOpts { // additionally sets the gid to 0, and does not return an error. func WithUserID(uid uint32) SpecOpts { return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { + defer ensureAdditionalGids(s) setProcess(s) - if c.Snapshotter == "" && c.SnapshotKey == "" { - if !isRootfsAbs(s.Root.Path) { - return errors.Errorf("rootfs absolute path is required") - } - user, err := UserFromPath(s.Root.Path, func(u user.User) bool { - return u.Uid == int(uid) - }) - if err != nil { - if os.IsNotExist(err) || err == ErrNoUsersFound { - s.Process.User.UID, s.Process.User.GID = uid, 0 - return nil - } - return err - } - s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) - return nil - - } - if c.Snapshotter == "" { - return errors.Errorf("no snapshotter set for container") - } - if c.SnapshotKey == "" { - return errors.Errorf("rootfs snapshot not created for container") - } - snapshotter := client.SnapshotService(c.Snapshotter) - mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) - if err != nil { - return err - } - return mount.WithTempMount(ctx, mounts, func(root string) error { + s.Process.User.AdditionalGids = nil + setUser := func(root string) error { user, err := UserFromPath(root, func(u user.User) bool { return u.Uid == int(uid) }) @@ -656,43 +708,46 @@ func WithUserID(uid uint32) SpecOpts { } s.Process.User.UID, s.Process.User.GID = 
uint32(user.Uid), uint32(user.Gid) return nil - }) + } + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !isRootfsAbs(s.Root.Path) { + return errors.New("rootfs absolute path is required") + } + return setUser(s.Root.Path) + } + if c.Snapshotter == "" { + return errors.New("no snapshotter set for container") + } + if c.SnapshotKey == "" { + return errors.New("rootfs snapshot not created for container") + } + snapshotter := client.SnapshotService(c.Snapshotter) + mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) + if err != nil { + return err + } + + // Use a read-only mount when trying to get user/group information + // from the container's rootfs. Since the option does read operation + // only, we append ReadOnly mount option to prevent the Linux kernel + // from syncing whole filesystem in umount syscall. + return mount.WithReadonlyTempMount(ctx, mounts, setUser) } } // WithUsername sets the correct UID and GID for the container // based on the image's /etc/passwd contents. If /etc/passwd // does not exist, or the username is not found in /etc/passwd, -// it returns error. +// it returns error. On Windows this sets the username as provided, +// the operating system will validate the user when going to run +// the container. 
func WithUsername(username string) SpecOpts { return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { + defer ensureAdditionalGids(s) setProcess(s) + s.Process.User.AdditionalGids = nil if s.Linux != nil { - if c.Snapshotter == "" && c.SnapshotKey == "" { - if !isRootfsAbs(s.Root.Path) { - return errors.Errorf("rootfs absolute path is required") - } - user, err := UserFromPath(s.Root.Path, func(u user.User) bool { - return u.Name == username - }) - if err != nil { - return err - } - s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) - return nil - } - if c.Snapshotter == "" { - return errors.Errorf("no snapshotter set for container") - } - if c.SnapshotKey == "" { - return errors.Errorf("rootfs snapshot not created for container") - } - snapshotter := client.SnapshotService(c.Snapshotter) - mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) - if err != nil { - return err - } - return mount.WithTempMount(ctx, mounts, func(root string) error { + setUser := func(root string) error { user, err := UserFromPath(root, func(u user.User) bool { return u.Name == username }) @@ -701,7 +756,30 @@ func WithUsername(username string) SpecOpts { } s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) return nil - }) + } + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !isRootfsAbs(s.Root.Path) { + return errors.New("rootfs absolute path is required") + } + return setUser(s.Root.Path) + } + if c.Snapshotter == "" { + return errors.New("no snapshotter set for container") + } + if c.SnapshotKey == "" { + return errors.New("rootfs snapshot not created for container") + } + snapshotter := client.SnapshotService(c.Snapshotter) + mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) + if err != nil { + return err + } + + // Use a read-only mount when trying to get user/group information + // from the container's rootfs. 
Since the option does read operation + // only, we append ReadOnly mount option to prevent the Linux kernel + // from syncing whole filesystem in umount syscall. + return mount.WithReadonlyTempMount(ctx, mounts, setUser) } else if s.Windows != nil { s.Process.User.Username = username } else { @@ -712,16 +790,18 @@ func WithUsername(username string) SpecOpts { } // WithAdditionalGIDs sets the OCI spec's additionalGids array to any additional groups listed -// for a particular user in the /etc/groups file of the image's root filesystem +// for a particular user in the /etc/group file of the image's root filesystem // The passed in user can be either a uid or a username. func WithAdditionalGIDs(userstr string) SpecOpts { return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { - // For LCOW additional GID's not supported - if s.Windows != nil { + // For LCOW or on Darwin additional GID's not supported + if s.Windows != nil || runtime.GOOS == "darwin" { return nil } setProcess(s) + s.Process.User.AdditionalGids = nil setAdditionalGids := func(root string) error { + defer ensureAdditionalGids(s) var username string uid, err := strconv.Atoi(userstr) if err == nil { @@ -761,22 +841,92 @@ func WithAdditionalGIDs(userstr string) SpecOpts { } if c.Snapshotter == "" && c.SnapshotKey == "" { if !isRootfsAbs(s.Root.Path) { - return errors.Errorf("rootfs absolute path is required") + return errors.New("rootfs absolute path is required") } return setAdditionalGids(s.Root.Path) } if c.Snapshotter == "" { - return errors.Errorf("no snapshotter set for container") + return errors.New("no snapshotter set for container") } if c.SnapshotKey == "" { - return errors.Errorf("rootfs snapshot not created for container") + return errors.New("rootfs snapshot not created for container") } snapshotter := client.SnapshotService(c.Snapshotter) mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) if err != nil { return err } - return mount.WithTempMount(ctx, 
mounts, setAdditionalGids) + + // Use a read-only mount when trying to get user/group information + // from the container's rootfs. Since the option does read operation + // only, we append ReadOnly mount option to prevent the Linux kernel + // from syncing whole filesystem in umount syscall. + return mount.WithReadonlyTempMount(ctx, mounts, setAdditionalGids) + } +} + +// WithAppendAdditionalGroups append additional groups within the container. +// The passed in groups can be either a gid or a groupname. +func WithAppendAdditionalGroups(groups ...string) SpecOpts { + return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { + // For LCOW or on Darwin additional GID's are not supported + if s.Windows != nil || runtime.GOOS == "darwin" { + return nil + } + setProcess(s) + setAdditionalGids := func(root string) error { + defer ensureAdditionalGids(s) + gpath, err := fs.RootPath(root, "/etc/group") + if err != nil { + return err + } + ugroups, err := user.ParseGroupFile(gpath) + if err != nil { + return err + } + groupMap := make(map[string]user.Group) + for _, group := range ugroups { + groupMap[group.Name] = group + } + var gids []uint32 + for _, group := range groups { + gid, err := strconv.ParseUint(group, 10, 32) + if err == nil { + gids = append(gids, uint32(gid)) + } else { + g, ok := groupMap[group] + if !ok { + return fmt.Errorf("unable to find group %s", group) + } + gids = append(gids, uint32(g.Gid)) + } + } + s.Process.User.AdditionalGids = append(s.Process.User.AdditionalGids, gids...) 
+ return nil + } + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !filepath.IsAbs(s.Root.Path) { + return errors.New("rootfs absolute path is required") + } + return setAdditionalGids(s.Root.Path) + } + if c.Snapshotter == "" { + return errors.New("no snapshotter set for container") + } + if c.SnapshotKey == "" { + return errors.New("rootfs snapshot not created for container") + } + snapshotter := client.SnapshotService(c.Snapshotter) + mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) + if err != nil { + return err + } + + // Use a read-only mount when trying to get user/group information + // from the container's rootfs. Since the option does read operation + // only, we append ReadOnly mount option to prevent the Linux kernel + // from syncing whole filesystem in umount syscall. + return mount.WithReadonlyTempMount(ctx, mounts, setAdditionalGids) } } @@ -884,7 +1034,7 @@ func UserFromPath(root string, filter func(user.User) bool) (user.User, error) { // ErrNoGroupsFound can be returned from GIDFromPath var ErrNoGroupsFound = errors.New("no groups found") -// GIDFromPath inspects the GID using /etc/passwd in the specified rootfs. +// GIDFromPath inspects the GID using /etc/group in the specified rootfs. // filter can be nil. func GIDFromPath(root string, filter func(user.Group) bool) (gid uint32, err error) { gpath, err := fs.RootPath(root, "/etc/group") @@ -1098,20 +1248,13 @@ func WithDefaultUnixDevices(_ context.Context, _ Client, _ *containers.Container Allow: true, }, { + // "dev/ptmx" Type: "c", Major: intptr(5), Minor: intptr(2), Access: rwm, Allow: true, }, - { - // tuntap - Type: "c", - Major: intptr(10), - Minor: intptr(200), - Access: rwm, - Allow: true, - }, }...) 
return nil } @@ -1197,7 +1340,7 @@ func WithLinuxDevice(path, permissions string) SpecOpts { setLinux(s) setResources(s) - dev, err := deviceFromPath(path) + dev, err := DeviceFromPath(path) if err != nil { return err } @@ -1245,16 +1388,16 @@ var ErrNoShmMount = errors.New("no /dev/shm mount specified") // // The size value is specified in kb, kilobytes. func WithDevShmSize(kb int64) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - for _, m := range s.Mounts { - if m.Source == "shm" && m.Type == "tmpfs" { - for i, o := range m.Options { - if strings.HasPrefix(o, "size=") { - m.Options[i] = fmt.Sprintf("size=%dk", kb) - return nil + return func(ctx context.Context, _ Client, _ *containers.Container, s *Spec) error { + for i, m := range s.Mounts { + if filepath.Clean(m.Destination) == "/dev/shm" && m.Source == "shm" && m.Type == "tmpfs" { + for i := 0; i < len(m.Options); i++ { + if strings.HasPrefix(m.Options[i], "size=") { + m.Options = append(m.Options[:i], m.Options[i+1:]...) + i-- } } - m.Options = append(m.Options, fmt.Sprintf("size=%dk", kb)) + s.Mounts[i].Options = append(m.Options, fmt.Sprintf("size=%dk", kb)) return nil } } diff --git a/oci/spec_opts_linux.go b/oci/spec_opts_linux.go index ae8c0a7..34651d1 100644 --- a/oci/spec_opts_linux.go +++ b/oci/spec_opts_linux.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -133,7 +131,7 @@ var WithAllCurrentCapabilities = func(ctx context.Context, client Client, c *con return WithCapabilities(caps)(ctx, client, c, s) } -// WithAllKnownCapabilities sets all the the known linux capabilities for the container process +// WithAllKnownCapabilities sets all the known linux capabilities for the container process var WithAllKnownCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { caps := cap.Known() return WithCapabilities(caps)(ctx, client, c, s) @@ -143,3 +141,19 @@ var WithAllKnownCapabilities = func(ctx context.Context, client Client, c *conta func WithoutRunMount(ctx context.Context, client Client, c *containers.Container, s *Spec) error { return WithoutMounts("/run")(ctx, client, c, s) } + +// WithRdt sets the container's RDT parameters +func WithRdt(closID, l3CacheSchema, memBwSchema string) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + s.Linux.IntelRdt = &specs.LinuxIntelRdt{ + ClosID: closID, + L3CacheSchema: l3CacheSchema, + MemBwSchema: memBwSchema, + } + return nil + } +} + +func escapeAndCombineArgs(args []string) string { + panic("not supported") +} diff --git a/oci/spec_opts_linux_test.go b/oci/spec_opts_linux_test.go index 23ecc5d..60f3ced 100644 --- a/oci/spec_opts_linux_test.go +++ b/oci/spec_opts_linux_test.go @@ -18,16 +18,203 @@ package oci import ( "context" - "io/ioutil" + "fmt" "os" "path/filepath" "testing" + "github.com/containerd/containerd/containers" "github.com/containerd/containerd/pkg/testutil" + "github.com/containerd/continuity/fs/fstest" specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/stretchr/testify/assert" "golang.org/x/sys/unix" ) +//nolint:gosec +func TestWithUserID(t *testing.T) { + t.Parallel() + + expectedPasswd := `root:x:0:0:root:/root:/bin/ash +guest:x:405:100:guest:/dev/null:/sbin/nologin +` + td := t.TempDir() + apply := fstest.Apply( + fstest.CreateDir("/etc", 0777), + 
fstest.CreateFile("/etc/passwd", []byte(expectedPasswd), 0777), + ) + if err := apply.Apply(td); err != nil { + t.Fatalf("failed to apply: %v", err) + } + c := containers.Container{ID: t.Name()} + testCases := []struct { + userID uint32 + expectedUID uint32 + expectedGID uint32 + }{ + { + userID: 0, + expectedUID: 0, + expectedGID: 0, + }, + { + userID: 405, + expectedUID: 405, + expectedGID: 100, + }, + { + userID: 1000, + expectedUID: 1000, + expectedGID: 0, + }, + } + for _, testCase := range testCases { + testCase := testCase + t.Run(fmt.Sprintf("user %d", testCase.userID), func(t *testing.T) { + t.Parallel() + s := Spec{ + Version: specs.Version, + Root: &specs.Root{ + Path: td, + }, + Linux: &specs.Linux{}, + } + err := WithUserID(testCase.userID)(context.Background(), nil, &c, &s) + assert.NoError(t, err) + assert.Equal(t, testCase.expectedUID, s.Process.User.UID) + assert.Equal(t, testCase.expectedGID, s.Process.User.GID) + }) + } +} + +//nolint:gosec +func TestWithUsername(t *testing.T) { + t.Parallel() + + expectedPasswd := `root:x:0:0:root:/root:/bin/ash +guest:x:405:100:guest:/dev/null:/sbin/nologin +` + td := t.TempDir() + apply := fstest.Apply( + fstest.CreateDir("/etc", 0777), + fstest.CreateFile("/etc/passwd", []byte(expectedPasswd), 0777), + ) + if err := apply.Apply(td); err != nil { + t.Fatalf("failed to apply: %v", err) + } + c := containers.Container{ID: t.Name()} + testCases := []struct { + user string + expectedUID uint32 + expectedGID uint32 + err string + }{ + { + user: "root", + expectedUID: 0, + expectedGID: 0, + }, + { + user: "guest", + expectedUID: 405, + expectedGID: 100, + }, + { + user: "1000", + err: "no users found", + }, + { + user: "unknown", + err: "no users found", + }, + } + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.user, func(t *testing.T) { + t.Parallel() + s := Spec{ + Version: specs.Version, + Root: &specs.Root{ + Path: td, + }, + Linux: &specs.Linux{}, + } + err := 
WithUsername(testCase.user)(context.Background(), nil, &c, &s) + if err != nil { + assert.EqualError(t, err, testCase.err) + } + assert.Equal(t, testCase.expectedUID, s.Process.User.UID) + assert.Equal(t, testCase.expectedGID, s.Process.User.GID) + }) + } + +} + +//nolint:gosec +func TestWithAdditionalGIDs(t *testing.T) { + t.Parallel() + expectedPasswd := `root:x:0:0:root:/root:/bin/ash +bin:x:1:1:bin:/bin:/sbin/nologin +daemon:x:2:2:daemon:/sbin:/sbin/nologin +` + expectedGroup := `root:x:0:root +bin:x:1:root,bin,daemon +daemon:x:2:root,bin,daemon +sys:x:3:root,bin,adm +` + td := t.TempDir() + apply := fstest.Apply( + fstest.CreateDir("/etc", 0777), + fstest.CreateFile("/etc/passwd", []byte(expectedPasswd), 0777), + fstest.CreateFile("/etc/group", []byte(expectedGroup), 0777), + ) + if err := apply.Apply(td); err != nil { + t.Fatalf("failed to apply: %v", err) + } + c := containers.Container{ID: t.Name()} + + testCases := []struct { + user string + expected []uint32 + }{ + { + user: "root", + expected: []uint32{0, 1, 2, 3}, + }, + { + user: "1000", + expected: []uint32{0}, + }, + { + user: "bin", + expected: []uint32{0, 2, 3}, + }, + { + user: "bin:root", + expected: []uint32{0}, + }, + { + user: "daemon", + expected: []uint32{0, 1}, + }, + } + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.user, func(t *testing.T) { + t.Parallel() + s := Spec{ + Version: specs.Version, + Root: &specs.Root{ + Path: td, + }, + } + err := WithAdditionalGIDs(testCase.user)(context.Background(), nil, &c, &s) + assert.NoError(t, err) + assert.Equal(t, testCase.expected, s.Process.User.AdditionalGids) + }) + } +} + func TestAddCaps(t *testing.T) { t.Parallel() @@ -111,14 +298,14 @@ func TestDropCaps(t *testing.T) { func TestGetDevices(t *testing.T) { testutil.RequiresRoot(t) - dir, err := ioutil.TempDir("/dev", t.Name()) + dir, err := os.MkdirTemp("/dev", t.Name()) if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) zero := filepath.Join(dir, "zero") - 
if err := ioutil.WriteFile(zero, nil, 0600); err != nil { + if err := os.WriteFile(zero, nil, 0600); err != nil { t.Fatal(err) } @@ -158,7 +345,7 @@ func TestGetDevices(t *testing.T) { }) t.Run("two devices", func(t *testing.T) { nullDev := filepath.Join(dir, "null") - if err := ioutil.WriteFile(nullDev, nil, 0600); err != nil { + if err := os.WriteFile(nullDev, nil, 0600); err != nil { t.Fatal(err) } @@ -233,7 +420,7 @@ func TestGetDevices(t *testing.T) { } }) t.Run("regular file in dir", func(t *testing.T) { - if err := ioutil.WriteFile(filepath.Join(dir, "somefile"), []byte("hello"), 0600); err != nil { + if err := os.WriteFile(filepath.Join(dir, "somefile"), []byte("hello"), 0600); err != nil { t.Fatal(err) } defer os.Remove(filepath.Join(dir, "somefile")) @@ -248,3 +435,84 @@ func TestGetDevices(t *testing.T) { }) }) } + +func TestWithAppendAdditionalGroups(t *testing.T) { + t.Parallel() + expectedContent := `root:x:0:root +bin:x:1:root,bin,daemon +daemon:x:2:root,bin,daemon +` + td := t.TempDir() + apply := fstest.Apply( + fstest.CreateDir("/etc", 0777), + fstest.CreateFile("/etc/group", []byte(expectedContent), 0777), + ) + if err := apply.Apply(td); err != nil { + t.Fatalf("failed to apply: %v", err) + } + c := containers.Container{ID: t.Name()} + + testCases := []struct { + name string + additionalGIDs []uint32 + groups []string + expected []uint32 + err string + }{ + { + name: "no additional gids", + groups: []string{}, + expected: []uint32{0}, + }, + { + name: "no additional gids, append root gid", + groups: []string{"root"}, + expected: []uint32{0}, + }, + { + name: "no additional gids, append bin and daemon gids", + groups: []string{"bin", "daemon"}, + expected: []uint32{0, 1, 2}, + }, + { + name: "has root additional gids, append bin and daemon gids", + additionalGIDs: []uint32{0}, + groups: []string{"bin", "daemon"}, + expected: []uint32{0, 1, 2}, + }, + { + name: "append group id", + groups: []string{"999"}, + expected: []uint32{0, 999}, + }, + { + 
name: "unknown group", + groups: []string{"unknown"}, + err: "unable to find group unknown", + expected: []uint32{0}, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + s := Spec{ + Version: specs.Version, + Root: &specs.Root{ + Path: td, + }, + Process: &specs.Process{ + User: specs.User{ + AdditionalGids: testCase.additionalGIDs, + }, + }, + } + err := WithAppendAdditionalGroups(testCase.groups...)(context.Background(), nil, &c, &s) + if err != nil { + assert.EqualError(t, err, testCase.err) + } + assert.Equal(t, testCase.expected, s.Process.User.AdditionalGids) + }) + } +} diff --git a/oci/spec_opts_nonlinux.go b/oci/spec_opts_nonlinux.go index 77a1636..ad1faa4 100644 --- a/oci/spec_opts_nonlinux.go +++ b/oci/spec_opts_nonlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* @@ -20,19 +21,32 @@ package oci import ( "context" + "errors" "github.com/containerd/containerd/containers" ) // WithAllCurrentCapabilities propagates the effective capabilities of the caller process to the container process. // The capability set may differ from WithAllKnownCapabilities when running in a container. 
-//nolint: deadcode, unused var WithAllCurrentCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { return WithCapabilities(nil)(ctx, client, c, s) } -// WithAllKnownCapabilities sets all the the known linux capabilities for the container process -//nolint: deadcode, unused +// WithAllKnownCapabilities sets all the known linux capabilities for the container process var WithAllKnownCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { return WithCapabilities(nil)(ctx, client, c, s) } + +// WithCPUShares sets the container's cpu shares +func WithCPUShares(shares uint64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + return nil + } +} + +// WithRdt sets the container's RDT parameters +func WithRdt(closID, l3CacheSchema, memBwSchema string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, _ *Spec) error { + return errors.New("RDT not supported") + } +} diff --git a/oci/spec_opts_test.go b/oci/spec_opts_test.go index 7cc94b9..ce3a850 100644 --- a/oci/spec_opts_test.go +++ b/oci/spec_opts_test.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -327,7 +326,7 @@ func TestWithSpecFromFile(t *testing.T) { ctx = namespaces.WithNamespace(context.Background(), "test") ) - fp, err := ioutil.TempFile("", "testwithdefaultspec.json") + fp, err := os.CreateTemp("", "testwithdefaultspec.json") if err != nil { t.Fatal(err) } @@ -552,55 +551,103 @@ func TestWithImageConfigArgs(t *testing.T) { func TestDevShmSize(t *testing.T) { t.Parallel() - var ( - s Spec - c = containers.Container{ID: t.Name()} - ctx = namespaces.WithNamespace(context.Background(), "test") - ) - err := populateDefaultUnixSpec(ctx, &s, c.ID) - if err != nil { - t.Fatal(err) + ss := []Spec{ + { + Mounts: []specs.Mount{ + { + Destination: "/dev/shm", + Type: "tmpfs", + Source: "shm", + Options: 
[]string{"nosuid", "noexec", "nodev", "mode=1777"}, + }, + }, + }, + { + Mounts: []specs.Mount{ + { + Destination: "/test/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, + }, + }, + }, + { + Mounts: []specs.Mount{ + { + Destination: "/test/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, + }, + { + Destination: "/dev/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k", "size=131072k"}, + }, + }, + }, } expected := "1024k" - if err := WithDevShmSize(1024)(nil, nil, nil, &s); err != nil { - t.Fatal(err) - } - m := getShmMount(&s) - if m == nil { - t.Fatal("no shm mount found") - } - o := getShmSize(m.Options) - if o == "" { - t.Fatal("shm size not specified") - } - parts := strings.Split(o, "=") - if len(parts) != 2 { - t.Fatal("invalid size format") - } - size := parts[1] - if size != expected { - t.Fatalf("size %s not equal %s", size, expected) + for _, s := range ss { + s := s + if err := WithDevShmSize(1024)(nil, nil, nil, &s); err != nil { + if err != ErrNoShmMount { + t.Fatal(err) + } + + if getDevShmMount(&s) == nil { + continue + } + t.Fatal("excepted nil /dev/shm mount") + } + + m := getDevShmMount(&s) + if m == nil { + t.Fatal("no shm mount found") + } + size, err := getShmSize(m.Options) + if err != nil { + t.Fatal(err) + } + if size != expected { + t.Fatalf("size %s not equal %s", size, expected) + } } } -func getShmMount(s *Spec) *specs.Mount { +func getDevShmMount(s *Spec) *specs.Mount { for _, m := range s.Mounts { - if m.Source == "shm" && m.Type == "tmpfs" { + if filepath.Clean(m.Destination) == "/dev/shm" && m.Source == "shm" && m.Type == "tmpfs" { return &m } } return nil } -func getShmSize(opts []string) string { +func getShmSize(opts []string) (string, error) { + // linux will use the last size option + var so string for _, o := range opts { if 
strings.HasPrefix(o, "size=") { - return o + if so != "" { + return "", errors.New("contains multiple size options") + } + so = o } } - return "" + if so == "" { + return "", errors.New("shm size not specified") + } + + parts := strings.Split(so, "=") + if len(parts) != 2 { + return "", errors.New("invalid size format") + } + return parts[1], nil } func TestWithoutMounts(t *testing.T) { diff --git a/oci/spec_opts_unix.go b/oci/spec_opts_unix.go index 80a5223..a616577 100644 --- a/oci/spec_opts_unix.go +++ b/oci/spec_opts_unix.go @@ -1,3 +1,4 @@ +//go:build !linux && !windows // +build !linux,!windows /* @@ -36,7 +37,7 @@ func WithHostDevices(_ context.Context, _ Client, _ *containers.Container, s *Sp return nil } -// WithDevices recursively adds devices from the passed in path and associated cgroup rules for that device. +// WithDevices recursively adds devices from the passed in path. // If devicePath is a dir it traverses the dir to add all devices in that dir. // If devicePath is not a dir, it attempts to add the single device. func WithDevices(devicePath, containerPath, permissions string) SpecOpts { @@ -56,3 +57,7 @@ func WithCPUCFS(quota int64, period uint64) SpecOpts { return nil } } + +func escapeAndCombineArgs(args []string) string { + panic("not supported") +} diff --git a/oci/spec_opts_unix_test.go b/oci/spec_opts_unix_test.go index 4dd9a48..85d15bd 100644 --- a/oci/spec_opts_unix_test.go +++ b/oci/spec_opts_unix_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/oci/spec_opts_windows.go b/oci/spec_opts_windows.go index 126a89e..602d40e 100644 --- a/oci/spec_opts_windows.go +++ b/oci/spec_opts_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -20,10 +18,13 @@ package oci import ( "context" + "errors" + "strings" "github.com/containerd/containerd/containers" + specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "golang.org/x/sys/windows" ) // WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the @@ -67,6 +68,16 @@ func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts { } } +// WithProcessCommandLine replaces the command line on the generated spec +func WithProcessCommandLine(cmdLine string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setProcess(s) + s.Process.Args = nil + s.Process.CommandLine = cmdLine + return nil + } +} + // WithHostDevices adds all the hosts device nodes to the container's spec // // Not supported on windows @@ -74,6 +85,28 @@ func WithHostDevices(_ context.Context, _ Client, _ *containers.Container, s *Sp return nil } -func deviceFromPath(path string) (*specs.LinuxDevice, error) { +func DeviceFromPath(path string) (*specs.LinuxDevice, error) { return nil, errors.New("device from path not supported on Windows") } + +// WithWindowsNetworkNamespace sets the network namespace for a Windows container. 
+func WithWindowsNetworkNamespace(ns string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + if s.Windows.Network == nil { + s.Windows.Network = &specs.WindowsNetwork{} + } + s.Windows.Network.NetworkNamespace = ns + return nil + } +} + +func escapeAndCombineArgs(args []string) string { + escaped := make([]string, len(args)) + for i, a := range args { + escaped[i] = windows.EscapeArg(a) + } + return strings.Join(escaped, " ") +} diff --git a/oci/spec_opts_windows_test.go b/oci/spec_opts_windows_test.go index f2246ba..2646670 100644 --- a/oci/spec_opts_windows_test.go +++ b/oci/spec_opts_windows_test.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -24,6 +22,9 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/namespaces" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/opencontainers/runtime-spec/specs-go" ) func TestWithCPUCount(t *testing.T) { @@ -110,3 +111,416 @@ func TestWithWindowNetworksAllowUnqualifiedDNSQuery(t *testing.T) { } } } + +// TestWithProcessArgsOverwritesWithImage verifies that when calling +// WithImageConfig followed by WithProcessArgs when `ArgsEscaped==false` that +// the process args overwrite the image args. 
+func TestWithProcessArgsOverwritesWithImage(t *testing.T) { + t.Parallel() + + img, err := newFakeImage(ocispec.Image{ + Config: ocispec.ImageConfig{ + Entrypoint: []string{"powershell.exe", "-Command", "Write-Host Hello"}, + Cmd: []string{"cmd.exe", "/S", "/C", "echo Hello"}, + ArgsEscaped: false, + }, + }) + if err != nil { + t.Fatal(err) + } + + s := Spec{ + Version: specs.Version, + Root: &specs.Root{}, + Windows: &specs.Windows{}, + } + + args := []string{"cmd.exe", "echo", "should be set"} + opts := []SpecOpts{ + WithImageConfig(img), + WithProcessArgs(args...), + } + + for _, opt := range opts { + if err := opt(nil, nil, nil, &s); err != nil { + t.Fatal(err) + } + } + + if err := assertEqualsStringArrays(args, s.Process.Args); err != nil { + t.Fatal(err) + } + if s.Process.CommandLine != "" { + t.Fatalf("Expected empty CommandLine, got: '%s'", s.Process.CommandLine) + } +} + +// TestWithProcessArgsOverwritesWithImageArgsEscaped verifies that when calling +// WithImageConfig followed by WithProcessArgs when `ArgsEscaped==true` that the +// process args overwrite the image args. 
+func TestWithProcessArgsOverwritesWithImageArgsEscaped(t *testing.T) { + t.Parallel() + + img, err := newFakeImage(ocispec.Image{ + Config: ocispec.ImageConfig{ + Entrypoint: []string{`powershell.exe -Command "C:\My Data\MyExe.exe" -arg1 "-arg2 value2"`}, + Cmd: []string{`cmd.exe /S /C "C:\test path\test.exe"`}, + ArgsEscaped: true, + }, + }) + if err != nil { + t.Fatal(err) + } + + s := Spec{ + Version: specs.Version, + Root: &specs.Root{}, + Windows: &specs.Windows{}, + } + + args := []string{"cmd.exe", "echo", "should be set"} + opts := []SpecOpts{ + WithImageConfig(img), + WithProcessArgs(args...), + } + + for _, opt := range opts { + if err := opt(nil, nil, nil, &s); err != nil { + t.Fatal(err) + } + } + + if err := assertEqualsStringArrays(args, s.Process.Args); err != nil { + t.Fatal(err) + } + if s.Process.CommandLine != "" { + t.Fatalf("Expected empty CommandLine, got: '%s'", s.Process.CommandLine) + } +} + +// TestWithImageOverwritesWithProcessArgs verifies that when calling +// WithProcessArgs followed by WithImageConfig `ArgsEscaped==false` that the +// image args overwrites process args. 
+func TestWithImageOverwritesWithProcessArgs(t *testing.T) { + t.Parallel() + + img, err := newFakeImage(ocispec.Image{ + Config: ocispec.ImageConfig{ + Entrypoint: []string{"powershell.exe", "-Command"}, + Cmd: []string{"Write-Host", "echo Hello"}, + }, + }) + if err != nil { + t.Fatal(err) + } + + s := Spec{ + Version: specs.Version, + Root: &specs.Root{}, + Windows: &specs.Windows{}, + } + + opts := []SpecOpts{ + WithProcessArgs("cmd.exe", "echo", "should not be set"), + WithImageConfig(img), + } + + for _, opt := range opts { + if err := opt(nil, nil, nil, &s); err != nil { + t.Fatal(err) + } + } + + expectedArgs := []string{"powershell.exe", "-Command", "Write-Host", "echo Hello"} + if err := assertEqualsStringArrays(expectedArgs, s.Process.Args); err != nil { + t.Fatal(err) + } + if s.Process.CommandLine != "" { + t.Fatalf("Expected empty CommandLine, got: '%s'", s.Process.CommandLine) + } +} + +// TestWithImageOverwritesWithProcessArgs verifies that when calling +// WithProcessArgs followed by WithImageConfig `ArgsEscaped==true` that the +// image args overwrites process args. 
+func TestWithImageArgsEscapedOverwritesWithProcessArgs(t *testing.T) { + t.Parallel() + + img, err := newFakeImage(ocispec.Image{ + Config: ocispec.ImageConfig{ + Entrypoint: []string{`powershell.exe -Command "C:\My Data\MyExe.exe" -arg1 "-arg2 value2"`}, + Cmd: []string{`cmd.exe /S /C "C:\test path\test.exe"`}, + ArgsEscaped: true, + }, + }) + if err != nil { + t.Fatal(err) + } + + s := Spec{ + Version: specs.Version, + Root: &specs.Root{}, + Windows: &specs.Windows{}, + } + + opts := []SpecOpts{ + WithProcessArgs("cmd.exe", "echo", "should not be set"), + WithImageConfig(img), + } + + expectedCommandLine := `powershell.exe -Command "C:\My Data\MyExe.exe" -arg1 "-arg2 value2" "cmd.exe /S /C \"C:\test path\test.exe\""` + + for _, opt := range opts { + if err := opt(nil, nil, nil, &s); err != nil { + t.Fatal(err) + } + } + + if s.Process.Args != nil { + t.Fatalf("Expected empty Process.Args, got: '%v'", s.Process.Args) + } + if expectedCommandLine != s.Process.CommandLine { + t.Fatalf("Expected CommandLine '%s', got: '%s'", expectedCommandLine, s.Process.CommandLine) + } +} + +func TestWithImageConfigArgsWindows(t *testing.T) { + testcases := []struct { + name string + entrypoint []string + cmd []string + args []string + + expectError bool + // When ArgsEscaped==false we always expect args and CommandLine=="" + expectedArgs []string + }{ + { + // This is not really a valid test case since Docker would have made + // the default cmd to be the shell. So just verify it hits the error + // case we expect. 
+ name: "EmptyEntrypoint_EmptyCmd_EmptyArgs", + entrypoint: nil, + cmd: nil, + args: nil, + expectError: true, + }, + { + name: "EmptyEntrypoint_EmptyCmd_Args", + entrypoint: nil, + cmd: nil, + args: []string{"additional", "args"}, + expectedArgs: []string{"additional", "args"}, + }, + { + name: "EmptyEntrypoint_Cmd_EmptyArgs", + entrypoint: nil, + cmd: []string{"cmd", "args"}, + args: nil, + expectedArgs: []string{"cmd", "args"}, + }, + { + name: "EmptyEntrypoint_Cmd_Args", + entrypoint: nil, + cmd: []string{"cmd", "args"}, + args: []string{"additional", "args"}, + expectedArgs: []string{"additional", "args"}, // Args overwrite Cmd + }, + { + name: "Entrypoint_EmptyCmd_EmptyArgs", + entrypoint: []string{"entrypoint", "args"}, + cmd: nil, + args: nil, + expectedArgs: []string{"entrypoint", "args"}, + }, + { + name: "Entrypoint_EmptyCmd_Args", + entrypoint: []string{"entrypoint", "args"}, + cmd: nil, + args: []string{"additional", "args"}, + expectedArgs: []string{"entrypoint", "args", "additional", "args"}, + }, + { + name: "Entrypoint_Cmd_EmptyArgs", + entrypoint: []string{"entrypoint", "args"}, + cmd: []string{"cmd", "args"}, + args: nil, + expectedArgs: []string{"entrypoint", "args", "cmd", "args"}, + }, + { + name: "Entrypoint_Cmd_Args", + entrypoint: []string{"entrypoint", "args"}, + cmd: []string{"cmd", "args"}, + args: []string{"additional", "args"}, // Args overwrites Cmd + expectedArgs: []string{"entrypoint", "args", "additional", "args"}, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + img, err := newFakeImage(ocispec.Image{ + Config: ocispec.ImageConfig{ + Entrypoint: tc.entrypoint, + Cmd: tc.cmd, + }, + }) + if err != nil { + t.Fatal(err) + } + + s := Spec{ + Version: specs.Version, + Root: &specs.Root{}, + Windows: &specs.Windows{}, + } + + opts := []SpecOpts{ + WithImageConfigArgs(img, tc.args), + } + + for _, opt := range opts { + if err := opt(nil, nil, nil, &s); err != nil { + if tc.expectError { + continue + } + 
t.Fatal(err) + } + } + + if err := assertEqualsStringArrays(tc.expectedArgs, s.Process.Args); err != nil { + t.Fatal(err) + } + if s.Process.CommandLine != "" { + t.Fatalf("Expected empty CommandLine, got: '%s'", s.Process.CommandLine) + } + }) + } +} + +func TestWithImageConfigArgsEscapedWindows(t *testing.T) { + testcases := []struct { + name string + entrypoint []string + cmd []string + args []string + + expectError bool + expectedArgs []string + expectedCommandLine string + }{ + { + // This is not really a valid test case since Docker would have made + // the default cmd to be the shell. So just verify it hits the error + // case we expect. + name: "EmptyEntrypoint_EmptyCmd_EmptyArgs", + entrypoint: nil, + cmd: nil, + args: nil, + expectError: true, + expectedArgs: nil, + expectedCommandLine: "", + }, + { + // This case is special for ArgsEscaped, since there is no Image + // Default Args should be passed as ProcessArgs not as Cmdline + name: "EmptyEntrypoint_EmptyCmd_Args", + entrypoint: nil, + cmd: nil, + args: []string{"additional", "-args", "hello world"}, + expectedArgs: []string{"additional", "-args", "hello world"}, + expectedCommandLine: "", + }, + { + name: "EmptyEntrypoint_Cmd_EmptyArgs", + entrypoint: nil, + cmd: []string{`cmd -args "hello world"`}, + args: nil, + expectedCommandLine: `cmd -args "hello world"`, + }, + { + // This case is a second special case for ArgsEscaped, since Args + // overwrite Cmd the args are not from the image, so ArgsEscaped + // should be ignored, and passed as Args not CommandLine. 
+ name: "EmptyEntrypoint_Cmd_Args", + entrypoint: nil, + cmd: []string{`cmd -args "hello world"`}, + args: []string{"additional", "args"}, + expectedArgs: []string{"additional", "args"}, // Args overwrite Cmd + expectedCommandLine: "", + }, + { + name: "Entrypoint_EmptyCmd_EmptyArgs", + entrypoint: []string{`"C:\My Folder\MyProcess.exe" -arg1 "test value"`}, + cmd: nil, + args: nil, + expectedCommandLine: `"C:\My Folder\MyProcess.exe" -arg1 "test value"`, + }, + { + name: "Entrypoint_EmptyCmd_Args", + entrypoint: []string{`"C:\My Folder\MyProcess.exe" -arg1 "test value"`}, + cmd: nil, + args: []string{"additional", "args with spaces"}, + expectedCommandLine: `"C:\My Folder\MyProcess.exe" -arg1 "test value" additional "args with spaces"`, + }, + { + // This case will not work in Docker today so adding the test to + // confirm we fail in the same way. Although the appending of + // Entrypoint + " " + Cmd here works, Cmd is double escaped and the + // container would not launch. This is because when Docker built + // such an image it escaped both Entrypoint and Cmd. However the + // docs say that CMD should always be appened to entrypoint if not + // overwritten so this results in an incorrect cmdline. 
+ name: "Entrypoint_Cmd_EmptyArgs", + entrypoint: []string{`"C:\My Folder\MyProcess.exe" -arg1 "test value"`}, + cmd: []string{`cmd -args "hello world"`}, + args: nil, + expectedCommandLine: `"C:\My Folder\MyProcess.exe" -arg1 "test value" "cmd -args \"hello world\""`, + }, + { + name: "Entrypoint_Cmd_Args", + entrypoint: []string{`"C:\My Folder\MyProcess.exe" -arg1 "test value"`}, + cmd: []string{`cmd -args "hello world"`}, + args: []string{"additional", "args with spaces"}, // Args overwrites Cmd + expectedCommandLine: `"C:\My Folder\MyProcess.exe" -arg1 "test value" additional "args with spaces"`, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + img, err := newFakeImage(ocispec.Image{ + Config: ocispec.ImageConfig{ + Entrypoint: tc.entrypoint, + Cmd: tc.cmd, + ArgsEscaped: true, + }, + }) + if err != nil { + t.Fatal(err) + } + + s := Spec{ + Version: specs.Version, + Root: &specs.Root{}, + Windows: &specs.Windows{}, + } + + opts := []SpecOpts{ + WithImageConfigArgs(img, tc.args), + } + + for _, opt := range opts { + if err := opt(nil, nil, nil, &s); err != nil { + if tc.expectError { + continue + } + t.Fatal(err) + } + } + + if err := assertEqualsStringArrays(tc.expectedArgs, s.Process.Args); err != nil { + t.Fatal(err) + } + if tc.expectedCommandLine != s.Process.CommandLine { + t.Fatalf("Expected CommandLine: '%s', got: '%s'", tc.expectedCommandLine, s.Process.CommandLine) + } + }) + } +} diff --git a/oci/utils_unix.go b/oci/utils_unix.go index 108cacf..306f098 100644 --- a/oci/utils_unix.go +++ b/oci/utils_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,16 +20,25 @@ package oci import ( - "io/ioutil" + "errors" + "fmt" "os" "path/filepath" + "github.com/containerd/containerd/pkg/userns" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) -var errNotADevice = errors.New("not a device node") +// ErrNotADevice denotes that a file is not a valid 
linux device. +var ErrNotADevice = errors.New("not a device node") + +// Testing dependencies +var ( + osReadDir = os.ReadDir + usernsRunningInUserNS = userns.RunningInUserNS + overrideDeviceFromPath func(path string) error +) // HostDevices returns all devices that can be found under /dev directory. func HostDevices() ([]specs.LinuxDevice, error) { @@ -38,11 +48,11 @@ func HostDevices() ([]specs.LinuxDevice, error) { func getDevices(path, containerPath string) ([]specs.LinuxDevice, error) { stat, err := os.Stat(path) if err != nil { - return nil, errors.Wrap(err, "error stating device path") + return nil, fmt.Errorf("error stating device path: %w", err) } if !stat.IsDir() { - dev, err := deviceFromPath(path) + dev, err := DeviceFromPath(path) if err != nil { return nil, err } @@ -52,7 +62,7 @@ func getDevices(path, containerPath string) ([]specs.LinuxDevice, error) { return []specs.LinuxDevice{*dev}, nil } - files, err := ioutil.ReadDir(path) + files, err := osReadDir(path) if err != nil { return nil, err } @@ -72,6 +82,12 @@ func getDevices(path, containerPath string) ([]specs.LinuxDevice, error) { } sub, err := getDevices(filepath.Join(path, f.Name()), cp) if err != nil { + if errors.Is(err, os.ErrPermission) && usernsRunningInUserNS() { + // ignore the "permission denied" error if running in userns. + // This allows rootless containers to use devices that are + // accessible, ignoring devices / subdirectories that are not. 
+ continue + } return nil, err } @@ -80,49 +96,77 @@ func getDevices(path, containerPath string) ([]specs.LinuxDevice, error) { } case f.Name() == "console": continue - } - device, err := deviceFromPath(filepath.Join(path, f.Name())) - if err != nil { - if err == errNotADevice { + default: + device, err := DeviceFromPath(filepath.Join(path, f.Name())) + if err != nil { + if err == ErrNotADevice { + continue + } + if os.IsNotExist(err) { + continue + } + if errors.Is(err, os.ErrPermission) && usernsRunningInUserNS() { + // ignore the "permission denied" error if running in userns. + // This allows rootless containers to use devices that are + // accessible, ignoring devices that are not. + continue + } + return nil, err + } + if device.Type == fifoDevice { continue } - if os.IsNotExist(err) { - continue + if containerPath != "" { + device.Path = filepath.Join(containerPath, filepath.Base(f.Name())) } - return nil, err + out = append(out, *device) } - if containerPath != "" { - device.Path = filepath.Join(containerPath, filepath.Base(f.Name())) - } - out = append(out, *device) } return out, nil } -func deviceFromPath(path string) (*specs.LinuxDevice, error) { +// TODO consider adding these consts to the OCI runtime-spec. +const ( + wildcardDevice = "a" //nolint:nolintlint,unused,varcheck // currently unused, but should be included when upstreaming to OCI runtime-spec. + blockDevice = "b" + charDevice = "c" // or "u" + fifoDevice = "p" +) + +// DeviceFromPath takes the path to a device to look up the information about a +// linux device and returns that information as a LinuxDevice struct. +func DeviceFromPath(path string) (*specs.LinuxDevice, error) { + if overrideDeviceFromPath != nil { + if err := overrideDeviceFromPath(path); err != nil { + return nil, err + } + } + var stat unix.Stat_t if err := unix.Lstat(path, &stat); err != nil { return nil, err } var ( - devNumber = uint64(stat.Rdev) //nolint: unconvert // the type is 32bit on mips. 
+ devNumber = uint64(stat.Rdev) //nolint:nolintlint,unconvert // the type is 32bit on mips. major = unix.Major(devNumber) minor = unix.Minor(devNumber) ) - if major == 0 { - return nil, errNotADevice - } var ( devType string mode = stat.Mode ) - switch { - case mode&unix.S_IFBLK == unix.S_IFBLK: - devType = "b" - case mode&unix.S_IFCHR == unix.S_IFCHR: - devType = "c" + + switch mode & unix.S_IFMT { + case unix.S_IFBLK: + devType = blockDevice + case unix.S_IFCHR: + devType = charDevice + case unix.S_IFIFO: + devType = fifoDevice + default: + return nil, ErrNotADevice } fm := os.FileMode(mode &^ unix.S_IFMT) return &specs.LinuxDevice{ diff --git a/oci/utils_unix_go116_test.go b/oci/utils_unix_go116_test.go new file mode 100644 index 0000000..b1fa01b --- /dev/null +++ b/oci/utils_unix_go116_test.go @@ -0,0 +1,55 @@ +//go:build !go1.17 && !windows && !darwin +// +build !go1.17,!windows,!darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package oci + +import "io/fs" + +// The following code is adapted from go1.17.1/src/io/fs/readdir.go +// to compensate for the lack of fs.FileInfoToDirEntry in Go 1.16. + +// dirInfo is a DirEntry based on a FileInfo. 
+type dirInfo struct { + fileInfo fs.FileInfo +} + +func (di dirInfo) IsDir() bool { + return di.fileInfo.IsDir() +} + +func (di dirInfo) Type() fs.FileMode { + return di.fileInfo.Mode().Type() +} + +func (di dirInfo) Info() (fs.FileInfo, error) { + return di.fileInfo, nil +} + +func (di dirInfo) Name() string { + return di.fileInfo.Name() +} + +// fileInfoToDirEntry returns a DirEntry that returns information from info. +// If info is nil, FileInfoToDirEntry returns nil. +func fileInfoToDirEntry(info fs.FileInfo) fs.DirEntry { + if info == nil { + return nil + } + return dirInfo{fileInfo: info} +} diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_unix.go b/oci/utils_unix_go117_test.go similarity index 80% rename from vendor/github.com/containerd/continuity/sysx/nodata_unix.go rename to oci/utils_unix_go117_test.go index de4b3d5..20ef980 100644 --- a/vendor/github.com/containerd/continuity/sysx/nodata_unix.go +++ b/oci/utils_unix_go117_test.go @@ -1,4 +1,5 @@ -// +build darwin freebsd openbsd +//go:build go1.17 && !windows && !darwin +// +build go1.17,!windows,!darwin /* Copyright The containerd Authors. @@ -16,10 +17,8 @@ limitations under the License. */ -package sysx +package oci -import ( - "syscall" -) +import "io/fs" -const ENODATA = syscall.ENOATTR +var fileInfoToDirEntry = fs.FileInfoToDirEntry diff --git a/oci/utils_unix_test.go b/oci/utils_unix_test.go new file mode 100644 index 0000000..9f2c8d0 --- /dev/null +++ b/oci/utils_unix_test.go @@ -0,0 +1,165 @@ +//go:build !windows && !darwin +// +build !windows,!darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package oci + +import ( + "errors" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/containerd/containerd/pkg/userns" +) + +func cleanupTest() { + overrideDeviceFromPath = nil + osReadDir = os.ReadDir + usernsRunningInUserNS = userns.RunningInUserNS +} + +// Based on test from runc: +// https://github.com/opencontainers/runc/blob/v1.0.0/libcontainer/devices/device_unix_test.go#L34-L47 +func TestHostDevicesOSReadDirFailure(t *testing.T) { + testError := fmt.Errorf("test error: %w", os.ErrPermission) + + // Override os.ReadDir to inject error. + osReadDir = func(dirname string) ([]os.DirEntry, error) { + return nil, testError + } + + // Override userns.RunningInUserNS to ensure not running in user namespace. + usernsRunningInUserNS = func() bool { + return false + } + defer cleanupTest() + + _, err := HostDevices() + if !errors.Is(err, testError) { + t.Fatalf("Unexpected error %v, expected %v", err, testError) + } +} + +// Based on test from runc: +// https://github.com/opencontainers/runc/blob/v1.0.0/libcontainer/devices/device_unix_test.go#L34-L47 +func TestHostDevicesOSReadDirFailureInUserNS(t *testing.T) { + testError := fmt.Errorf("test error: %w", os.ErrPermission) + + // Override os.ReadDir to inject error. 
+ osReadDir = func(dirname string) ([]os.DirEntry, error) { + if dirname == "/dev" { + fi, err := os.Lstat("/dev/null") + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + + return []os.DirEntry{fileInfoToDirEntry(fi)}, nil + } + return nil, testError + } + // Override userns.RunningInUserNS to ensure running in user namespace. + usernsRunningInUserNS = func() bool { + return true + } + defer cleanupTest() + + _, err := HostDevices() + if !errors.Is(err, nil) { + t.Fatalf("Unexpected error %v, expected %v", err, nil) + } +} + +// Based on test from runc: +// https://github.com/opencontainers/runc/blob/v1.0.0/libcontainer/devices/device_unix_test.go#L49-L74 +func TestHostDevicesDeviceFromPathFailure(t *testing.T) { + testError := fmt.Errorf("test error: %w", os.ErrPermission) + + // Override DeviceFromPath to produce an os.ErrPermission on /dev/null. + overrideDeviceFromPath = func(path string) error { + if path == "/dev/null" { + return testError + } + return nil + } + + // Override userns.RunningInUserNS to ensure not running in user namespace. + usernsRunningInUserNS = func() bool { + return false + } + defer cleanupTest() + + d, err := HostDevices() + if !errors.Is(err, testError) { + t.Fatalf("Unexpected error %v, expected %v", err, testError) + } + + assert.Equal(t, 0, len(d)) +} + +// Based on test from runc: +// https://github.com/opencontainers/runc/blob/v1.0.0/libcontainer/devices/device_unix_test.go#L49-L74 +func TestHostDevicesDeviceFromPathFailureInUserNS(t *testing.T) { + testError := fmt.Errorf("test error: %w", os.ErrPermission) + + // Override DeviceFromPath to produce an os.ErrPermission on all devices, + // except for /dev/null. + overrideDeviceFromPath = func(path string) error { + if path == "/dev/null" { + return nil + } + return testError + } + + // Override userns.RunningInUserNS to ensure running in user namespace. 
+ usernsRunningInUserNS = func() bool { + return true + } + defer cleanupTest() + + d, err := HostDevices() + if !errors.Is(err, nil) { + t.Fatalf("Unexpected error %v, expected %v", err, nil) + } + assert.Equal(t, 1, len(d)) + assert.Equal(t, d[0].Path, "/dev/null") +} + +func TestHostDevicesAllValid(t *testing.T) { + devices, err := HostDevices() + if err != nil { + t.Fatalf("failed to get host devices: %v", err) + } + + for _, device := range devices { + // Devices can't have major number 0. + if device.Major == 0 { + t.Errorf("device entry %+v has zero major number", device) + } + switch device.Type { + case blockDevice, charDevice: + case fifoDevice: + t.Logf("fifo devices shouldn't show up from HostDevices") + fallthrough + default: + t.Errorf("device entry %+v has unexpected type %v", device, device.Type) + } + } +} diff --git a/pkg/apparmor/apparmor.go b/pkg/apparmor/apparmor.go index dd4d860..293f8ba 100644 --- a/pkg/apparmor/apparmor.go +++ b/pkg/apparmor/apparmor.go @@ -16,12 +16,13 @@ package apparmor -// HostSupports returns true if apparmor is enabled for the host, // On non-Linux returns false -// On Linux returns true if apparmor_parser is enabled, and if we -// are not running docker-in-docker. +// HostSupports returns true if apparmor is enabled for the host: +// - On Linux returns true if apparmor is enabled, apparmor_parser is +// present, and if we are not running docker-in-docker. +// - On non-Linux returns false. // -// It is a modified version of libcontainer/apparmor.IsEnabled(), which does not -// check for apparmor_parser to be present, or if we're running docker-in-docker. +// This is derived from libcontainer/apparmor.IsEnabled(), with the addition +// of checks for apparmor_parser to be present and docker-in-docker. 
func HostSupports() bool { return hostSupports() } diff --git a/pkg/apparmor/apparmor_linux.go b/pkg/apparmor/apparmor_linux.go index ee38585..c96de6a 100644 --- a/pkg/apparmor/apparmor_linux.go +++ b/pkg/apparmor/apparmor_linux.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. @@ -19,7 +17,6 @@ package apparmor import ( - "io/ioutil" "os" "sync" ) @@ -32,14 +29,14 @@ var ( // hostSupports returns true if apparmor is enabled for the host, if // apparmor_parser is enabled, and if we are not running docker-in-docker. // -// It is a modified version of libcontainer/apparmor.IsEnabled(), which does not -// check for apparmor_parser to be present, or if we're running docker-in-docker. +// This is derived from libcontainer/apparmor.IsEnabled(), with the addition +// of checks for apparmor_parser to be present and docker-in-docker. func hostSupports() bool { checkAppArmor.Do(func() { - // see https://github.com/docker/docker/commit/de191e86321f7d3136ff42ff75826b8107399497 + // see https://github.com/opencontainers/runc/blob/0d49470392206f40eaab3b2190a57fe7bb3df458/libcontainer/apparmor/apparmor_linux.go if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" { if _, err = os.Stat("/sbin/apparmor_parser"); err == nil { - buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") + buf, err := os.ReadFile("/sys/module/apparmor/parameters/enabled") appArmorSupported = err == nil && len(buf) > 1 && buf[0] == 'Y' } } diff --git a/pkg/apparmor/apparmor_unsupported.go b/pkg/apparmor/apparmor_unsupported.go index 428d364..8331703 100644 --- a/pkg/apparmor/apparmor_unsupported.go +++ b/pkg/apparmor/apparmor_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/pkg/atomicfile/file.go b/pkg/atomicfile/file.go new file mode 100644 index 0000000..7b870f7 --- /dev/null +++ b/pkg/atomicfile/file.go @@ -0,0 +1,148 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* +Package atomicfile provides a mechanism (on Unix-like platforms) to present a consistent view of a file to separate +processes even while the file is being written. This is accomplished by writing a temporary file, syncing to disk, and +renaming over the destination file name. + +Partial/inconsistent reads can occur due to: + 1. A process attempting to read the file while it is being written to (both in the case of a new file with a + short/incomplete write or in the case of an existing, updated file where new bytes may be written at the beginning + but old bytes may still be present after). + 2. Concurrent goroutines leading to multiple active writers of the same file. + +The above mechanism explicitly protects against (1) as all writes are to a file with a temporary name. + +There is no explicit protection against multiple, concurrent goroutines attempting to write the same file. However, +atomically writing the file should mean only one writer will "win" and a consistent file will be visible. + +Note: atomicfile is partially implemented for Windows. The Windows codepath performs the same operations, however +Windows does not guarantee that a rename operation is atomic; a crash in the middle may leave the destination file +truncated rather than with the expected content. 
+*/ +package atomicfile + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" +) + +// File is an io.ReadWriteCloser that can also be Canceled if a change needs to be abandoned. +type File interface { + io.ReadWriteCloser + // Cancel abandons a change to a file. This can be called if a write fails or another error occurs. + Cancel() error +} + +// ErrClosed is returned if Read or Write are called on a closed File. +var ErrClosed = errors.New("file is closed") + +// New returns a new atomic file. On Unix-like platforms, the writer (an io.ReadWriteCloser) is backed by a temporary +// file placed into the same directory as the destination file (using filepath.Dir to split the directory from the +// name). On a call to Close the temporary file is synced to disk and renamed to its final name, hiding any previous +// file by the same name. +// +// Note: Take care to call Close and handle any errors that are returned. Errors returned from Close may indicate that +// the file was not written with its final name. +func New(name string, mode os.FileMode) (File, error) { + return newFile(name, mode) +} + +type atomicFile struct { + name string + f *os.File + closed bool + closedMu sync.RWMutex +} + +func newFile(name string, mode os.FileMode) (File, error) { + dir := filepath.Dir(name) + f, err := os.CreateTemp(dir, "") + if err != nil { + return nil, fmt.Errorf("failed to create temp file: %w", err) + } + if err := f.Chmod(mode); err != nil { + return nil, fmt.Errorf("failed to change temp file permissions: %w", err) + } + return &atomicFile{name: name, f: f}, nil +} + +func (a *atomicFile) Close() (err error) { + a.closedMu.Lock() + defer a.closedMu.Unlock() + + if a.closed { + return nil + } + a.closed = true + + defer func() { + if err != nil { + _ = os.Remove(a.f.Name()) // ignore errors + } + }() + // The order of operations here is: + // 1. sync + // 2. close + // 3. 
rename + // While the ordering of 2 and 3 is not important on Unix-like operating systems, Windows cannot rename an open + // file. By closing first, we allow the rename operation to succeed. + if err = a.f.Sync(); err != nil { + return fmt.Errorf("failed to sync temp file %q: %w", a.f.Name(), err) + } + if err = a.f.Close(); err != nil { + return fmt.Errorf("failed to close temp file %q: %w", a.f.Name(), err) + } + if err = os.Rename(a.f.Name(), a.name); err != nil { + return fmt.Errorf("failed to rename %q to %q: %w", a.f.Name(), a.name, err) + } + return nil +} + +func (a *atomicFile) Cancel() error { + a.closedMu.Lock() + defer a.closedMu.Unlock() + + if a.closed { + return nil + } + a.closed = true + _ = a.f.Close() // ignore error + return os.Remove(a.f.Name()) +} + +func (a *atomicFile) Read(p []byte) (n int, err error) { + a.closedMu.RLock() + defer a.closedMu.RUnlock() + if a.closed { + return 0, ErrClosed + } + return a.f.Read(p) +} + +func (a *atomicFile) Write(p []byte) (n int, err error) { + a.closedMu.RLock() + defer a.closedMu.RUnlock() + if a.closed { + return 0, ErrClosed + } + return a.f.Write(p) +} diff --git a/pkg/atomicfile/file_test.go b/pkg/atomicfile/file_test.go new file mode 100644 index 0000000..4c86d33 --- /dev/null +++ b/pkg/atomicfile/file_test.go @@ -0,0 +1,77 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package atomicfile + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFile(t *testing.T) { + const content = "this is some test content for a file" + dir := t.TempDir() + path := filepath.Join(dir, "test-file") + + f, err := New(path, 0o644) + require.NoError(t, err, "failed to create file") + n, err := fmt.Fprint(f, content) + assert.NoError(t, err, "failed to write content") + assert.Equal(t, len(content), n, "written bytes should be equal") + err = f.Close() + require.NoError(t, err, "failed to close file") + + actual, err := os.ReadFile(path) + assert.NoError(t, err, "failed to read file") + assert.Equal(t, content, string(actual)) +} + +func TestConcurrentWrites(t *testing.T) { + const content1 = "this is the first content of the file. there should be none other." + const content2 = "the second content of the file should win!" + dir := t.TempDir() + path := filepath.Join(dir, "test-file") + + file1, err := New(path, 0o600) + require.NoError(t, err, "failed to create file1") + file2, err := New(path, 0o644) + require.NoError(t, err, "failed to create file2") + + n, err := fmt.Fprint(file1, content1) + assert.NoError(t, err, "failed to write content1") + assert.Equal(t, len(content1), n, "written bytes should be equal") + + n, err = fmt.Fprint(file2, content2) + assert.NoError(t, err, "failed to write content2") + assert.Equal(t, len(content2), n, "written bytes should be equal") + + err = file1.Close() + require.NoError(t, err, "failed to close file1") + actual, err := os.ReadFile(path) + assert.NoError(t, err, "failed to read file") + assert.Equal(t, content1, string(actual)) + + err = file2.Close() + require.NoError(t, err, "failed to close file2") + actual, err = os.ReadFile(path) + assert.NoError(t, err, "failed to read file") + assert.Equal(t, content2, string(actual)) +} diff --git a/pkg/cap/cap_linux.go b/pkg/cap/cap_linux.go index 
35772a4..2621257 100644 --- a/pkg/cap/cap_linux.go +++ b/pkg/cap/cap_linux.go @@ -19,12 +19,11 @@ package cap import ( "bufio" + "fmt" "io" "os" "strconv" "strings" - - "github.com/pkg/errors" ) // FromNumber returns a cap string like "CAP_SYS_ADMIN" @@ -91,7 +90,7 @@ func ParseProcPIDStatus(r io.Reader) (map[Type]uint64, error) { case "CapInh", "CapPrm", "CapEff", "CapBnd", "CapAmb": ui64, err := strconv.ParseUint(v, 16, 64) if err != nil { - return nil, errors.Errorf("failed to parse line %q", line) + return nil, fmt.Errorf("failed to parse line %q", line) } switch k { case "CapInh": @@ -117,9 +116,6 @@ func ParseProcPIDStatus(r io.Reader) (map[Type]uint64, error) { // the current process. // // The result is like []string{"CAP_SYS_ADMIN", ...}. -// -// The result does not contain caps that are not recognized by -// the "github.com/syndtr/gocapability" library. func Current() ([]string, error) { f, err := os.Open("/proc/self/status") if err != nil { diff --git a/pkg/cri/annotations/annotations.go b/pkg/cri/annotations/annotations.go index 68a0f48..59624b5 100644 --- a/pkg/cri/annotations/annotations.go +++ b/pkg/cri/annotations/annotations.go @@ -32,6 +32,16 @@ const ( // SandboxID is the sandbox ID annotation SandboxID = "io.kubernetes.cri.sandbox-id" + // SandboxCPU annotations are based on the initial CPU configuration for the sandbox. This is calculated as the + // sum of container CPU resources, optionally provided by Kubelet (introduced in 1.23) as part of the PodSandboxConfig + SandboxCPUPeriod = "io.kubernetes.cri.sandbox-cpu-period" + SandboxCPUQuota = "io.kubernetes.cri.sandbox-cpu-quota" + SandboxCPUShares = "io.kubernetes.cri.sandbox-cpu-shares" + + // SandboxMemory is the initial amount of memory associated with this sandbox. This is calculated as the sum + // of container memory, optionally provided by Kubelet (introduced in 1.23) as part of the PodSandboxConfig. 
+ SandboxMem = "io.kubernetes.cri.sandbox-memory" + // SandboxLogDir is the pod log directory annotation. // If the sandbox needs to generate any log, it will put it into this directory. // Kubelet will be responsible for: @@ -48,6 +58,11 @@ const ( // SandboxNamespace is the name of the namespace of the sandbox (pod) SandboxNamespace = "io.kubernetes.cri.sandbox-namespace" + // SandboxUID is the uid of the sandbox (pod) passed to CRI via RunPodSanbox, + // this field is useful for linking the uid created by the CRI client (e.g. kubelet) + // to the internal Sandbox.ID created by the containerd sandbox service + SandboxUID = "io.kubernetes.cri.sandbox-uid" + // SandboxName is the name of the sandbox (pod) SandboxName = "io.kubernetes.cri.sandbox-name" @@ -59,4 +74,7 @@ const ( // PodAnnotations are the annotations of the pod PodAnnotations = "io.kubernetes.cri.pod-annotations" + + // WindowsHostProcess is used by hcsshim to identify windows pods that are running HostProcesses + WindowsHostProcess = "microsoft.com/hostprocess-container" ) diff --git a/pkg/cri/config/config.go b/pkg/cri/config/config.go index e6f13f9..9a986ef 100644 --- a/pkg/cri/config/config.go +++ b/pkg/cri/config/config.go @@ -18,12 +18,13 @@ package config import ( "context" + "errors" + "fmt" "net/url" "time" "github.com/containerd/containerd/log" "github.com/containerd/containerd/plugin" - "github.com/pkg/errors" ) // Runtime struct to contain the type(ID), engine, and root variables for a default runtime @@ -31,6 +32,10 @@ import ( type Runtime struct { // Type is the runtime type to use in containerd e.g. io.containerd.runtime.v1.linux Type string `toml:"runtime_type" json:"runtimeType"` + // Path is an optional field that can be used to overwrite path to a shim runtime binary. + // When specified, containerd will ignore runtime name field when resolving shim location. + // Path must be abs. 
+ Path string `toml:"runtime_path" json:"runtimePath"` // Engine is the name of the runtime engine used by containerd. // This only works for runtime type "io.containerd.runtime.v1.linux". // DEPRECATED: use Options instead. Remove when shim v1 is deprecated. @@ -56,6 +61,12 @@ type Runtime struct { PrivilegedWithoutHostDevices bool `toml:"privileged_without_host_devices" json:"privileged_without_host_devices"` // BaseRuntimeSpec is a json file with OCI spec to use as base spec that all container's will be created from. BaseRuntimeSpec string `toml:"base_runtime_spec" json:"baseRuntimeSpec"` + // NetworkPluginConfDir is a directory containing the CNI network information for the runtime class. + NetworkPluginConfDir string `toml:"cni_conf_dir" json:"cniConfDir"` + // NetworkPluginMaxConfNum is the max number of plugin config files that will + // be loaded from the cni config directory by go-cni. Set the value to 0 to + // load all config files (no arbitrary limit). The legacy default value is 1. + NetworkPluginMaxConfNum int `toml:"cni_max_conf_num" json:"cniMaxConfNum"` } // ContainerdConfig contains toml config related to containerd @@ -87,6 +98,10 @@ type ContainerdConfig struct { // remove layers from the content store after successfully unpacking these // layers to the snapshotter. DiscardUnpackedLayers bool `toml:"discard_unpacked_layers" json:"discardUnpackedLayers"` + + // IgnoreRdtNotEnabledErrors is a boolean flag to ignore RDT related errors + // when RDT support has not been enabled. + IgnoreRdtNotEnabledErrors bool `toml:"ignore_rdt_not_enabled_errors" json:"ignoreRdtNotEnabledErrors"` } // CniConfig contains toml config related to cni @@ -111,6 +126,13 @@ type CniConfig struct { // a temporary backward-compatible solution for them. // TODO(random-liu): Deprecate this option when kubenet is deprecated. 
NetworkPluginConfTemplate string `toml:"conf_template" json:"confTemplate"` + // IPPreference specifies the strategy to use when selecting the main IP address for a pod. + // + // Options include: + // * ipv4, "" - (default) select the first ipv4 address + // * ipv6 - select the first ipv6 address + // * cni - use the order returned by the CNI plugins, returning the first IP address from the results + IPPreference string `toml:"ip_pref" json:"ipPref"` } // Mirror contains the config related to the registry mirror @@ -182,10 +204,10 @@ type ImageDecryption struct { // KeyModel specifies the trust model of where keys should reside. // // Details of field usage can be found in: - // https://github.com/containerd/cri/tree/master/docs/config.md + // https://github.com/containerd/containerd/tree/main/docs/cri/config.md // // Details of key models can be found in: - // https://github.com/containerd/cri/tree/master/docs/decryption.md + // https://github.com/containerd/containerd/tree/main/docs/cri/decryption.md KeyModel string `toml:"key_model" json:"keyModel"` } @@ -258,6 +280,9 @@ type PluginConfig struct { // present in /sys/fs/cgroup/cgroup.controllers. // This helps with running rootless mode + cgroup v2 + systemd but without hugetlb delegation. DisableHugetlbController bool `toml:"disable_hugetlb_controller" json:"disableHugetlbController"` + // DeviceOwnershipFromSecurityContext changes the default behavior of setting container devices uid/gid + // from CRI's SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid. Defaults to false. + DeviceOwnershipFromSecurityContext bool `toml:"device_ownership_from_security_context" json:"device_ownership_from_security_context"` // IgnoreImageDefinedVolumes ignores volumes defined by the image. Useful for better resource // isolation, security and early detection of issues in the mount configuration when using // ReadOnlyRootFilesystem since containers won't silently mount a temporary volume. 
@@ -266,6 +291,17 @@ type PluginConfig struct { // of being placed under the hardcoded directory /var/run/netns. Changing this setting requires // that all containers are deleted. NetNSMountsUnderStateDir bool `toml:"netns_mounts_under_state_dir" json:"netnsMountsUnderStateDir"` + // EnableUnprivilegedPorts configures net.ipv4.ip_unprivileged_port_start=0 + // for all containers which are not using host network + // and if it is not overwritten by PodSandboxConfig + // Note that currently default is set to disabled but target change it in future, see: + // https://github.com/kubernetes/kubernetes/issues/102612 + EnableUnprivilegedPorts bool `toml:"enable_unprivileged_ports" json:"enableUnprivilegedPorts"` + // EnableUnprivilegedICMP configures net.ipv4.ping_group_range="0 2147483647" + // for all containers which are not using host network, are not running in user namespace + // and if it is not overwritten by PodSandboxConfig + // Note that currently default is set to disabled but target change it in future together with EnableUnprivilegedPorts + EnableUnprivilegedICMP bool `toml:"enable_unprivileged_icmp" json:"enableUnprivilegedICMP"` } // X509KeyPairStreaming contains the x509 configuration for streaming @@ -311,7 +347,7 @@ func ValidatePluginConfig(ctx context.Context, c *PluginConfig) error { if c.ContainerdConfig.UntrustedWorkloadRuntime.Type != "" { log.G(ctx).Warning("`untrusted_workload_runtime` is deprecated, please use `untrusted` runtime in `runtimes` instead") if _, ok := c.ContainerdConfig.Runtimes[RuntimeUntrusted]; ok { - return errors.Errorf("conflicting definitions: configuration includes both `untrusted_workload_runtime` and `runtimes[%q]`", RuntimeUntrusted) + return fmt.Errorf("conflicting definitions: configuration includes both `untrusted_workload_runtime` and `runtimes[%q]`", RuntimeUntrusted) } c.ContainerdConfig.Runtimes[RuntimeUntrusted] = c.ContainerdConfig.UntrustedWorkloadRuntime } @@ -328,19 +364,19 @@ func ValidatePluginConfig(ctx 
context.Context, c *PluginConfig) error { return errors.New("`default_runtime_name` is empty") } if _, ok := c.ContainerdConfig.Runtimes[c.ContainerdConfig.DefaultRuntimeName]; !ok { - return errors.Errorf("no corresponding runtime configured in `containerd.runtimes` for `containerd` `default_runtime_name = \"%s\"", c.ContainerdConfig.DefaultRuntimeName) + return fmt.Errorf("no corresponding runtime configured in `containerd.runtimes` for `containerd` `default_runtime_name = \"%s\"", c.ContainerdConfig.DefaultRuntimeName) } // Validation for deprecated runtime options. if c.SystemdCgroup { if c.ContainerdConfig.Runtimes[c.ContainerdConfig.DefaultRuntimeName].Type != plugin.RuntimeLinuxV1 { - return errors.Errorf("`systemd_cgroup` only works for runtime %s", plugin.RuntimeLinuxV1) + return fmt.Errorf("`systemd_cgroup` only works for runtime %s", plugin.RuntimeLinuxV1) } log.G(ctx).Warning("`systemd_cgroup` is deprecated, please use runtime `options` instead") } if c.NoPivot { if c.ContainerdConfig.Runtimes[c.ContainerdConfig.DefaultRuntimeName].Type != plugin.RuntimeLinuxV1 { - return errors.Errorf("`no_pivot` only works for runtime %s", plugin.RuntimeLinuxV1) + return fmt.Errorf("`no_pivot` only works for runtime %s", plugin.RuntimeLinuxV1) } // NoPivot can't be deprecated yet, because there is no alternative config option // for `io.containerd.runtime.v1.linux`. 
@@ -348,13 +384,13 @@ func ValidatePluginConfig(ctx context.Context, c *PluginConfig) error { for _, r := range c.ContainerdConfig.Runtimes { if r.Engine != "" { if r.Type != plugin.RuntimeLinuxV1 { - return errors.Errorf("`runtime_engine` only works for runtime %s", plugin.RuntimeLinuxV1) + return fmt.Errorf("`runtime_engine` only works for runtime %s", plugin.RuntimeLinuxV1) } log.G(ctx).Warning("`runtime_engine` is deprecated, please use runtime `options` instead") } if r.Root != "" { if r.Type != plugin.RuntimeLinuxV1 { - return errors.Errorf("`runtime_root` only works for runtime %s", plugin.RuntimeLinuxV1) + return fmt.Errorf("`runtime_root` only works for runtime %s", plugin.RuntimeLinuxV1) } log.G(ctx).Warning("`runtime_root` is deprecated, please use runtime `options` instead") } @@ -363,7 +399,7 @@ func ValidatePluginConfig(ctx context.Context, c *PluginConfig) error { useConfigPath := c.Registry.ConfigPath != "" if len(c.Registry.Mirrors) > 0 { if useConfigPath { - return errors.Errorf("`mirrors` cannot be set when `config_path` is provided") + return errors.New("`mirrors` cannot be set when `config_path` is provided") } log.G(ctx).Warning("`mirrors` is deprecated, please use `config_path` instead") } @@ -376,7 +412,7 @@ func ValidatePluginConfig(ctx context.Context, c *PluginConfig) error { } if hasDeprecatedTLS { if useConfigPath { - return errors.Errorf("`configs.tls` cannot be set when `config_path` is provided") + return errors.New("`configs.tls` cannot be set when `config_path` is provided") } log.G(ctx).Warning("`configs.tls` is deprecated, please use `config_path` instead") } @@ -390,7 +426,7 @@ func ValidatePluginConfig(ctx context.Context, c *PluginConfig) error { auth := auth u, err := url.Parse(endpoint) if err != nil { - return errors.Wrapf(err, "failed to parse registry url %q from `registry.auths`", endpoint) + return fmt.Errorf("failed to parse registry url %q from `registry.auths`: %w", endpoint, err) } if u.Scheme != "" { // Do not 
include the scheme in the new registry config. @@ -406,7 +442,7 @@ func ValidatePluginConfig(ctx context.Context, c *PluginConfig) error { // Validation for stream_idle_timeout if c.StreamIdleTimeout != "" { if _, err := time.ParseDuration(c.StreamIdleTimeout); err != nil { - return errors.Wrap(err, "invalid stream idle timeout") + return fmt.Errorf("invalid stream idle timeout: %w", err) } } return nil diff --git a/pkg/cri/config/config_unix.go b/pkg/cri/config/config_unix.go index 3ca1232..f22e057 100644 --- a/pkg/cri/config/config_unix.go +++ b/pkg/cri/config/config_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -91,7 +92,7 @@ func DefaultConfig() PluginConfig { TLSKeyFile: "", TLSCertFile: "", }, - SandboxImage: "k8s.gcr.io/pause:3.5", + SandboxImage: "registry.k8s.io/pause:3.6", StatsCollectPeriod: 10, SystemdCgroup: false, MaxContainerLogLineSize: 16 * 1024, diff --git a/pkg/cri/config/config_windows.go b/pkg/cri/config/config_windows.go index 535a65a..c44bac9 100644 --- a/pkg/cri/config/config_windows.go +++ b/pkg/cri/config/config_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -54,7 +52,7 @@ func DefaultConfig() PluginConfig { TLSKeyFile: "", TLSCertFile: "", }, - SandboxImage: "k8s.gcr.io/pause:3.5", + SandboxImage: "registry.k8s.io/pause:3.6", StatsCollectPeriod: 10, MaxContainerLogLineSize: 16 * 1024, MaxConcurrentDownloads: 3, diff --git a/pkg/cri/constants/constants.go b/pkg/cri/constants/constants.go index b382215..176a0e6 100644 --- a/pkg/cri/constants/constants.go +++ b/pkg/cri/constants/constants.go @@ -16,11 +16,11 @@ package constants -// TODO(random-liu): Merge annotations package into this package. - const ( // K8sContainerdNamespace is the namespace we use to connect containerd. K8sContainerdNamespace = "k8s.io" - // CRIVersion is the CRI version supported by the CRI plugin. - CRIVersion = "v1alpha2" + // CRIVersion is the latest CRI version supported by the CRI plugin. 
+ CRIVersion = "v1" + // CRIVersionAlpha is the alpha version of CRI supported by the CRI plugin. + CRIVersionAlpha = "v1alpha2" ) diff --git a/pkg/cri/cri.go b/pkg/cri/cri.go index 2307302..f89b23b 100644 --- a/pkg/cri/cri.go +++ b/pkg/cri/cri.go @@ -18,6 +18,7 @@ package cri import ( "flag" + "fmt" "path/filepath" "github.com/containerd/containerd" @@ -35,17 +36,13 @@ import ( "github.com/containerd/containerd/services" "github.com/containerd/containerd/snapshots" imagespec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" "k8s.io/klog/v2" criconfig "github.com/containerd/containerd/pkg/cri/config" "github.com/containerd/containerd/pkg/cri/constants" - criplatforms "github.com/containerd/containerd/pkg/cri/platforms" "github.com/containerd/containerd/pkg/cri/server" ) -// TODO(random-liu): Use github.com/pkg/errors for our errors. // Register CRI service plugin func init() { config := criconfig.DefaultConfig() @@ -54,6 +51,7 @@ func init() { ID: "cri", Config: &config, Requires: []plugin.Type{ + plugin.EventPlugin, plugin.ServicePlugin, }, InitFn: initCRIService, @@ -61,12 +59,13 @@ func init() { } func initCRIService(ic *plugin.InitContext) (interface{}, error) { + ready := ic.RegisterReadiness() ic.Meta.Platforms = []imagespec.Platform{platforms.DefaultSpec()} - ic.Meta.Exports = map[string]string{"CRIVersion": constants.CRIVersion} + ic.Meta.Exports = map[string]string{"CRIVersion": constants.CRIVersion, "CRIVersionAlpha": constants.CRIVersionAlpha} ctx := ic.Context pluginConfig := ic.Config.(*criconfig.PluginConfig) if err := criconfig.ValidatePluginConfig(ctx, pluginConfig); err != nil { - return nil, errors.Wrap(err, "invalid plugin config") + return nil, fmt.Errorf("invalid plugin config: %w", err) } c := criconfig.Config{ @@ -79,32 +78,32 @@ func initCRIService(ic *plugin.InitContext) (interface{}, error) { log.G(ctx).Infof("Start cri plugin with config %+v", c) if err := setGLogLevel(); err != 
nil { - return nil, errors.Wrap(err, "failed to set glog level") + return nil, fmt.Errorf("failed to set glog level: %w", err) } servicesOpts, err := getServicesOpts(ic) if err != nil { - return nil, errors.Wrap(err, "failed to get services") + return nil, fmt.Errorf("failed to get services: %w", err) } log.G(ctx).Info("Connect containerd service") client, err := containerd.New( "", containerd.WithDefaultNamespace(constants.K8sContainerdNamespace), - containerd.WithDefaultPlatform(criplatforms.Default()), + containerd.WithDefaultPlatform(platforms.Default()), containerd.WithServices(servicesOpts...), ) if err != nil { - return nil, errors.Wrap(err, "failed to create containerd client") + return nil, fmt.Errorf("failed to create containerd client: %w", err) } s, err := server.NewCRIService(c, client) if err != nil { - return nil, errors.Wrap(err, "failed to create CRI service") + return nil, fmt.Errorf("failed to create CRI service: %w", err) } go func() { - if err := s.Run(); err != nil { + if err := s.Run(ready); err != nil { log.G(ctx).WithError(err).Fatal("Failed to run CRI service") } // TODO(random-liu): Whether and how we can stop containerd. 
@@ -116,51 +115,56 @@ func initCRIService(ic *plugin.InitContext) (interface{}, error) { func getServicesOpts(ic *plugin.InitContext) ([]containerd.ServicesOpt, error) { plugins, err := ic.GetByType(plugin.ServicePlugin) if err != nil { - return nil, errors.Wrap(err, "failed to get service plugin") + return nil, fmt.Errorf("failed to get service plugin: %w", err) + } + + ep, err := ic.Get(plugin.EventPlugin) + if err != nil { + return nil, fmt.Errorf("failed to get event plugin: %w", err) } opts := []containerd.ServicesOpt{ - containerd.WithEventService(ic.Events), + containerd.WithEventService(ep.(containerd.EventService)), } for s, fn := range map[string]func(interface{}) containerd.ServicesOpt{ services.ContentService: func(s interface{}) containerd.ServicesOpt { return containerd.WithContentStore(s.(content.Store)) }, services.ImagesService: func(s interface{}) containerd.ServicesOpt { - return containerd.WithImageService(s.(images.ImagesClient)) + return containerd.WithImageClient(s.(images.ImagesClient)) }, services.SnapshotsService: func(s interface{}) containerd.ServicesOpt { return containerd.WithSnapshotters(s.(map[string]snapshots.Snapshotter)) }, services.ContainersService: func(s interface{}) containerd.ServicesOpt { - return containerd.WithContainerService(s.(containers.ContainersClient)) + return containerd.WithContainerClient(s.(containers.ContainersClient)) }, services.TasksService: func(s interface{}) containerd.ServicesOpt { - return containerd.WithTaskService(s.(tasks.TasksClient)) + return containerd.WithTaskClient(s.(tasks.TasksClient)) }, services.DiffService: func(s interface{}) containerd.ServicesOpt { - return containerd.WithDiffService(s.(diff.DiffClient)) + return containerd.WithDiffClient(s.(diff.DiffClient)) }, services.NamespacesService: func(s interface{}) containerd.ServicesOpt { - return containerd.WithNamespaceService(s.(namespaces.NamespacesClient)) + return containerd.WithNamespaceClient(s.(namespaces.NamespacesClient)) }, 
services.LeasesService: func(s interface{}) containerd.ServicesOpt { return containerd.WithLeasesService(s.(leases.Manager)) }, services.IntrospectionService: func(s interface{}) containerd.ServicesOpt { - return containerd.WithIntrospectionService(s.(introspectionapi.IntrospectionClient)) + return containerd.WithIntrospectionClient(s.(introspectionapi.IntrospectionClient)) }, } { p := plugins[s] if p == nil { - return nil, errors.Errorf("service %q not found", s) + return nil, fmt.Errorf("service %q not found", s) } i, err := p.Instance() if err != nil { - return nil, errors.Wrapf(err, "failed to get instance of service %q", s) + return nil, fmt.Errorf("failed to get instance of service %q: %w", s, err) } if i == nil { - return nil, errors.Errorf("instance of service %q not found", s) + return nil, fmt.Errorf("instance of service %q not found", s) } opts = append(opts, fn(i)) } @@ -169,24 +173,21 @@ func getServicesOpts(ic *plugin.InitContext) ([]containerd.ServicesOpt, error) { // Set glog level. func setGLogLevel() error { - l := logrus.GetLevel() + l := log.GetLevel() fs := flag.NewFlagSet("klog", flag.PanicOnError) klog.InitFlags(fs) if err := fs.Set("logtostderr", "true"); err != nil { return err } switch l { - case logrus.TraceLevel: + case log.TraceLevel: return fs.Set("v", "5") - case logrus.DebugLevel: + case log.DebugLevel: return fs.Set("v", "4") - case logrus.InfoLevel: + case log.InfoLevel: return fs.Set("v", "2") - // glog doesn't support following filters. Defaults to v=0. - case logrus.WarnLevel: - case logrus.ErrorLevel: - case logrus.FatalLevel: - case logrus.PanicLevel: + default: + // glog doesn't support other filters. Defaults to v=0. 
} return nil } diff --git a/pkg/cri/io/helpers.go b/pkg/cri/io/helpers.go index 59d4141..2cdc97e 100644 --- a/pkg/cri/io/helpers.go +++ b/pkg/cri/io/helpers.go @@ -25,7 +25,7 @@ import ( "github.com/containerd/containerd/cio" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // AttachOptions specifies how to attach to a container. diff --git a/pkg/cri/io/helpers_unix.go b/pkg/cri/io/helpers_unix.go index 2780b95..f0f9084 100644 --- a/pkg/cri/io/helpers_unix.go +++ b/pkg/cri/io/helpers_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/pkg/cri/io/helpers_windows.go b/pkg/cri/io/helpers_windows.go index dcc9fe6..b6c9c12 100644 --- a/pkg/cri/io/helpers_windows.go +++ b/pkg/cri/io/helpers_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -19,13 +17,13 @@ package io import ( + "fmt" "io" "net" "os" "sync" winio "github.com/Microsoft/go-winio" - "github.com/pkg/errors" "golang.org/x/net/context" ) @@ -62,7 +60,7 @@ func openPipe(ctx context.Context, fn string, flag int, perm os.FileMode) (io.Re func (p *pipe) Write(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Write(b) } @@ -70,7 +68,7 @@ func (p *pipe) Write(b []byte) (int, error) { func (p *pipe) Read(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Read(b) } diff --git a/pkg/cri/io/logger.go b/pkg/cri/io/logger.go index 27721e7..3f905e4 100644 --- a/pkg/cri/io/logger.go +++ b/pkg/cri/io/logger.go @@ -21,11 +21,10 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "time" "github.com/sirupsen/logrus" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" cioutil 
"github.com/containerd/containerd/pkg/ioutil" ) @@ -43,7 +42,7 @@ const ( // NewDiscardLogger creates logger which discards all the input. func NewDiscardLogger() io.WriteCloser { - return cioutil.NewNopWriteCloser(ioutil.Discard) + return cioutil.NewNopWriteCloser(io.Discard) } // NewCRILogger returns a write closer which redirect container log into @@ -144,7 +143,10 @@ func redirectLogs(path string, rc io.ReadCloser, w io.Writer, s StreamType, maxL lineBuffer.Write(l) } lineBuffer.WriteByte(eol) - if _, err := lineBuffer.WriteTo(w); err != nil { + if n, err := lineBuffer.WriteTo(w); err == nil { + outputEntries.Inc() + outputBytes.Inc(float64(n)) + } else { logrus.WithError(err).Errorf("Fail to write %q log to log file %q", s, path) // Continue on write error to drain the container output. } @@ -154,6 +156,8 @@ func redirectLogs(path string, rc io.ReadCloser, w io.Writer, s StreamType, maxL newLine, isPrefix, err := readLine(r) // NOTE(random-liu): readLine can return actual content even if there is an error. if len(newLine) > 0 { + inputEntries.Inc() + inputBytes.Inc(float64(len(newLine))) // Buffer returned by ReadLine will change after // next read, copy it. 
l := make([]byte, len(newLine)) @@ -184,6 +188,7 @@ func redirectLogs(path string, rc io.ReadCloser, w io.Writer, s StreamType, maxL } buf[len(buf)-1] = last[:len(last)-exceedLen] writeLineBuffer(partial, buf) + splitEntries.Inc() buf = [][]byte{last[len(last)-exceedLen:]} length = exceedLen } diff --git a/pkg/cri/io/logger_test.go b/pkg/cri/io/logger_test.go index f63e274..e4eaccb 100644 --- a/pkg/cri/io/logger_test.go +++ b/pkg/cri/io/logger_test.go @@ -18,14 +18,14 @@ package io import ( "bytes" - "io/ioutil" + "io" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" cioutil "github.com/containerd/containerd/pkg/ioutil" ) @@ -237,7 +237,7 @@ func TestRedirectLogs(t *testing.T) { }, } { t.Logf("TestCase %q", desc) - rc := ioutil.NopCloser(strings.NewReader(test.input)) + rc := io.NopCloser(strings.NewReader(test.input)) buf := bytes.NewBuffer(nil) wc := cioutil.NewNopWriteCloser(buf) redirectLogs("test-path", rc, wc, test.stream, test.maxLen) diff --git a/pkg/cri/io/metrics.go b/pkg/cri/io/metrics.go new file mode 100644 index 0000000..cc3f366 --- /dev/null +++ b/pkg/cri/io/metrics.go @@ -0,0 +1,42 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package io + +import "github.com/docker/go-metrics" + +var ( + inputEntries metrics.Counter + outputEntries metrics.Counter + inputBytes metrics.Counter + outputBytes metrics.Counter + splitEntries metrics.Counter +) + +func init() { + // These CRI metrics record input and output logging volume. + ns := metrics.NewNamespace("containerd", "cri", nil) + + inputEntries = ns.NewCounter("input_entries", "Number of log entries received") + outputEntries = ns.NewCounter("output_entries", "Number of log entries successfully written to disk") + inputBytes = ns.NewCounter("input_bytes", "Size of logs received") + outputBytes = ns.NewCounter("output_bytes", "Size of logs successfully written to disk") + splitEntries = ns.NewCounter("split_entries", "Number of extra log entries created by splitting the "+ + "original log entry. This happens when the original log entry exceeds length limit. "+ + "This metric does not count the original log entry.") + + metrics.Register(ns) +} diff --git a/vendor/github.com/containerd/cgroups/state.go b/pkg/cri/labels/labels.go similarity index 50% rename from vendor/github.com/containerd/cgroups/state.go rename to pkg/cri/labels/labels.go index cfeabbb..45cdb03 100644 --- a/vendor/github.com/containerd/cgroups/state.go +++ b/pkg/cri/labels/labels.go @@ -14,15 +14,17 @@ limitations under the License. */ -package cgroups - -// State is a type that represents the state of the current cgroup -type State string +package labels const ( - Unknown State = "" - Thawed State = "thawed" - Frozen State = "frozen" - Freezing State = "freezing" - Deleted State = "deleted" + // criContainerdPrefix is common prefix for cri-containerd + criContainerdPrefix = "io.cri-containerd" + // ImageLabelKey is the label key indicating the image is managed by cri plugin. + ImageLabelKey = criContainerdPrefix + ".image" + // ImageLabelValue is the label value indicating the image is managed by cri plugin. 
+ ImageLabelValue = "managed" + // PinnedImageLabelKey is the label value indicating the image is pinned. + PinnedImageLabelKey = criContainerdPrefix + ".pinned" + // PinnedImageLabelValue is the label value indicating the image is pinned. + PinnedImageLabelValue = "pinned" ) diff --git a/pkg/cri/opts/container.go b/pkg/cri/opts/container.go index 5a4c94b..5ea1b87 100644 --- a/pkg/cri/opts/container.go +++ b/pkg/cri/opts/container.go @@ -18,8 +18,12 @@ package opts import ( "context" - "io/ioutil" + "errors" + "fmt" "os" + "path/filepath" + goruntime "runtime" + "strings" "github.com/containerd/containerd" "github.com/containerd/containerd/containers" @@ -28,11 +32,10 @@ import ( "github.com/containerd/containerd/mount" "github.com/containerd/containerd/snapshots" "github.com/containerd/continuity/fs" - "github.com/pkg/errors" ) // WithNewSnapshot wraps `containerd.WithNewSnapshot` so that if creating the -// snapshot fails we make sure the image is actually unpacked and and retry. +// snapshot fails we make sure the image is actually unpacked and retry. func WithNewSnapshot(id string, i containerd.Image, opts ...snapshots.Opt) containerd.NewContainerOpts { f := containerd.WithNewSnapshot(id, i, opts...) return func(ctx context.Context, client *containerd.Client, c *containers.Container) error { @@ -42,7 +45,7 @@ func WithNewSnapshot(id string, i containerd.Image, opts ...snapshots.Opt) conta } if err := i.Unpack(ctx, c.Snapshotter); err != nil { - return errors.Wrap(err, "error unpacking image") + return fmt.Errorf("error unpacking image: %w", err) } return f(ctx, client, c) } @@ -66,7 +69,13 @@ func WithVolumes(volumeMounts map[string]string) containerd.NewContainerOpts { if err != nil { return err } - root, err := ioutil.TempDir("", "ctd-volume") + // Since only read is needed, append ReadOnly mount option to prevent linux kernel + // from syncing whole filesystem in umount syscall. 
+ if len(mounts) == 1 && mounts[0].Type == "overlay" { + mounts[0].Options = append(mounts[0].Options, "ro") + } + + root, err := os.MkdirTemp("", "ctd-volume") if err != nil { return err } @@ -74,33 +83,55 @@ func WithVolumes(volumeMounts map[string]string) containerd.NewContainerOpts { // if it fails but not RM snapshot data. // refer to https://github.com/containerd/containerd/pull/1868 // https://github.com/containerd/containerd/pull/1785 - defer os.Remove(root) // nolint: errcheck - if err := mount.All(mounts, root); err != nil { - return errors.Wrap(err, "failed to mount") - } - defer func() { - if uerr := mount.Unmount(root, 0); uerr != nil { - log.G(ctx).WithError(uerr).Errorf("Failed to unmount snapshot %q", c.SnapshotKey) + defer os.Remove(root) + + unmounter := func(mountPath string) { + if uerr := mount.Unmount(mountPath, 0); uerr != nil { + log.G(ctx).WithError(uerr).Errorf("Failed to unmount snapshot %q", root) if err == nil { err = uerr } } - }() + } + + var mountPaths []string + if goruntime.GOOS == "windows" { + for _, m := range mounts { + // appending the layerID to the root. + mountPath := filepath.Join(root, filepath.Base(m.Source)) + mountPaths = append(mountPaths, mountPath) + if err := m.Mount(mountPath); err != nil { + return err + } + + defer unmounter(m.Source) + } + } else { + mountPaths = append(mountPaths, root) + if err := mount.All(mounts, root); err != nil { + return fmt.Errorf("failed to mount: %w", err) + } + defer unmounter(root) + } for host, volume := range volumeMounts { - src, err := fs.RootPath(root, volume) - if err != nil { - return errors.Wrapf(err, "rootpath on root %s, volume %s", root, volume) - } - if _, err := os.Stat(src); err != nil { - if os.IsNotExist(err) { - // Skip copying directory if it does not exist. - continue + // The volume may have been defined with a C: prefix, which we can't use here. 
+ volume = strings.TrimPrefix(volume, "C:") + for _, mountPath := range mountPaths { + src, err := fs.RootPath(mountPath, volume) + if err != nil { + return fmt.Errorf("rootpath on mountPath %s, volume %s: %w", mountPath, volume, err) + } + if _, err := os.Stat(src); err != nil { + if os.IsNotExist(err) { + // Skip copying directory if it does not exist. + continue + } + return fmt.Errorf("stat volume in rootfs: %w", err) + } + if err := copyExistingContents(src, host); err != nil { + return fmt.Errorf("taking runtime copy of volume: %w", err) } - return errors.Wrap(err, "stat volume in rootfs") - } - if err := copyExistingContents(src, host); err != nil { - return errors.Wrap(err, "taking runtime copy of volume") } } return nil @@ -110,12 +141,12 @@ func WithVolumes(volumeMounts map[string]string) containerd.NewContainerOpts { // copyExistingContents copies from the source to the destination and // ensures the ownership is appropriately set. func copyExistingContents(source, destination string) error { - dstList, err := ioutil.ReadDir(destination) + dstList, err := os.ReadDir(destination) if err != nil { return err } if len(dstList) != 0 { - return errors.Errorf("volume at %q is not initially empty", destination) + return fmt.Errorf("volume at %q is not initially empty", destination) } return fs.CopyDir(destination, source, fs.WithXAttrExclude("security.selinux")) } diff --git a/pkg/cri/opts/spec.go b/pkg/cri/opts/spec.go index 1afb4fc..4ad1f1c 100644 --- a/pkg/cri/opts/spec.go +++ b/pkg/cri/opts/spec.go @@ -18,17 +18,16 @@ package opts import ( "context" + "errors" "os" "path/filepath" "strings" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" - imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // DefaultSandboxCPUshares is 
default cpu shares for sandbox container. @@ -64,7 +63,9 @@ func WithProcessArgs(config *runtime.ContainerConfig, image *imagespec.ImageConf args = append([]string{}, image.Cmd...) } if command == nil { - command = append([]string{}, image.Entrypoint...) + if !(len(image.Entrypoint) == 1 && image.Entrypoint[0] == "") { + command = append([]string{}, image.Entrypoint...) + } } } if len(command) == 0 && len(args) == 0 { @@ -76,7 +77,8 @@ func WithProcessArgs(config *runtime.ContainerConfig, image *imagespec.ImageConf // mounts defines how to sort runtime.Mount. // This is the same with the Docker implementation: -// https://github.com/moby/moby/blob/17.05.x/daemon/volumes.go#L26 +// +// https://github.com/moby/moby/blob/17.05.x/daemon/volumes.go#L26 type orderedMounts []*runtime.Mount // Len returns the number of mounts. Used in sorting. diff --git a/pkg/cri/opts/spec_linux.go b/pkg/cri/opts/spec_linux.go index 84c16b6..97d79bf 100644 --- a/pkg/cri/opts/spec_linux.go +++ b/pkg/cri/opts/spec_linux.go @@ -18,8 +18,8 @@ package opts import ( "context" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" "sort" @@ -28,16 +28,16 @@ import ( "sync" "syscall" + "github.com/containerd/cgroups" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/oci" runtimespec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/util" osinterface "github.com/containerd/containerd/pkg/os" @@ -160,17 +160,17 @@ func WithMounts(osi osinterface.OS, config *runtime.ContainerConfig, extra []*ru // TODO(random-liu): Add CRI validation test for this case. 
if _, err := osi.Stat(src); err != nil { if !os.IsNotExist(err) { - return errors.Wrapf(err, "failed to stat %q", src) + return fmt.Errorf("failed to stat %q: %w", src, err) } if err := osi.MkdirAll(src, 0755); err != nil { - return errors.Wrapf(err, "failed to mkdir %q", src) + return fmt.Errorf("failed to mkdir %q: %w", src, err) } } // TODO(random-liu): Add cri-containerd integration test or cri validation test // for this. src, err := osi.ResolveSymbolicLink(src) if err != nil { - return errors.Wrapf(err, "failed to resolve symlink %q", src) + return fmt.Errorf("failed to resolve symlink %q: %w", src, err) } if s.Linux == nil { s.Linux = &runtimespec.Linux{} @@ -211,7 +211,7 @@ func WithMounts(osi osinterface.OS, config *runtime.ContainerConfig, extra []*ru if mount.GetSelinuxRelabel() { if err := label.Relabel(src, mountLabel, false); err != nil && err != unix.ENOTSUP { - return errors.Wrapf(err, "relabel %q with %q failed", src, mountLabel) + return fmt.Errorf("relabel %q with %q failed: %w", src, mountLabel, err) } } s.Mounts = append(s.Mounts, runtimespec.Mount{ @@ -240,7 +240,7 @@ func ensureShared(path string, lookupMount func(string) (mount.Info, error)) err } } - return errors.Errorf("path %q is mounted on %q but it is not a shared mount", path, mountInfo.Mountpoint) + return fmt.Errorf("path %q is mounted on %q but it is not a shared mount", path, mountInfo.Mountpoint) } // ensure mount point on which path is mounted, is either shared or slave. @@ -258,11 +258,33 @@ func ensureSharedOrSlave(path string, lookupMount func(string) (mount.Info, erro return nil } } - return errors.Errorf("path %q is mounted on %q but it is not a shared or slave mount", path, mountInfo.Mountpoint) + return fmt.Errorf("path %q is mounted on %q but it is not a shared or slave mount", path, mountInfo.Mountpoint) +} + +// getDeviceUserGroupID() is used to find the right uid/gid +// value for the device node created in the container namespace. 
+// The runtime executes mknod() and chmod()s the created +// device with the values returned here. +// +// On Linux, uid and gid are sufficient and the user/groupname do not +// need to be resolved. +// +// TODO(mythi): In case of user namespaces, the runtime simply bind +// mounts the devices from the host. Additional logic is needed +// to check that the runtimes effective UID/GID on the host has the +// permissions to access the device node and/or the right user namespace +// mappings are created. +// +// Ref: https://github.com/kubernetes/kubernetes/issues/92211 +func getDeviceUserGroupID(runAsVal *runtime.Int64Value) uint32 { + if runAsVal != nil { + return uint32(runAsVal.GetValue()) + } + return 0 } // WithDevices sets the provided devices onto the container spec -func WithDevices(osi osinterface.OS, config *runtime.ContainerConfig) oci.SpecOpts { +func WithDevices(osi osinterface.OS, config *runtime.ContainerConfig, enableDeviceOwnershipFromSecurityContext bool) oci.SpecOpts { return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) (err error) { if s.Linux == nil { s.Linux = &runtimespec.Linux{} @@ -271,6 +293,8 @@ func WithDevices(osi osinterface.OS, config *runtime.ContainerConfig) oci.SpecOp s.Linux.Resources = &runtimespec.LinuxResources{} } + oldDevices := len(s.Linux.Devices) + for _, device := range config.GetDevices() { path, err := osi.ResolveSymbolicLink(device.HostPath) if err != nil { @@ -282,6 +306,24 @@ func WithDevices(osi osinterface.OS, config *runtime.ContainerConfig) oci.SpecOp return err } } + + if enableDeviceOwnershipFromSecurityContext { + UID := getDeviceUserGroupID(config.GetLinux().GetSecurityContext().GetRunAsUser()) + GID := getDeviceUserGroupID(config.GetLinux().GetSecurityContext().GetRunAsGroup()) + // Loop all new devices added by oci.WithDevices() to update their + // dev.UID/dev.GID. 
+ // + // non-zero UID/GID from SecurityContext is used to override host's + // device UID/GID for the container. + for idx := oldDevices; idx < len(s.Linux.Devices); idx++ { + if UID != 0 { + *s.Linux.Devices[idx].UID = UID + } + if GID != 0 { + *s.Linux.Devices[idx].GID = GID + } + } + } return nil } } @@ -362,6 +404,37 @@ func WithSelinuxLabels(process, mount string) oci.SpecOpts { } } +var ( + swapControllerAvailability bool + swapControllerAvailabilityOnce sync.Once +) + +// SwapControllerAvailable returns true if the swap controller is available +func SwapControllerAvailable() bool { + swapControllerAvailabilityOnce.Do(func() { + const warn = "Failed to detect the availability of the swap controller, assuming not available" + p := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + if cgroups.Mode() == cgroups.Unified { + // memory.swap.max does not exist in the cgroup root, so we check /sys/fs/cgroup//memory.swap.max + _, unified, err := cgroups.ParseCgroupFileUnified("/proc/self/cgroup") + if err != nil { + err = fmt.Errorf("failed to parse /proc/self/cgroup: %w", err) + logrus.WithError(err).Warn(warn) + return + } + p = filepath.Join("/sys/fs/cgroup", unified, "memory.swap.max") + } + if _, err := os.Stat(p); err != nil { + if !errors.Is(err, os.ErrNotExist) { + logrus.WithError(err).Warn(warn) + } + return + } + swapControllerAvailability = true + }) + return swapControllerAvailability +} + // WithResources sets the provided resource restrictions func WithResources(resources *runtime.LinuxContainerResources, tolerateMissingHugetlbController, disableHugetlbController bool) oci.SpecOpts { return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) (err error) { @@ -385,6 +458,7 @@ func WithResources(resources *runtime.LinuxContainerResources, tolerateMissingHu q = resources.GetCpuQuota() shares = uint64(resources.GetCpuShares()) limit = resources.GetMemoryLimitInBytes() + swapLimit = 
resources.GetMemorySwapLimitInBytes() hugepages = resources.GetHugepageLimits() ) @@ -405,7 +479,15 @@ func WithResources(resources *runtime.LinuxContainerResources, tolerateMissingHu } if limit != 0 { s.Linux.Resources.Memory.Limit = &limit + // swap/memory limit should be equal to prevent container from swapping by default + if swapLimit == 0 && SwapControllerAvailable() { + s.Linux.Resources.Memory.Swap = &limit + } } + if swapLimit != 0 && SwapControllerAvailable() { + s.Linux.Resources.Memory.Swap = &swapLimit + } + if !disableHugetlbController { if isHugetlbControllerPresent() { for _, limit := range hugepages { @@ -416,12 +498,21 @@ func WithResources(resources *runtime.LinuxContainerResources, tolerateMissingHu } } else { if !tolerateMissingHugetlbController { - return errors.Errorf("huge pages limits are specified but hugetlb cgroup controller is missing. " + + return errors.New("huge pages limits are specified but hugetlb cgroup controller is missing. " + "Please set tolerate_missing_hugetlb_controller to `true` to ignore this error") } logrus.Warn("hugetlb cgroup controller is absent. skipping huge pages limits") } } + + if unified := resources.GetUnified(); unified != nil { + if s.Linux.Resources.Unified == nil { + s.Linux.Resources.Unified = make(map[string]string) + } + for k, v := range unified { + s.Linux.Resources.Unified[k] = v + } + } return nil } } @@ -458,8 +549,8 @@ var ( // cgroup v1. func cgroupv1HasHugetlb() (bool, error) { _cgroupv1HasHugetlbOnce.Do(func() { - if _, err := ioutil.ReadDir("/sys/fs/cgroup/hugetlb"); err != nil { - _cgroupv1HasHugetlbErr = errors.Wrap(err, "readdir /sys/fs/cgroup/hugetlb") + if _, err := os.ReadDir("/sys/fs/cgroup/hugetlb"); err != nil { + _cgroupv1HasHugetlbErr = fmt.Errorf("readdir /sys/fs/cgroup/hugetlb: %w", err) _cgroupv1HasHugetlb = false } else { _cgroupv1HasHugetlbErr = nil @@ -473,9 +564,9 @@ func cgroupv1HasHugetlb() (bool, error) { // cgroup v2. 
func cgroupv2HasHugetlb() (bool, error) { _cgroupv2HasHugetlbOnce.Do(func() { - controllers, err := ioutil.ReadFile("/sys/fs/cgroup/cgroup.controllers") + controllers, err := os.ReadFile("/sys/fs/cgroup/cgroup.controllers") if err != nil { - _cgroupv2HasHugetlbErr = errors.Wrap(err, "read /sys/fs/cgroup/cgroup.controllers") + _cgroupv2HasHugetlbErr = fmt.Errorf("read /sys/fs/cgroup/cgroup.controllers: %w", err) return } _cgroupv2HasHugetlb = strings.Contains(string(controllers), "hugetlb") @@ -621,14 +712,14 @@ func nullOpt(_ context.Context, _ oci.Client, _ *containers.Container, _ *runtim } func getCurrentOOMScoreAdj() (int, error) { - b, err := ioutil.ReadFile("/proc/self/oom_score_adj") + b, err := os.ReadFile("/proc/self/oom_score_adj") if err != nil { - return 0, errors.Wrap(err, "could not get the daemon oom_score_adj") + return 0, fmt.Errorf("could not get the daemon oom_score_adj: %w", err) } s := strings.TrimSpace(string(b)) i, err := strconv.Atoi(s) if err != nil { - return 0, errors.Wrap(err, "could not get the daemon oom_score_adj") + return 0, fmt.Errorf("could not get the daemon oom_score_adj: %w", err) } return i, nil } diff --git a/pkg/cri/opts/spec_test.go b/pkg/cri/opts/spec_test.go index 3e540c3..3892e4c 100644 --- a/pkg/cri/opts/spec_test.go +++ b/pkg/cri/opts/spec_test.go @@ -21,7 +21,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestOrderedMounts(t *testing.T) { diff --git a/pkg/cri/opts/spec_windows.go b/pkg/cri/opts/spec_windows.go index f6d4a16..d0368a3 100644 --- a/pkg/cri/opts/spec_windows.go +++ b/pkg/cri/opts/spec_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -20,15 +18,19 @@ package opts import ( "context" + "errors" + "fmt" + "os" "path/filepath" "sort" "strings" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" + imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "golang.org/x/sys/windows" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" osinterface "github.com/containerd/containerd/pkg/os" ) @@ -62,6 +64,61 @@ func cleanMount(p string) string { return filepath.Clean(p) } +func parseMount(osi osinterface.OS, mount *runtime.Mount) (*runtimespec.Mount, error) { + var ( + dst = mount.GetContainerPath() + src = mount.GetHostPath() + ) + // In the case of a named pipe mount on Windows, don't stat the file + // or do other operations that open it, as that could interfere with + // the listening process. filepath.Clean also breaks named pipe + // paths, so don't use it. + if !namedPipePath(src) { + if _, err := osi.Stat(src); err != nil { + // Create the host path if it doesn't exist. This will align + // the behavior with the Linux implementation, but it doesn't + // align with Docker's behavior on Windows. + if !os.IsNotExist(err) { + return nil, fmt.Errorf("failed to stat %q: %w", src, err) + } + if err := osi.MkdirAll(src, 0755); err != nil { + return nil, fmt.Errorf("failed to mkdir %q: %w", src, err) + } + } + var err error + src, err = osi.ResolveSymbolicLink(src) + if err != nil { + return nil, fmt.Errorf("failed to resolve symlink %q: %w", src, err) + } + // hcsshim requires clean path, especially '/' -> '\'. Additionally, + // for the destination, absolute paths should have the C: prefix. + src = filepath.Clean(src) + + // filepath.Clean adds a '.' at the end if the path is a + // drive (like Z:, E: etc.). Keeping this '.' in the path + // causes incorrect parameter error when starting the + // container on windows. 
Remove it here. + if !(len(dst) == 2 && dst[1] == ':') { + dst = filepath.Clean(dst) + if dst[0] == '\\' { + dst = "C:" + dst + } + } else if dst[0] == 'c' || dst[0] == 'C' { + return nil, fmt.Errorf("destination path can not be C drive") + } + } + + var options []string + // NOTE(random-liu): we don't change all mounts to `ro` when root filesystem + // is readonly. This is different from docker's behavior, but make more sense. + if mount.GetReadonly() { + options = append(options, "ro") + } else { + options = append(options, "rw") + } + return &runtimespec.Mount{Source: src, Destination: dst, Options: options}, nil +} + // WithWindowsMounts sorts and adds runtime and CRI mounts to the spec for // windows container. func WithWindowsMounts(osi osinterface.OS, config *runtime.ContainerConfig, extra []*runtime.Mount) oci.SpecOpts { @@ -111,44 +168,11 @@ func WithWindowsMounts(osi osinterface.OS, config *runtime.ContainerConfig, extr } for _, mount := range mounts { - var ( - dst = mount.GetContainerPath() - src = mount.GetHostPath() - ) - // In the case of a named pipe mount on Windows, don't stat the file - // or do other operations that open it, as that could interfere with - // the listening process. filepath.Clean also breaks named pipe - // paths, so don't use it. - if !namedPipePath(src) { - if _, err := osi.Stat(src); err != nil { - // If the source doesn't exist, return an error instead - // of creating the source. This aligns with Docker's - // behavior on windows. - return errors.Wrapf(err, "failed to stat %q", src) - } - var err error - src, err = osi.ResolveSymbolicLink(src) - if err != nil { - return errors.Wrapf(err, "failed to resolve symlink %q", src) - } - // hcsshim requires clean path, especially '/' -> '\'. 
- src = filepath.Clean(src) - dst = filepath.Clean(dst) + parsedMount, err := parseMount(osi, mount) + if err != nil { + return err } - - var options []string - // NOTE(random-liu): we don't change all mounts to `ro` when root filesystem - // is readonly. This is different from docker's behavior, but make more sense. - if mount.GetReadonly() { - options = append(options, "ro") - } else { - options = append(options, "rw") - } - s.Mounts = append(s.Mounts, runtimespec.Mount{ - Source: src, - Destination: dst, - Options: options, - }) + s.Mounts = append(s.Mounts, *parsedMount) } return nil } @@ -166,9 +190,6 @@ func WithWindowsResources(resources *runtime.WindowsContainerResources) oci.Spec if s.Windows.Resources == nil { s.Windows.Resources = &runtimespec.WindowsResources{} } - if s.Windows.Resources.CPU == nil { - s.Windows.Resources.CPU = &runtimespec.WindowsCPUResources{} - } if s.Windows.Resources.Memory == nil { s.Windows.Resources.Memory = &runtimespec.WindowsMemoryResources{} } @@ -179,6 +200,9 @@ func WithWindowsResources(resources *runtime.WindowsContainerResources) oci.Spec max = uint16(resources.GetCpuMaximum()) limit = uint64(resources.GetMemoryLimitInBytes()) ) + if s.Windows.Resources.CPU == nil && (count != 0 || shares != 0 || max != 0) { + s.Windows.Resources.CPU = &runtimespec.WindowsCPUResources{} + } if count != 0 { s.Windows.Resources.CPU.Count = &count } @@ -222,3 +246,92 @@ func WithWindowsCredentialSpec(credentialSpec string) oci.SpecOpts { return nil } } + +func escapeAndCombineArgsWindows(args []string) string { + escaped := make([]string, len(args)) + for i, a := range args { + escaped[i] = windows.EscapeArg(a) + } + return strings.Join(escaped, " ") +} + +// WithProcessCommandLineOrArgsForWindows sets the process command line or process args on the spec based on the image +// and runtime config +// If image.ArgsEscaped field is set, this function sets the process command line and if not, it sets the +// process args field +func 
WithProcessCommandLineOrArgsForWindows(config *runtime.ContainerConfig, image *imagespec.ImageConfig) oci.SpecOpts { + if image.ArgsEscaped { + return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) (err error) { + // firstArgFromImg is a flag that is returned to indicate that the first arg in the slice comes from either the + // image Entrypoint or Cmd. If the first arg instead comes from the container config (e.g. overriding the image values), + // it should be false. This is done to support the non-OCI ArgsEscaped field that Docker used to determine how the image + // entrypoint and cmd should be interpreted. + // + args, firstArgFromImg, err := getArgs(image.Entrypoint, image.Cmd, config.GetCommand(), config.GetArgs()) + if err != nil { + return err + } + + var cmdLine string + if image.ArgsEscaped && firstArgFromImg { + cmdLine = args[0] + if len(args) > 1 { + cmdLine += " " + escapeAndCombineArgsWindows(args[1:]) + } + } else { + cmdLine = escapeAndCombineArgsWindows(args) + } + + return oci.WithProcessCommandLine(cmdLine)(ctx, client, c, s) + } + } + // if ArgsEscaped is not set + return func(ctx context.Context, client oci.Client, c *containers.Container, s *runtimespec.Spec) (err error) { + args, _, err := getArgs(image.Entrypoint, image.Cmd, config.GetCommand(), config.GetArgs()) + if err != nil { + return err + } + return oci.WithProcessArgs(args...)(ctx, client, c, s) + } +} + +// getArgs is used to evaluate the overall args for the container by taking into account the image command and entrypoints +// along with the container command and entrypoints specified through the podspec if any +func getArgs(imgEntrypoint []string, imgCmd []string, ctrEntrypoint []string, ctrCmd []string) ([]string, bool, error) { + //nolint:dupword + // firstArgFromImg is a flag that is returned to indicate that the first arg in the slice comes from either the image + // Entrypoint or Cmd. 
If the first arg instead comes from the container config (e.g. overriding the image values), + // it should be false. + // Essentially this means firstArgFromImg should be true iff: + // Ctr entrypoint ctr cmd image entrypoint image cmd firstArgFromImg + // -------------------------------------------------------------------------------- + // nil nil exists nil true + // nil nil nil exists true + + // This is needed to support the non-OCI ArgsEscaped field used by Docker. ArgsEscaped is used for + // Windows images to indicate that the command has already been escaped and should be + // used directly as the command line. + var firstArgFromImg bool + entrypoint, cmd := ctrEntrypoint, ctrCmd + // The following logic is migrated from https://github.com/moby/moby/blob/master/daemon/commit.go + // TODO(random-liu): Clearly define the commands overwrite behavior. + if len(entrypoint) == 0 { + // Copy array to avoid data race. + if len(cmd) == 0 { + cmd = append([]string{}, imgCmd...) + if len(imgCmd) > 0 { + firstArgFromImg = true + } + } + if entrypoint == nil { + entrypoint = append([]string{}, imgEntrypoint...) + if len(imgEntrypoint) > 0 || len(ctrCmd) == 0 { + firstArgFromImg = true + } + } + } + if len(entrypoint) == 0 && len(cmd) == 0 { + return nil, false, errors.New("no command specified") + } + return append(entrypoint, cmd...), firstArgFromImg, nil +} diff --git a/pkg/cri/opts/spec_windows_test.go b/pkg/cri/opts/spec_windows_test.go new file mode 100644 index 0000000..a2beb6e --- /dev/null +++ b/pkg/cri/opts/spec_windows_test.go @@ -0,0 +1,54 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package opts + +import ( + "fmt" + "strings" + "testing" + + osinterface "github.com/containerd/containerd/pkg/os" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" +) + +func TestDriveMounts(t *testing.T) { + tests := []struct { + mnt *runtime.Mount + expectedContainerPath string + expectedError error + }{ + {&runtime.Mount{HostPath: `C:\`, ContainerPath: `D:\foo`}, `D:\foo`, nil}, + {&runtime.Mount{HostPath: `C:\`, ContainerPath: `D:\`}, `D:\`, nil}, + {&runtime.Mount{HostPath: `C:\`, ContainerPath: `D:`}, `D:`, nil}, + {&runtime.Mount{HostPath: `\\.\pipe\a_fake_pipe_name_that_shouldnt_exist`, ContainerPath: `\\.\pipe\foo`}, `\\.\pipe\foo`, nil}, + // If `C:\` is passed as container path it should continue and forward that to HCS and fail + // to align with docker's behavior. + {&runtime.Mount{HostPath: `C:\`, ContainerPath: `C:\`}, `C:\`, nil}, + + // If `C:` is passed we can detect and fail immediately. 
+ {&runtime.Mount{HostPath: `C:\`, ContainerPath: `C:`}, ``, fmt.Errorf("destination path can not be C drive")}, + } + var realOS osinterface.RealOS + for _, test := range tests { + parsedMount, err := parseMount(realOS, test.mnt) + if err != nil && !strings.EqualFold(err.Error(), test.expectedError.Error()) { + t.Fatalf("expected err: %s, got %s instead", test.expectedError, err) + } else if err == nil && test.expectedContainerPath != parsedMount.Destination { + t.Fatalf("expected container path: %s, got %s instead", test.expectedContainerPath, parsedMount.Destination) + } + } +} diff --git a/pkg/cri/server/bandwidth/linux.go b/pkg/cri/server/bandwidth/linux.go index e8d7108..6456c4a 100644 --- a/pkg/cri/server/bandwidth/linux.go +++ b/pkg/cri/server/bandwidth/linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/pkg/cri/server/bandwidth/unsupported.go b/pkg/cri/server/bandwidth/unsupported.go index 12c5ad8..9982070 100644 --- a/pkg/cri/server/bandwidth/unsupported.go +++ b/pkg/cri/server/bandwidth/unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/pkg/cri/server/cni_conf_syncer.go b/pkg/cri/server/cni_conf_syncer.go index 401cb34..60fe1e1 100644 --- a/pkg/cri/server/cni_conf_syncer.go +++ b/pkg/cri/server/cni_conf_syncer.go @@ -17,12 +17,13 @@ package server import ( + "fmt" "os" + "path/filepath" "sync" cni "github.com/containerd/go-cni" "github.com/fsnotify/fsnotify" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -43,15 +44,22 @@ type cniNetConfSyncer struct { func newCNINetConfSyncer(confDir string, netPlugin cni.CNI, loadOpts []cni.Opt) (*cniNetConfSyncer, error) { watcher, err := fsnotify.NewWatcher() if err != nil { - return nil, errors.Wrap(err, "failed to create fsnotify watcher") + return nil, fmt.Errorf("failed to create fsnotify watcher: %w", err) + } + + // /etc/cni has to be readable for non-root users (0755), because /etc/cni/tuning/allowlist.conf is used for rootless mode too. 
+ // This file was introduced in CNI plugins 1.2.0 (https://github.com/containernetworking/plugins/pull/693), and its path is hard-coded. + confDirParent := filepath.Dir(confDir) + if err := os.MkdirAll(confDirParent, 0755); err != nil { + return nil, fmt.Errorf("failed to create the parent of the cni conf dir=%s: %w", confDirParent, err) } if err := os.MkdirAll(confDir, 0700); err != nil { - return nil, errors.Wrapf(err, "failed to create cni conf dir=%s for watch", confDir) + return nil, fmt.Errorf("failed to create cni conf dir=%s for watch: %w", confDir, err) } if err := watcher.Add(confDir); err != nil { - return nil, errors.Wrapf(err, "failed to watch cni conf dir %s", confDir) + return nil, fmt.Errorf("failed to watch cni conf dir %s: %w", confDir, err) } syncer := &cniNetConfSyncer{ diff --git a/pkg/cri/server/container_attach.go b/pkg/cri/server/container_attach.go index b8b8a2c..a952150 100644 --- a/pkg/cri/server/container_attach.go +++ b/pkg/cri/server/container_attach.go @@ -17,14 +17,14 @@ package server import ( + "fmt" "io" "github.com/containerd/containerd" "github.com/containerd/containerd/log" - "github.com/pkg/errors" "golang.org/x/net/context" "k8s.io/client-go/tools/remotecommand" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" cio "github.com/containerd/containerd/pkg/cri/io" ) @@ -33,11 +33,11 @@ import ( func (c *criService) Attach(ctx context.Context, r *runtime.AttachRequest) (*runtime.AttachResponse, error) { cntr, err := c.containerStore.Get(r.GetContainerId()) if err != nil { - return nil, errors.Wrap(err, "failed to find container in store") + return nil, fmt.Errorf("failed to find container in store: %w", err) } state := cntr.Status.Get().State() if state != runtime.ContainerState_CONTAINER_RUNNING { - return nil, errors.Errorf("container is in %s state", criContainerStateToString(state)) + return nil, fmt.Errorf("container is in %s state", criContainerStateToString(state)) } return 
c.streamServer.GetAttach(r) } @@ -49,18 +49,18 @@ func (c *criService) attachContainer(ctx context.Context, id string, stdin io.Re // Get container from our container store. cntr, err := c.containerStore.Get(id) if err != nil { - return errors.Wrapf(err, "failed to find container %q in store", id) + return fmt.Errorf("failed to find container %q in store: %w", id, err) } id = cntr.ID state := cntr.Status.Get().State() if state != runtime.ContainerState_CONTAINER_RUNNING { - return errors.Errorf("container is in %s state", criContainerStateToString(state)) + return fmt.Errorf("container is in %s state", criContainerStateToString(state)) } task, err := cntr.Container.Task(ctx, nil) if err != nil { - return errors.Wrap(err, "failed to load task") + return fmt.Errorf("failed to load task: %w", err) } handleResizing(ctx, resize, func(size remotecommand.TerminalSize) { if err := task.Resize(ctx, uint32(size.Width), uint32(size.Height)); err != nil { diff --git a/pkg/cri/server/container_checkpoint.go b/pkg/cri/server/container_checkpoint.go new file mode 100644 index 0000000..8a6e799 --- /dev/null +++ b/pkg/cri/server/container_checkpoint.go @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package server + +import ( + "context" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" +) + +func (c *criService) CheckpointContainer(ctx context.Context, r *runtime.CheckpointContainerRequest) (res *runtime.CheckpointContainerResponse, err error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckpointContainer not implemented") +} diff --git a/pkg/cri/server/container_create.go b/pkg/cri/server/container_create.go index f30d3cb..6013fee 100644 --- a/pkg/cri/server/container_create.go +++ b/pkg/cri/server/container_create.go @@ -17,7 +17,10 @@ package server import ( + "errors" + "fmt" "path/filepath" + goruntime "runtime" "time" "github.com/containerd/containerd" @@ -30,9 +33,8 @@ import ( imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" selinux "github.com/opencontainers/selinux/go-selinux" - "github.com/pkg/errors" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" cio "github.com/containerd/containerd/pkg/cri/io" customopts "github.com/containerd/containerd/pkg/cri/opts" @@ -53,12 +55,12 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta sandboxConfig := r.GetSandboxConfig() sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId()) if err != nil { - return nil, errors.Wrapf(err, "failed to find sandbox id %q", r.GetPodSandboxId()) + return nil, fmt.Errorf("failed to find sandbox id %q: %w", r.GetPodSandboxId(), err) } sandboxID := sandbox.ID s, err := sandbox.Container.Task(ctx, nil) if err != nil { - return nil, errors.Wrap(err, "failed to get sandbox container task") + return nil, fmt.Errorf("failed to get sandbox container task: %w", err) } sandboxPid := s.Pid() @@ -74,7 +76,7 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta name := 
makeContainerName(metadata, sandboxConfig.GetMetadata()) log.G(ctx).Debugf("Generated id %q for container %q", id, name) if err = c.containerNameIndex.Reserve(name, id); err != nil { - return nil, errors.Wrapf(err, "failed to reserve container name %q", name) + return nil, fmt.Errorf("failed to reserve container name %q: %w", name, err) } defer func() { // Release the name if the function returns with an error. @@ -95,24 +97,25 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta // been pulled before creating the container, so do not ensure the image. image, err := c.localResolve(config.GetImage().GetImage()) if err != nil { - return nil, errors.Wrapf(err, "failed to resolve image %q", config.GetImage().GetImage()) + return nil, fmt.Errorf("failed to resolve image %q: %w", config.GetImage().GetImage(), err) } containerdImage, err := c.toContainerdImage(ctx, image) if err != nil { - return nil, errors.Wrapf(err, "failed to get image from containerd %q", image.ID) + return nil, fmt.Errorf("failed to get image from containerd %q: %w", image.ID, err) } + start := time.Now() // Run container using the same runtime with sandbox. sandboxInfo, err := sandbox.Container.Info(ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to get sandbox %q info", sandboxID) + return nil, fmt.Errorf("failed to get sandbox %q info: %w", sandboxID, err) } // Create container root directory. 
containerRootDir := c.getContainerRootDir(id) if err = c.os.MkdirAll(containerRootDir, 0755); err != nil { - return nil, errors.Wrapf(err, "failed to create container root directory %q", - containerRootDir) + return nil, fmt.Errorf("failed to create container root directory %q: %w", + containerRootDir, err) } defer func() { if retErr != nil { @@ -125,8 +128,8 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta }() volatileContainerRootDir := c.getVolatileContainerRootDir(id) if err = c.os.MkdirAll(volatileContainerRootDir, 0755); err != nil { - return nil, errors.Wrapf(err, "failed to create volatile container root directory %q", - volatileContainerRootDir) + return nil, fmt.Errorf("failed to create volatile container root directory %q: %w", + volatileContainerRootDir, err) } defer func() { if retErr != nil { @@ -151,14 +154,14 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta ociRuntime, err := c.getSandboxRuntime(sandboxConfig, sandbox.Metadata.RuntimeHandler) if err != nil { - return nil, errors.Wrap(err, "failed to get sandbox runtime") + return nil, fmt.Errorf("failed to get sandbox runtime: %w", err) } log.G(ctx).Debugf("Use OCI runtime %+v for sandbox %q and container %q", ociRuntime, sandboxID, id) spec, err := c.containerSpec(id, sandboxID, sandboxPid, sandbox.NetNSPath, containerName, containerdImage.Name(), config, sandboxConfig, &image.ImageSpec.Config, append(mounts, volumeMounts...), ociRuntime) if err != nil { - return nil, errors.Wrapf(err, "failed to generate container %q spec", id) + return nil, fmt.Errorf("failed to generate container %q spec: %w", id, err) } meta.ProcessLabel = spec.Process.SelinuxLabel @@ -215,7 +218,7 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta containerIO, err := cio.NewContainerIO(id, cio.WithNewFIFOs(volatileContainerRootDir, config.GetTty(), config.GetStdin())) if err != nil { - return nil, errors.Wrap(err, "failed to 
create container io") + return nil, fmt.Errorf("failed to create container io: %w", err) } defer func() { if retErr != nil { @@ -227,14 +230,14 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta specOpts, err := c.containerSpecOpts(config, &image.ImageSpec.Config) if err != nil { - return nil, errors.Wrap(err, "failed to get container spec opts") + return nil, fmt.Errorf("failed to get container spec opts: %w", err) } containerLabels := buildLabels(config.Labels, image.ImageSpec.Config.Labels, containerKindContainer) runtimeOptions, err := getRuntimeOptions(sandboxInfo) if err != nil { - return nil, errors.Wrap(err, "failed to get runtime options") + return nil, fmt.Errorf("failed to get runtime options: %w", err) } opts = append(opts, containerd.WithSpec(spec, specOpts...), @@ -243,7 +246,7 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta containerd.WithContainerExtension(containerMetadataExtension, &meta)) var cntr containerd.Container if cntr, err = c.client.NewContainer(ctx, id, opts...); err != nil { - return nil, errors.Wrap(err, "failed to create containerd container") + return nil, fmt.Errorf("failed to create containerd container: %w", err) } defer func() { if retErr != nil { @@ -256,13 +259,14 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta }() status := containerstore.Status{CreatedAt: time.Now().UnixNano()} + status = copyResourcesToStatus(spec, status) container, err := containerstore.NewContainer(meta, containerstore.WithStatus(status, containerRootDir), containerstore.WithContainer(cntr), containerstore.WithContainerIO(containerIO), ) if err != nil { - return nil, errors.Wrapf(err, "failed to create internal container object for %q", id) + return nil, fmt.Errorf("failed to create internal container object for %q: %w", id, err) } defer func() { if retErr != nil { @@ -275,9 +279,11 @@ func (c *criService) CreateContainer(ctx context.Context, r 
*runtime.CreateConta // Add container into container store. if err := c.containerStore.Add(container); err != nil { - return nil, errors.Wrapf(err, "failed to add container %q into store", id) + return nil, fmt.Errorf("failed to add container %q into store: %w", id, err) } + containerCreateTimer.WithValues(ociRuntime.Type).UpdateSince(start) + return &runtime.CreateContainerResponse{ContainerId: id}, nil } @@ -299,6 +305,11 @@ func (c *criService) volumeMounts(containerRootDir string, criMounts []*runtime. } volumeID := util.GenerateID() src := filepath.Join(containerRootDir, "volumes", volumeID) + if !filepath.IsAbs(dst) && goruntime.GOOS != "windows" { + oldDst := dst + dst = filepath.Join("/", dst) + log.L.Debugf("Volume destination %q is not absolute, converted to %q", oldDst, dst) + } // addOCIBindMounts will create these volumes. mounts = append(mounts, &runtime.Mount{ ContainerPath: dst, @@ -318,19 +329,19 @@ func (c *criService) runtimeSpec(id string, baseSpecFile string, opts ...oci.Spe if baseSpecFile != "" { baseSpec, ok := c.baseOCISpecs[baseSpecFile] if !ok { - return nil, errors.Errorf("can't find base OCI spec %q", baseSpecFile) + return nil, fmt.Errorf("can't find base OCI spec %q", baseSpecFile) } spec := oci.Spec{} if err := util.DeepCopy(&spec, &baseSpec); err != nil { - return nil, errors.Wrap(err, "failed to clone OCI spec") + return nil, fmt.Errorf("failed to clone OCI spec: %w", err) } // Fix up cgroups path applyOpts := append([]oci.SpecOpts{oci.WithNamespacedCgroup()}, opts...) if err := oci.ApplyOpts(ctx, nil, container, &spec, applyOpts...); err != nil { - return nil, errors.Wrap(err, "failed to apply OCI options") + return nil, fmt.Errorf("failed to apply OCI options: %w", err) } return &spec, nil @@ -338,7 +349,7 @@ func (c *criService) runtimeSpec(id string, baseSpecFile string, opts ...oci.Spe spec, err := oci.GenerateSpec(ctx, nil, container, opts...) 
if err != nil { - return nil, errors.Wrap(err, "failed to generate spec") + return nil, fmt.Errorf("failed to generate spec: %w", err) } return spec, nil diff --git a/pkg/cri/server/container_create_linux.go b/pkg/cri/server/container_create_linux.go index 4c857df..a99b44d 100644 --- a/pkg/cri/server/container_create_linux.go +++ b/pkg/cri/server/container_create_linux.go @@ -18,6 +18,8 @@ package server import ( "bufio" + "errors" + "fmt" "io" "os" "strconv" @@ -31,8 +33,7 @@ import ( runtimespec "github.com/opencontainers/runtime-spec/specs-go" selinux "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/annotations" "github.com/containerd/containerd/pkg/cri/config" @@ -187,7 +188,7 @@ func (c *criService) containerSpec( processLabel, mountLabel, err := label.InitLabels(labelOptions) if err != nil { - return nil, errors.Wrapf(err, "failed to init selinux options %+v", securityContext.GetSelinuxOptions()) + return nil, fmt.Errorf("failed to init selinux options %+v: %w", securityContext.GetSelinuxOptions(), err) } defer func() { if retErr != nil { @@ -216,7 +217,7 @@ func (c *criService) containerSpec( } } - specOpts = append(specOpts, customopts.WithDevices(c.os, config), + specOpts = append(specOpts, customopts.WithDevices(c.os, config, c.config.DeviceOwnershipFromSecurityContext), customopts.WithCapabilities(securityContext, c.allCaps)) if securityContext.GetPrivileged() { @@ -259,6 +260,15 @@ func (c *criService) containerSpec( supplementalGroups := securityContext.GetSupplementalGroups() + // Get RDT class + rdtClass, err := c.rdtClassFromAnnotations(config.GetMetadata().GetName(), config.Annotations, sandboxConfig.Annotations) + if err != nil { + return nil, fmt.Errorf("failed to set RDT class: %w", err) + } + if rdtClass != "" { + specOpts = 
append(specOpts, oci.WithRdt(rdtClass, "", "")) + } + for pKey, pValue := range getPassthroughAnnotations(sandboxConfig.Annotations, ociRuntime.PodAnnotations) { specOpts = append(specOpts, customopts.WithAnnotation(pKey, pValue)) @@ -277,7 +287,7 @@ func (c *criService) containerSpec( if nsOpts.GetPid() == runtime.NamespaceMode_TARGET { targetContainer, err := c.validateTargetContainer(sandboxID, nsOpts.TargetId) if err != nil { - return nil, errors.Wrapf(err, "invalid target container") + return nil, fmt.Errorf("invalid target container: %w", err) } status := targetContainer.Status.Get() @@ -291,6 +301,7 @@ func (c *criService) containerSpec( customopts.WithAnnotation(annotations.ContainerType, annotations.ContainerTypeContainer), customopts.WithAnnotation(annotations.SandboxID, sandboxID), customopts.WithAnnotation(annotations.SandboxNamespace, sandboxConfig.GetMetadata().GetNamespace()), + customopts.WithAnnotation(annotations.SandboxUID, sandboxConfig.GetMetadata().GetUid()), customopts.WithAnnotation(annotations.SandboxName, sandboxConfig.GetMetadata().GetName()), customopts.WithAnnotation(annotations.ContainerName, containerName), customopts.WithAnnotation(annotations.ImageName, imageName), @@ -319,7 +330,7 @@ func (c *criService) containerSpecOpts(config *runtime.ContainerConfig, imageCon securityContext.GetRunAsUser(), securityContext.GetRunAsGroup()) if err != nil { - return nil, errors.Wrap(err, "failed to generate user string") + return nil, fmt.Errorf("failed to generate user string: %w", err) } if userstr == "" { // Lastly, since no user override was passed via CRI try to set via OCI @@ -330,20 +341,23 @@ func (c *criService) containerSpecOpts(config *runtime.ContainerConfig, imageCon specOpts = append(specOpts, oci.WithUser(userstr)) } + userstr = "0" // runtime default if securityContext.GetRunAsUsername() != "" { userstr = securityContext.GetRunAsUsername() - } else { - // Even if RunAsUser is not set, we still call `GetValue` to get uid 0. 
- // Because it is still useful to get additional gids for uid 0. + } else if securityContext.GetRunAsUser() != nil { userstr = strconv.FormatInt(securityContext.GetRunAsUser().GetValue(), 10) + } else if imageConfig.User != "" { + parts := strings.Split(imageConfig.User, ":") + userstr = parts[0] } - specOpts = append(specOpts, customopts.WithAdditionalGIDs(userstr)) + specOpts = append(specOpts, customopts.WithAdditionalGIDs(userstr), + customopts.WithSupplementalGroups(securityContext.GetSupplementalGroups())) asp := securityContext.GetApparmor() if asp == nil { asp, err = generateApparmorSecurityProfile(securityContext.GetApparmorProfile()) //nolint:staticcheck // Deprecated but we don't want to remove yet if err != nil { - return nil, errors.Wrap(err, "failed to generate apparmor spec opts") + return nil, fmt.Errorf("failed to generate apparmor spec opts: %w", err) } } apparmorSpecOpts, err := generateApparmorSpecOpts( @@ -351,7 +365,7 @@ func (c *criService) containerSpecOpts(config *runtime.ContainerConfig, imageCon securityContext.GetPrivileged(), c.apparmorEnabled()) if err != nil { - return nil, errors.Wrap(err, "failed to generate apparmor spec opts") + return nil, fmt.Errorf("failed to generate apparmor spec opts: %w", err) } if apparmorSpecOpts != nil { specOpts = append(specOpts, apparmorSpecOpts) @@ -363,7 +377,7 @@ func (c *criService) containerSpecOpts(config *runtime.ContainerConfig, imageCon securityContext.GetSeccompProfilePath(), //nolint:staticcheck // Deprecated but we don't want to remove yet c.config.UnsetSeccompProfile) if err != nil { - return nil, errors.Wrap(err, "failed to generate seccomp spec opts") + return nil, fmt.Errorf("failed to generate seccomp spec opts: %w", err) } } seccompSpecOpts, err := c.generateSeccompSpecOpts( @@ -371,7 +385,7 @@ func (c *criService) containerSpecOpts(config *runtime.ContainerConfig, imageCon securityContext.GetPrivileged(), c.seccompEnabled()) if err != nil { - return nil, errors.Wrap(err, "failed to 
generate seccomp spec opts") + return nil, fmt.Errorf("failed to generate seccomp spec opts: %w", err) } if seccompSpecOpts != nil { specOpts = append(specOpts, seccompSpecOpts) @@ -408,7 +422,7 @@ func generateSecurityProfile(profilePath string) (*runtime.SecurityProfile, erro default: // Require and Trim default profile name prefix if !strings.HasPrefix(profilePath, profileNamePrefix) { - return nil, errors.Errorf("invalid profile %q", profilePath) + return nil, fmt.Errorf("invalid profile %q", profilePath) } return &runtime.SecurityProfile{ ProfileType: runtime.SecurityProfile_Localhost, @@ -494,9 +508,9 @@ func generateApparmorSpecOpts(sp *runtime.SecurityProfile, privileged, apparmorE appArmorProfile := strings.TrimPrefix(sp.LocalhostRef, profileNamePrefix) if profileExists, err := appArmorProfileExists(appArmorProfile); !profileExists { if err != nil { - return nil, errors.Wrap(err, "failed to generate apparmor spec opts") + return nil, fmt.Errorf("failed to generate apparmor spec opts: %w", err) } - return nil, errors.Errorf("apparmor profile not found %s", appArmorProfile) + return nil, fmt.Errorf("apparmor profile not found %s", appArmorProfile) } return apparmor.WithProfile(appArmorProfile), nil default: @@ -559,7 +573,7 @@ func generateUserString(username string, uid, gid *runtime.Int64Value) (string, } if userstr == "" { if groupstr != "" { - return "", errors.Errorf("user group %q is specified without user", groupstr) + return "", fmt.Errorf("user group %q is specified without user", groupstr) } return "", nil } diff --git a/pkg/cri/server/container_create_linux_test.go b/pkg/cri/server/container_create_linux_test.go index a15ec2b..ec538d1 100644 --- a/pkg/cri/server/container_create_linux_test.go +++ b/pkg/cri/server/container_create_linux_test.go @@ -18,6 +18,7 @@ package server import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -33,10 +34,9 @@ import ( imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec 
"github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cap" "github.com/containerd/containerd/pkg/cri/annotations" @@ -90,6 +90,7 @@ func getCreateContainerTestData() (*runtime.ContainerConfig, *runtime.PodSandbox OomScoreAdj: 500, CpusetCpus: "0-1", CpusetMems: "2-3", + Unified: map[string]string{"memory.min": "65536", "memory.swap.max": "1024"}, }, SecurityContext: &runtime.LinuxContainerSecurityContext{ SupplementalGroups: []int64{1111, 2222}, @@ -136,6 +137,7 @@ func getCreateContainerTestData() (*runtime.ContainerConfig, *runtime.PodSandbox assert.EqualValues(t, *spec.Linux.Resources.CPU.Shares, 300) assert.EqualValues(t, spec.Linux.Resources.CPU.Cpus, "0-1") assert.EqualValues(t, spec.Linux.Resources.CPU.Mems, "2-3") + assert.EqualValues(t, spec.Linux.Resources.Unified, map[string]string{"memory.min": "65536", "memory.swap.max": "1024"}) assert.EqualValues(t, *spec.Linux.Resources.Memory.Limit, 400) assert.EqualValues(t, *spec.Process.OOMScoreAdj, 500) @@ -177,6 +179,9 @@ func getCreateContainerTestData() (*runtime.ContainerConfig, *runtime.PodSandbox assert.Contains(t, spec.Annotations, annotations.SandboxNamespace) assert.EqualValues(t, spec.Annotations[annotations.SandboxNamespace], "test-sandbox-ns") + assert.Contains(t, spec.Annotations, annotations.SandboxUID) + assert.EqualValues(t, spec.Annotations[annotations.SandboxUID], "test-sandbox-uid") + assert.Contains(t, spec.Annotations, annotations.SandboxName) assert.EqualValues(t, spec.Annotations[annotations.SandboxName], "test-sandbox-name") @@ -187,7 +192,6 @@ func getCreateContainerTestData() (*runtime.ContainerConfig, *runtime.PodSandbox } func TestContainerCapabilities(t *testing.T) { - t.Skip("It requires HugeTLB 
controller enabled which requires mounting cgroupfs. Not achievable during the build.") testID := "test-id" testSandboxID := "sandbox-id" testContainerName := "container-name" @@ -267,7 +271,6 @@ func TestContainerCapabilities(t *testing.T) { } func TestContainerSpecTty(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. Not achievable during the build.") testID := "test-id" testSandboxID := "sandbox-id" testContainerName := "container-name" @@ -290,7 +293,6 @@ func TestContainerSpecTty(t *testing.T) { } func TestContainerSpecDefaultPath(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. Not achievable during the build.") testID := "test-id" testSandboxID := "sandbox-id" testContainerName := "container-name" @@ -313,7 +315,6 @@ func TestContainerSpecDefaultPath(t *testing.T) { } func TestContainerSpecReadonlyRootfs(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. Not achievable during the build.") testID := "test-id" testSandboxID := "sandbox-id" testContainerName := "container-name" @@ -331,7 +332,6 @@ func TestContainerSpecReadonlyRootfs(t *testing.T) { } func TestContainerSpecWithExtraMounts(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. Not achievable during the build.") testID := "test-id" testSandboxID := "sandbox-id" testContainerName := "container-name" @@ -393,7 +393,6 @@ func TestContainerSpecWithExtraMounts(t *testing.T) { } func TestContainerAndSandboxPrivileged(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. Not achievable during the build.") testID := "test-id" testSandboxID := "sandbox-id" testContainerName := "container-name" @@ -607,7 +606,6 @@ func TestContainerMounts(t *testing.T) { } func TestPrivilegedBindMount(t *testing.T) { - t.Skip("It requires privilege to mount sysfs and cgroupfs. 
Not achievable during the build.") testPid := uint32(1234) c := newTestCRIService() testSandboxID := "sandbox-id" @@ -758,7 +756,6 @@ func TestMountPropagation(t *testing.T) { } func TestPidNamespace(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. Not achievable during the build.") testID := "test-id" testPid := uint32(1234) testSandboxID := "sandbox-id" @@ -800,7 +797,6 @@ func TestPidNamespace(t *testing.T) { } func TestNoDefaultRunMount(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. Not achievable during the build.") testID := "test-id" testPid := uint32(1234) testSandboxID := "sandbox-id" @@ -1105,7 +1101,6 @@ func TestGenerateApparmorSpecOpts(t *testing.T) { } func TestMaskedAndReadonlyPaths(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. Not achievable during the build.") testID := "test-id" testSandboxID := "sandbox-id" testContainerName := "container-name" @@ -1194,7 +1189,6 @@ func TestMaskedAndReadonlyPaths(t *testing.T) { } func TestHostname(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. 
Not achievable during the build.") testID := "test-id" testSandboxID := "sandbox-id" testContainerName := "container-name" @@ -1325,8 +1319,154 @@ func TestGenerateUserString(t *testing.T) { } } +func TestProcessUser(t *testing.T) { + testID := "test-id" + testSandboxID := "sandbox-id" + testContainerName := "container-name" + testPid := uint32(1234) + ociRuntime := config.Runtime{} + c := newTestCRIService() + testContainer := &containers.Container{ID: "64ddfe361f0099f8d59075398feeb3dcb3863b6851df7b946744755066c03e9d"} + ctx := context.Background() + + etcPasswd := ` +root:x:0:0:root:/root:/bin/sh +alice:x:1000:1000:alice:/home/alice:/bin/sh +` // #nosec G101 + etcGroup := ` +root:x:0 +alice:x:1000: +additional-group-for-alice:x:11111:alice +additional-group-for-root:x:22222:root +` + tempRootDir := t.TempDir() + require.NoError(t, + os.MkdirAll(filepath.Join(tempRootDir, "etc"), 0755), + ) + require.NoError(t, + os.WriteFile(filepath.Join(tempRootDir, "etc", "passwd"), []byte(etcPasswd), 0644), + ) + require.NoError(t, + os.WriteFile(filepath.Join(tempRootDir, "etc", "group"), []byte(etcGroup), 0644), + ) + + for desc, test := range map[string]struct { + imageConfigUser string + securityContext *runtime.LinuxContainerSecurityContext + expected runtimespec.User + }{ + "Only SecurityContext was set, SecurityContext defines User": { + securityContext: &runtime.LinuxContainerSecurityContext{ + RunAsUser: &runtime.Int64Value{Value: 1000}, + RunAsGroup: &runtime.Int64Value{Value: 2000}, + SupplementalGroups: []int64{3333}, + }, + expected: runtimespec.User{UID: 1000, GID: 2000, AdditionalGids: []uint32{2000, 3333, 11111}}, + }, + "Only imageConfig.User was set, imageConfig.User defines User": { + imageConfigUser: "1000", + securityContext: nil, + expected: runtimespec.User{UID: 1000, GID: 1000, AdditionalGids: []uint32{1000, 11111}}, + }, + "Both SecurityContext and ImageConfig.User was set, SecurityContext defines User": { + imageConfigUser: "0", + securityContext: 
&runtime.LinuxContainerSecurityContext{ + RunAsUser: &runtime.Int64Value{Value: 1000}, + RunAsGroup: &runtime.Int64Value{Value: 2000}, + SupplementalGroups: []int64{3333}, + }, + expected: runtimespec.User{UID: 1000, GID: 2000, AdditionalGids: []uint32{2000, 3333, 11111}}, + }, + "No SecurityContext nor ImageConfig.User were set, runtime default defines User": { + expected: runtimespec.User{UID: 0, GID: 0, AdditionalGids: []uint32{0, 22222}}, + }, + } { + desc := desc + t.Run(desc, func(t *testing.T) { + containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData() + containerConfig.Linux.SecurityContext = test.securityContext + imageConfig.User = test.imageConfigUser + + spec, err := c.containerSpec(testID, testSandboxID, testPid, "", testContainerName, testImageName, containerConfig, sandboxConfig, imageConfig, nil, ociRuntime) + require.NoError(t, err) + + spec.Root.Path = tempRootDir // simulating /etc/{passwd, group} + opts, err := c.containerSpecOpts(containerConfig, imageConfig) + require.NoError(t, err) + oci.ApplyOpts(ctx, nil, testContainer, spec, opts...) 
+ + require.Equal(t, test.expected, spec.Process.User) + }) + } +} + +func TestNonRootUserAndDevices(t *testing.T) { + testPid := uint32(1234) + c := newTestCRIService() + testSandboxID := "sandbox-id" + testContainerName := "container-name" + containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData() + + hostDevicesRaw, err := oci.HostDevices() + assert.NoError(t, err) + + testDevice := hostDevicesRaw[0] + + for desc, test := range map[string]struct { + uid, gid *runtime.Int64Value + deviceOwnershipFromSecurityContext bool + expectedDeviceUID uint32 + expectedDeviceGID uint32 + }{ + "expect non-root container's Devices Uid/Gid to be the same as the device Uid/Gid on the host when deviceOwnershipFromSecurityContext is disabled": { + uid: &runtime.Int64Value{Value: 1}, + gid: &runtime.Int64Value{Value: 10}, + expectedDeviceUID: *testDevice.UID, + expectedDeviceGID: *testDevice.GID, + }, + "expect root container's Devices Uid/Gid to be the same as the device Uid/Gid on the host when deviceOwnershipFromSecurityContext is disabled": { + uid: &runtime.Int64Value{Value: 0}, + gid: &runtime.Int64Value{Value: 0}, + expectedDeviceUID: *testDevice.UID, + expectedDeviceGID: *testDevice.GID, + }, + "expect non-root container's Devices Uid/Gid to be the same as RunAsUser/RunAsGroup when deviceOwnershipFromSecurityContext is enabled": { + uid: &runtime.Int64Value{Value: 1}, + gid: &runtime.Int64Value{Value: 10}, + deviceOwnershipFromSecurityContext: true, + expectedDeviceUID: 1, + expectedDeviceGID: 10, + }, + "expect root container's Devices Uid/Gid to be the same as the device Uid/Gid on the host when deviceOwnershipFromSecurityContext is enabled": { + uid: &runtime.Int64Value{Value: 0}, + gid: &runtime.Int64Value{Value: 0}, + deviceOwnershipFromSecurityContext: true, + expectedDeviceUID: *testDevice.UID, + expectedDeviceGID: *testDevice.GID, + }, + } { + t.Logf("TestCase %q", desc) + + c.config.DeviceOwnershipFromSecurityContext = 
test.deviceOwnershipFromSecurityContext + containerConfig.Linux.SecurityContext.RunAsUser = test.uid + containerConfig.Linux.SecurityContext.RunAsGroup = test.gid + containerConfig.Devices = []*runtime.Device{ + { + ContainerPath: testDevice.Path, + HostPath: testDevice.Path, + Permissions: "r", + }, + } + + spec, err := c.containerSpec(t.Name(), testSandboxID, testPid, "", testContainerName, testImageName, containerConfig, sandboxConfig, imageConfig, nil, config.Runtime{}) + assert.NoError(t, err) + + assert.Equal(t, test.expectedDeviceUID, *spec.Linux.Devices[0].UID) + assert.Equal(t, test.expectedDeviceGID, *spec.Linux.Devices[0].GID) + } +} + func TestPrivilegedDevices(t *testing.T) { - t.Skip("It requires privilege to test devices. Not achievable during the build.") testPid := uint32(1234) c := newTestCRIService() testSandboxID := "sandbox-id" @@ -1389,7 +1529,6 @@ func TestPrivilegedDevices(t *testing.T) { } func TestBaseOCISpec(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. 
Not achievable during the build.") c := newTestCRIService() baseLimit := int64(100) c.baseOCISpecs = map[string]*oci.Spec{ diff --git a/pkg/cri/server/container_create_other.go b/pkg/cri/server/container_create_other.go index c532e0e..21b2cb6 100644 --- a/pkg/cri/server/container_create_other.go +++ b/pkg/cri/server/container_create_other.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* @@ -22,7 +23,7 @@ import ( "github.com/containerd/containerd/oci" imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/config" ) diff --git a/pkg/cri/server/container_create_other_test.go b/pkg/cri/server/container_create_other_test.go index 891c467..50be4cc 100644 --- a/pkg/cri/server/container_create_other_test.go +++ b/pkg/cri/server/container_create_other_test.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* @@ -23,7 +24,7 @@ import ( imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // checkMount is defined by all tests but not used here diff --git a/pkg/cri/server/container_create_test.go b/pkg/cri/server/container_create_test.go index 8bff1ea..3e10936 100644 --- a/pkg/cri/server/container_create_test.go +++ b/pkg/cri/server/container_create_test.go @@ -27,7 +27,7 @@ import ( runtimespec "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/config" "github.com/containerd/containerd/pkg/cri/constants" @@ -56,7 +56,6 @@ func 
checkMount(t *testing.T, mounts []runtimespec.Mount, src, dest, typ string, const testImageName = "container-image-name" func TestGeneralContainerSpec(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. Not achievable during the build.") testID := "test-id" testPid := uint32(1234) containerConfig, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData() @@ -70,7 +69,6 @@ func TestGeneralContainerSpec(t *testing.T) { } func TestPodAnnotationPassthroughContainerSpec(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. Not achievable during the build.") if goruntime.GOOS == "darwin" { t.Skip("not implemented on Darwin") } @@ -279,7 +277,6 @@ func TestVolumeMounts(t *testing.T) { } func TestContainerAnnotationPassthroughContainerSpec(t *testing.T) { - t.Skip("It requires HugeTLB controller enabled which requires mounting cgroupfs. Not achievable during the build.") if goruntime.GOOS == "darwin" { t.Skip("not implemented on Darwin") } diff --git a/pkg/cri/server/container_create_windows.go b/pkg/cri/server/container_create_windows.go index a64fe6a..0b1cf82 100644 --- a/pkg/cri/server/container_create_windows.go +++ b/pkg/cri/server/container_create_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -19,10 +17,13 @@ package server import ( + "errors" + "strconv" + "github.com/containerd/containerd/oci" imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/annotations" "github.com/containerd/containerd/pkg/cri/config" @@ -47,9 +48,18 @@ func (c *criService) containerSpec( extraMounts []*runtime.Mount, ociRuntime config.Runtime, ) (*runtimespec.Spec, error) { - specOpts := []oci.SpecOpts{ - customopts.WithProcessArgs(config, imageConfig), + var specOpts []oci.SpecOpts + specOpts = append(specOpts, customopts.WithProcessCommandLineOrArgsForWindows(config, imageConfig)) + + // All containers in a pod need to have HostProcess set if it was set on the pod, + // and vice versa no containers in the pod can be HostProcess if the pods spec + // didn't have the field set. The only case that is valid is if these are the same value. 
+ cntrHpc := config.GetWindows().GetSecurityContext().GetHostProcess() + sandboxHpc := sandboxConfig.GetWindows().GetSecurityContext().GetHostProcess() + if cntrHpc != sandboxHpc { + return nil, errors.New("pod spec and all containers inside must have the HostProcess field set to be valid") } + if config.GetWorkingDir() != "" { specOpts = append(specOpts, oci.WithProcessCwd(config.GetWorkingDir())) } else if imageConfig.WorkingDir != "" { @@ -117,9 +127,11 @@ func (c *criService) containerSpec( customopts.WithAnnotation(annotations.ContainerType, annotations.ContainerTypeContainer), customopts.WithAnnotation(annotations.SandboxID, sandboxID), customopts.WithAnnotation(annotations.SandboxNamespace, sandboxConfig.GetMetadata().GetNamespace()), + customopts.WithAnnotation(annotations.SandboxUID, sandboxConfig.GetMetadata().GetUid()), customopts.WithAnnotation(annotations.SandboxName, sandboxConfig.GetMetadata().GetName()), customopts.WithAnnotation(annotations.ContainerName, containerName), customopts.WithAnnotation(annotations.ImageName, imageName), + customopts.WithAnnotation(annotations.WindowsHostProcess, strconv.FormatBool(sandboxHpc)), ) return c.runtimeSpec(id, ociRuntime.BaseRuntimeSpec, specOpts...) } diff --git a/pkg/cri/server/container_create_windows_test.go b/pkg/cri/server/container_create_windows_test.go index 3128802..7f8578b 100644 --- a/pkg/cri/server/container_create_windows_test.go +++ b/pkg/cri/server/container_create_windows_test.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -24,12 +22,27 @@ import ( imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "github.com/stretchr/testify/require" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/annotations" "github.com/containerd/containerd/pkg/cri/config" ) +func getSandboxConfig() *runtime.PodSandboxConfig { + return &runtime.PodSandboxConfig{ + Metadata: &runtime.PodSandboxMetadata{ + Name: "test-sandbox-name", + Uid: "test-sandbox-uid", + Namespace: "test-sandbox-ns", + Attempt: 2, + }, + Windows: &runtime.WindowsPodSandboxConfig{}, + Hostname: "test-hostname", + Annotations: map[string]string{"c": "d"}, + } +} + func getCreateContainerTestData() (*runtime.ContainerConfig, *runtime.PodSandboxConfig, *imagespec.ImageConfig, func(*testing.T, string, string, uint32, *runtimespec.Spec)) { config := &runtime.ContainerConfig{ @@ -74,19 +87,11 @@ func getCreateContainerTestData() (*runtime.ContainerConfig, *runtime.PodSandbox SecurityContext: &runtime.WindowsContainerSecurityContext{ RunAsUsername: "test-user", CredentialSpec: "{\"test\": \"spec\"}", + HostProcess: false, }, }, } - sandboxConfig := &runtime.PodSandboxConfig{ - Metadata: &runtime.PodSandboxMetadata{ - Name: "test-sandbox-name", - Uid: "test-sandbox-uid", - Namespace: "test-sandbox-ns", - Attempt: 2, - }, - Hostname: "test-hostname", - Annotations: map[string]string{"c": "d"}, - } + sandboxConfig := getSandboxConfig() imageConfig := &imagespec.ImageConfig{ Env: []string{"ik1=iv1", "ik2=iv2", "ik3=iv3=iv3bis", "ik4=iv4=iv4bis=boop"}, Entrypoint: []string{"/entrypoint"}, @@ -130,8 +135,14 @@ func getCreateContainerTestData() (*runtime.ContainerConfig, *runtime.PodSandbox assert.Contains(t, spec.Annotations, annotations.SandboxNamespace) assert.EqualValues(t, spec.Annotations[annotations.SandboxNamespace], "test-sandbox-ns") + 
assert.Contains(t, spec.Annotations, annotations.SandboxUID) + assert.EqualValues(t, spec.Annotations[annotations.SandboxUID], "test-sandbox-uid") + assert.Contains(t, spec.Annotations, annotations.SandboxName) assert.EqualValues(t, spec.Annotations[annotations.SandboxName], "test-sandbox-name") + + assert.Contains(t, spec.Annotations, annotations.WindowsHostProcess) + assert.EqualValues(t, spec.Annotations[annotations.WindowsHostProcess], "false") } return config, sandboxConfig, imageConfig, specCheck } @@ -193,3 +204,153 @@ func TestMountNamedPipe(t *testing.T) { specCheck(t, testID, testSandboxID, testPid, spec) checkMount(t, spec.Mounts, `\\.\pipe\foo`, `\\.\pipe\foo`, "", []string{"rw"}, nil) } + +func TestHostProcessRequirements(t *testing.T) { + testID := "test-id" + testSandboxID := "sandbox-id" + testContainerName := "container-name" + testPid := uint32(1234) + containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData() + ociRuntime := config.Runtime{} + c := newTestCRIService() + for desc, test := range map[string]struct { + containerHostProcess bool + sandboxHostProcess bool + expectError bool + }{ + "hostprocess container in non-hostprocess sandbox should fail": { + containerHostProcess: true, + sandboxHostProcess: false, + expectError: true, + }, + "hostprocess container in hostprocess sandbox should be fine": { + containerHostProcess: true, + sandboxHostProcess: true, + expectError: false, + }, + "non-hostprocess container in hostprocess sandbox should fail": { + containerHostProcess: false, + sandboxHostProcess: true, + expectError: true, + }, + "non-hostprocess container in non-hostprocess sandbox should be fine": { + containerHostProcess: false, + sandboxHostProcess: false, + expectError: false, + }, + } { + t.Run(desc, func(t *testing.T) { + containerConfig.Windows.SecurityContext.HostProcess = test.containerHostProcess + sandboxConfig.Windows.SecurityContext = &runtime.WindowsSandboxSecurityContext{ + HostProcess: 
test.sandboxHostProcess, + } + _, err := c.containerSpec(testID, testSandboxID, testPid, "", testContainerName, testImageName, containerConfig, sandboxConfig, imageConfig, nil, ociRuntime) + if test.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestEntrypointAndCmdForArgsEscaped(t *testing.T) { + testID := "test-id" + testSandboxID := "sandbox-id" + testContainerName := "container-name" + testPid := uint32(1234) + nsPath := "test-ns" + c := newTestCRIService() + + for name, test := range map[string]struct { + imgEntrypoint []string + imgCmd []string + command []string + args []string + expectedArgs []string + expectedCommandLine string + ArgsEscaped bool + }{ + // override image entrypoint and cmd in shell form with container args and verify expected runtime spec + "TestShellFormImgEntrypointCmdWithCtrArgs": { + imgEntrypoint: []string{`"C:\My Folder\MyProcess.exe" -arg1 "test value"`}, + imgCmd: []string{`cmd -args "hello world"`}, + command: nil, + args: []string{`cmd -args "additional args"`}, + expectedArgs: nil, + expectedCommandLine: `"C:\My Folder\MyProcess.exe" -arg1 "test value" "cmd -args \"additional args\""`, + ArgsEscaped: true, + }, + // check image entrypoint and cmd in shell form without overriding with container command and args and verify expected runtime spec + "TestShellFormImgEntrypointCmdWithoutCtrArgs": { + imgEntrypoint: []string{`"C:\My Folder\MyProcess.exe" -arg1 "test value"`}, + imgCmd: []string{`cmd -args "hello world"`}, + command: nil, + args: nil, + expectedArgs: nil, + expectedCommandLine: `"C:\My Folder\MyProcess.exe" -arg1 "test value" "cmd -args \"hello world\""`, + ArgsEscaped: true, + }, + // override image entrypoint and cmd by container command and args in shell form and verify expected runtime spec + "TestShellFormImgEntrypointCmdWithCtrEntrypointAndArgs": { + imgEntrypoint: []string{`"C:\My Folder\MyProcess.exe" -arg1 "test value"`}, + imgCmd: []string{`cmd -args "hello 
world"`}, + command: []string{`C:\My Folder\MyProcess.exe`, "-arg1", "additional test value"}, + args: []string{"cmd", "-args", "additional args"}, + expectedArgs: nil, + expectedCommandLine: `"C:\My Folder\MyProcess.exe" -arg1 "additional test value" cmd -args "additional args"`, + ArgsEscaped: true, + }, + // override image cmd by container args in exec form and verify expected runtime spec + "TestExecFormImgEntrypointCmdWithCtrArgs": { + imgEntrypoint: []string{`C:\My Folder\MyProcess.exe`, "-arg1", "test value"}, + imgCmd: []string{"cmd", "-args", "hello world"}, + command: nil, + args: []string{"additional", "args"}, + expectedArgs: []string{`C:\My Folder\MyProcess.exe`, "-arg1", "test value", "additional", "args"}, + expectedCommandLine: "", + ArgsEscaped: false, + }, + // check image entrypoint and cmd in exec form without overriding with container command and args and verify expected runtime spec + "TestExecFormImgEntrypointCmdWithoutCtrArgs": { + imgEntrypoint: []string{`C:\My Folder\MyProcess.exe`, "-arg1", "test value"}, + imgCmd: []string{"cmd", "-args", "hello world"}, + command: nil, + args: nil, + expectedArgs: []string{`C:\My Folder\MyProcess.exe`, "-arg1", "test value", "cmd", "-args", "hello world"}, + expectedCommandLine: "", + ArgsEscaped: false, + }, + } { + t.Run(name, func(t *testing.T) { + imageConfig := &imagespec.ImageConfig{ + Entrypoint: test.imgEntrypoint, + Cmd: test.imgCmd, + ArgsEscaped: test.ArgsEscaped, + } + sandboxConfig := getSandboxConfig() + containerConfig := &runtime.ContainerConfig{ + Metadata: &runtime.ContainerMetadata{ + Name: "test-name", + Attempt: 1, + }, + Image: &runtime.ImageSpec{ + Image: testImageName, + }, + Command: test.command, + Args: test.args, + Windows: &runtime.WindowsContainerConfig{}, + } + runtimeSpec, err := c.containerSpec(testID, testSandboxID, testPid, nsPath, testContainerName, testImageName, containerConfig, sandboxConfig, imageConfig, nil, config.Runtime{}) + assert.NoError(t, err) + 
assert.NotNil(t, runtimeSpec) + + // check the runtime spec for expected commandline and args + actualCommandLine := runtimeSpec.Process.CommandLine + actualArgs := runtimeSpec.Process.Args + + require.Equal(t, actualArgs, test.expectedArgs) + require.Equal(t, actualCommandLine, test.expectedCommandLine) + }) + } +} diff --git a/vendor/github.com/containerd/console/pty_unix.go b/pkg/cri/server/container_events.go similarity index 64% rename from vendor/github.com/containerd/console/pty_unix.go rename to pkg/cri/server/container_events.go index d5a6bd8..560798f 100644 --- a/vendor/github.com/containerd/console/pty_unix.go +++ b/pkg/cri/server/container_events.go @@ -1,5 +1,3 @@ -// +build darwin linux netbsd openbsd solaris - /* Copyright The containerd Authors. @@ -16,15 +14,14 @@ limitations under the License. */ -package console +package server import ( - "os" - - "golang.org/x/sys/unix" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) -// openpt allocates a new pseudo-terminal by opening the /dev/ptmx device -func openpt() (*os.File, error) { - return os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) +func (c *criService) GetContainerEvents(r *runtime.GetEventsRequest, s runtime.RuntimeService_GetContainerEventsServer) error { + return status.Errorf(codes.Unimplemented, "method GetContainerEvents not implemented") } diff --git a/pkg/cri/server/container_exec.go b/pkg/cri/server/container_exec.go index ae5498f..0a27792 100644 --- a/pkg/cri/server/container_exec.go +++ b/pkg/cri/server/container_exec.go @@ -17,20 +17,21 @@ package server import ( - "github.com/pkg/errors" + "fmt" + "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // Exec prepares a streaming endpoint to execute a command in the container, and returns the address. 
func (c *criService) Exec(ctx context.Context, r *runtime.ExecRequest) (*runtime.ExecResponse, error) { cntr, err := c.containerStore.Get(r.GetContainerId()) if err != nil { - return nil, errors.Wrapf(err, "failed to find container %q in store", r.GetContainerId()) + return nil, fmt.Errorf("failed to find container %q in store: %w", r.GetContainerId(), err) } state := cntr.Status.Get().State() if state != runtime.ContainerState_CONTAINER_RUNNING { - return nil, errors.Errorf("container is in %s state", criContainerStateToString(state)) + return nil, fmt.Errorf("container is in %s state", criContainerStateToString(state)) } return c.streamServer.GetExec(r) } diff --git a/pkg/cri/server/container_execsync.go b/pkg/cri/server/container_execsync.go index 8bb5b8f..68cfb09 100644 --- a/pkg/cri/server/container_execsync.go +++ b/pkg/cri/server/container_execsync.go @@ -18,8 +18,8 @@ package server import ( "bytes" + "fmt" "io" - "errors" "syscall" "time" @@ -28,10 +28,9 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" "github.com/containerd/containerd/oci" - "github.com/pkg/errors" "golang.org/x/net/context" "k8s.io/client-go/tools/remotecommand" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" cio "github.com/containerd/containerd/pkg/cri/io" "github.com/containerd/containerd/pkg/cri/util" @@ -91,7 +90,7 @@ func (c *criService) ExecSync(ctx context.Context, r *runtime.ExecSyncRequest) ( timeout: time.Duration(r.GetTimeout()) * time.Second, }) if err != nil { - return nil, errors.Wrap(err, "failed to exec in container") + return nil, fmt.Errorf("failed to exec in container: %w", err) } return &runtime.ExecSyncResponse{ @@ -121,22 +120,24 @@ func (c *criService) execInternal(ctx context.Context, container containerd.Cont spec, err := container.Spec(ctx) if err != nil { - return nil, errors.Wrap(err, "failed to get container spec") + return nil, fmt.Errorf("failed to get 
container spec: %w", err) } task, err := container.Task(ctx, nil) if err != nil { - return nil, errors.Wrap(err, "failed to load task") + return nil, fmt.Errorf("failed to load task: %w", err) } pspec := spec.Process pspec.Terminal = opts.tty if opts.tty { if err := oci.WithEnv([]string{"TERM=xterm"})(ctx, nil, nil, spec); err != nil { - return nil, errors.Wrap(err, "add TERM env var to spec") + return nil, fmt.Errorf("add TERM env var to spec: %w", err) } } pspec.Args = opts.cmd + // CommandLine may already be set on the container's spec, but we want to only use Args here. + pspec.CommandLine = "" if opts.stdout == nil { opts.stdout = cio.NewDiscardLogger() @@ -156,7 +157,7 @@ func (c *criService) execInternal(ctx context.Context, container containerd.Cont }, ) if err != nil { - return nil, errors.Wrapf(err, "failed to create exec %q", execID) + return nil, fmt.Errorf("failed to create exec %q: %w", execID, err) } defer func() { deferCtx, deferCancel := ctrdutil.DeferContext() @@ -168,10 +169,10 @@ func (c *criService) execInternal(ctx context.Context, container containerd.Cont exitCh, err := process.Wait(ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to wait for process %q", execID) + return nil, fmt.Errorf("failed to wait for process %q: %w", execID, err) } if err := process.Start(ctx); err != nil { - return nil, errors.Wrapf(err, "failed to start exec %q", execID) + return nil, fmt.Errorf("failed to start exec %q: %w", execID, err) } handleResizing(ctx, opts.resize, func(size remotecommand.TerminalSize) { @@ -202,7 +203,7 @@ func (c *criService) execInternal(ctx context.Context, container containerd.Cont case <-execCtx.Done(): // Ignore the not found error because the process may exit itself before killing. 
if err := process.Kill(ctx, syscall.SIGKILL); err != nil && !errdefs.IsNotFound(err) { - return nil, errors.Wrapf(err, "failed to kill exec %q", execID) + return nil, fmt.Errorf("failed to kill exec %q: %w", execID, err) } // Wait for the process to be killed. exitRes := <-exitCh @@ -210,12 +211,12 @@ func (c *criService) execInternal(ctx context.Context, container containerd.Cont execID, exitRes.ExitCode(), exitRes.Error()) <-attachDone log.G(ctx).Debugf("Stream pipe for exec process %q done", execID) - return nil, errors.Wrapf(execCtx.Err(), "timeout %v exceeded", opts.timeout) + return nil, fmt.Errorf("timeout %v exceeded: %w", opts.timeout, execCtx.Err()) case exitRes := <-exitCh: code, _, err := exitRes.Result() log.G(ctx).Debugf("Exec process %q exits with exit code %d and error %v", execID, code, err) if err != nil { - return nil, errors.Wrapf(err, "failed while waiting for exec %q", execID) + return nil, fmt.Errorf("failed while waiting for exec %q: %w", execID, err) } <-attachDone log.G(ctx).Debugf("Stream pipe for exec process %q done", execID) @@ -240,13 +241,13 @@ func (c *criService) execInContainer(ctx context.Context, id string, opts execOp cntr, err := c.containerStore.Get(id) if err != nil { - return nil, errors.Wrapf(err, "failed to find container %q in store", id) + return nil, fmt.Errorf("failed to find container %q in store: %w", id, err) } id = cntr.ID state := cntr.Status.Get().State() if state != runtime.ContainerState_CONTAINER_RUNNING { - return nil, errors.Errorf("container is in %s state", criContainerStateToString(state)) + return nil, fmt.Errorf("container is in %s state", criContainerStateToString(state)) } return c.execInternal(ctx, cntr.Container, id, opts) diff --git a/pkg/cri/server/container_list.go b/pkg/cri/server/container_list.go index 74be1a1..6ac9efd 100644 --- a/pkg/cri/server/container_list.go +++ b/pkg/cri/server/container_list.go @@ -17,15 +17,18 @@ package server import ( + "time" + "golang.org/x/net/context" - runtime 
"k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" containerstore "github.com/containerd/containerd/pkg/cri/store/container" ) // ListContainers lists all containers matching the filter. func (c *criService) ListContainers(ctx context.Context, r *runtime.ListContainersRequest) (*runtime.ListContainersResponse, error) { + start := time.Now() // List all containers from store. containersInStore := c.containerStore.List() @@ -35,6 +38,8 @@ func (c *criService) ListContainers(ctx context.Context, r *runtime.ListContaine } containers = c.filterCRIContainers(containers, r.GetFilter()) + + containerListTimer.UpdateSince(start) return &runtime.ListContainersResponse{Containers: containers}, nil } diff --git a/pkg/cri/server/container_list_test.go b/pkg/cri/server/container_list_test.go index 5def2e7..fcac335 100644 --- a/pkg/cri/server/container_list_test.go +++ b/pkg/cri/server/container_list_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" containerstore "github.com/containerd/containerd/pkg/cri/store/container" sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" diff --git a/pkg/cri/server/container_log_reopen.go b/pkg/cri/server/container_log_reopen.go index b15bb62..292d92d 100644 --- a/pkg/cri/server/container_log_reopen.go +++ b/pkg/cri/server/container_log_reopen.go @@ -17,10 +17,12 @@ package server import ( - "github.com/pkg/errors" + "errors" + "fmt" + "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // ReopenContainerLog asks the cri plugin to reopen the stdout/stderr log file for the container. 
@@ -28,7 +30,7 @@ import ( func (c *criService) ReopenContainerLog(ctx context.Context, r *runtime.ReopenContainerLogRequest) (*runtime.ReopenContainerLogResponse, error) { container, err := c.containerStore.Get(r.GetContainerId()) if err != nil { - return nil, errors.Wrapf(err, "an error occurred when try to find container %q", r.GetContainerId()) + return nil, fmt.Errorf("an error occurred when try to find container %q: %w", r.GetContainerId(), err) } if container.Status.Get().State() != runtime.ContainerState_CONTAINER_RUNNING { diff --git a/pkg/cri/server/container_remove.go b/pkg/cri/server/container_remove.go index c27830e..ddbd1c9 100644 --- a/pkg/cri/server/container_remove.go +++ b/pkg/cri/server/container_remove.go @@ -17,30 +17,46 @@ package server import ( + "errors" + "fmt" + "time" + "github.com/containerd/containerd" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/pkg/errors" + containerstore "github.com/containerd/containerd/pkg/cri/store/container" "github.com/sirupsen/logrus" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - - "github.com/containerd/containerd/pkg/cri/store" - containerstore "github.com/containerd/containerd/pkg/cri/store/container" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // RemoveContainer removes the container. 
func (c *criService) RemoveContainer(ctx context.Context, r *runtime.RemoveContainerRequest) (_ *runtime.RemoveContainerResponse, retErr error) { - container, err := c.containerStore.Get(r.GetContainerId()) + start := time.Now() + ctrID := r.GetContainerId() + container, err := c.containerStore.Get(ctrID) if err != nil { - if err != store.ErrNotExist { - return nil, errors.Wrapf(err, "an error occurred when try to find container %q", r.GetContainerId()) + if !errdefs.IsNotFound(err) { + return nil, fmt.Errorf("an error occurred when try to find container %q: %w", ctrID, err) } // Do not return error if container metadata doesn't exist. - log.G(ctx).Tracef("RemoveContainer called for container %q that does not exist", r.GetContainerId()) + log.G(ctx).Tracef("RemoveContainer called for container %q that does not exist", ctrID) return &runtime.RemoveContainerResponse{}, nil } id := container.ID + i, err := container.Container.Info(ctx) + if err != nil { + if !errdefs.IsNotFound(err) { + return nil, fmt.Errorf("get container info: %w", err) + } + // Since containerd doesn't see the container and criservice's content store does, + // we should try to recover from this state by removing entry for this container + // from the container store as well and return successfully. 
+ log.G(ctx).WithError(err).Warn("get container info failed") + c.containerStore.Delete(ctrID) + c.containerNameIndex.ReleaseByKey(ctrID) + return &runtime.RemoveContainerResponse{}, nil + } // Forcibly stop the containers if they are in running or unknown state state := container.Status.Get().State() @@ -48,7 +64,7 @@ func (c *criService) RemoveContainer(ctx context.Context, r *runtime.RemoveConta state == runtime.ContainerState_CONTAINER_UNKNOWN { logrus.Infof("Forcibly stopping container %q", id) if err := c.stopContainer(ctx, container, 0); err != nil { - return nil, errors.Wrapf(err, "failed to forcibly stop container %q", id) + return nil, fmt.Errorf("failed to forcibly stop container %q: %w", id, err) } } @@ -56,7 +72,7 @@ func (c *criService) RemoveContainer(ctx context.Context, r *runtime.RemoveConta // Set removing state to prevent other start/remove operations against this container // while it's being removed. if err := setContainerRemoving(container); err != nil { - return nil, errors.Wrapf(err, "failed to set removing state for container %q", id) + return nil, fmt.Errorf("failed to set removing state for container %q: %w", id, err) } defer func() { if retErr != nil { @@ -75,31 +91,33 @@ func (c *criService) RemoveContainer(ctx context.Context, r *runtime.RemoveConta // Delete containerd container. if err := container.Container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { if !errdefs.IsNotFound(err) { - return nil, errors.Wrapf(err, "failed to delete containerd container %q", id) + return nil, fmt.Errorf("failed to delete containerd container %q: %w", id, err) } log.G(ctx).Tracef("Remove called for containerd container %q that does not exist", id) } // Delete container checkpoint. 
if err := container.Delete(); err != nil { - return nil, errors.Wrapf(err, "failed to delete container checkpoint for %q", id) + return nil, fmt.Errorf("failed to delete container checkpoint for %q: %w", id, err) } containerRootDir := c.getContainerRootDir(id) if err := ensureRemoveAll(ctx, containerRootDir); err != nil { - return nil, errors.Wrapf(err, "failed to remove container root directory %q", - containerRootDir) + return nil, fmt.Errorf("failed to remove container root directory %q: %w", + containerRootDir, err) } volatileContainerRootDir := c.getVolatileContainerRootDir(id) if err := ensureRemoveAll(ctx, volatileContainerRootDir); err != nil { - return nil, errors.Wrapf(err, "failed to remove volatile container root directory %q", - volatileContainerRootDir) + return nil, fmt.Errorf("failed to remove volatile container root directory %q: %w", + volatileContainerRootDir, err) } c.containerStore.Delete(id) c.containerNameIndex.ReleaseByKey(id) + containerRemoveTimer.WithValues(i.Runtime.Name).UpdateSince(start) + return &runtime.RemoveContainerResponse{}, nil } diff --git a/pkg/cri/server/container_start.go b/pkg/cri/server/container_start.go index 11390f1..35ec64b 100644 --- a/pkg/cri/server/container_start.go +++ b/pkg/cri/server/container_start.go @@ -17,6 +17,8 @@ package server import ( + "errors" + "fmt" "io" "time" @@ -26,10 +28,9 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/nri" v1 "github.com/containerd/nri/types/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" cio "github.com/containerd/containerd/pkg/cri/io" containerstore "github.com/containerd/containerd/pkg/cri/store/container" @@ -40,9 +41,15 @@ import ( // StartContainer starts the container. 
func (c *criService) StartContainer(ctx context.Context, r *runtime.StartContainerRequest) (retRes *runtime.StartContainerResponse, retErr error) { + start := time.Now() cntr, err := c.containerStore.Get(r.GetContainerId()) if err != nil { - return nil, errors.Wrapf(err, "an error occurred when try to find container %q", r.GetContainerId()) + return nil, fmt.Errorf("an error occurred when try to find container %q: %w", r.GetContainerId(), err) + } + + info, err := cntr.Container.Info(ctx) + if err != nil { + return nil, fmt.Errorf("get container info: %w", err) } id := cntr.ID @@ -53,7 +60,7 @@ func (c *criService) StartContainer(ctx context.Context, r *runtime.StartContain // Set starting state to prevent other start/remove operations against this container // while it's being started. if err := setContainerStarting(cntr); err != nil { - return nil, errors.Wrapf(err, "failed to set starting state for container %q", id) + return nil, fmt.Errorf("failed to set starting state for container %q: %w", id, err) } defer func() { if retErr != nil { @@ -77,11 +84,11 @@ func (c *criService) StartContainer(ctx context.Context, r *runtime.StartContain // Get sandbox config from sandbox store. sandbox, err := c.sandboxStore.Get(meta.SandboxID) if err != nil { - return nil, errors.Wrapf(err, "sandbox %q not found", meta.SandboxID) + return nil, fmt.Errorf("sandbox %q not found: %w", meta.SandboxID, err) } sandboxID := meta.SandboxID if sandbox.Status.Get().State != sandboxstore.StateReady { - return nil, errors.Errorf("sandbox container %q is not running", sandboxID) + return nil, fmt.Errorf("sandbox container %q is not running", sandboxID) } // Recheck target container validity in Linux namespace options. 
@@ -90,7 +97,7 @@ func (c *criService) StartContainer(ctx context.Context, r *runtime.StartContain if nsOpts.GetPid() == runtime.NamespaceMode_TARGET { _, err := c.validateTargetContainer(sandboxID, nsOpts.TargetId) if err != nil { - return nil, errors.Wrap(err, "invalid target container") + return nil, fmt.Errorf("invalid target container: %w", err) } } } @@ -98,7 +105,7 @@ func (c *criService) StartContainer(ctx context.Context, r *runtime.StartContain ioCreation := func(id string) (_ containerdio.IO, err error) { stdoutWC, stderrWC, err := c.createContainerLoggers(meta.LogPath, config.GetTty()) if err != nil { - return nil, errors.Wrap(err, "failed to create container loggers") + return nil, fmt.Errorf("failed to create container loggers: %w", err) } cntr.IO.AddOutput("log", stdoutWC, stderrWC) cntr.IO.Pipe() @@ -107,13 +114,21 @@ func (c *criService) StartContainer(ctx context.Context, r *runtime.StartContain ctrInfo, err := container.Info(ctx) if err != nil { - return nil, errors.Wrap(err, "failed to get container info") + return nil, fmt.Errorf("failed to get container info: %w", err) + } + + ociRuntime, err := c.getSandboxRuntime(sandbox.Config, sandbox.Metadata.RuntimeHandler) + if err != nil { + return nil, fmt.Errorf("failed to get sandbox runtime: %w", err) } taskOpts := c.taskOpts(ctrInfo.Runtime.Name) + if ociRuntime.Path != "" { + taskOpts = append(taskOpts, containerd.WithRuntimePath(ociRuntime.Path)) + } task, err := container.NewTask(ctx, ioCreation, taskOpts...) if err != nil { - return nil, errors.Wrap(err, "failed to create containerd task") + return nil, fmt.Errorf("failed to create containerd task: %w", err) } defer func() { if retErr != nil { @@ -129,7 +144,7 @@ func (c *criService) StartContainer(ctx context.Context, r *runtime.StartContain // wait is a long running background request, no timeout needed. 
exitCh, err := task.Wait(ctrdutil.NamespacedContext()) if err != nil { - return nil, errors.Wrap(err, "failed to wait for containerd task") + return nil, fmt.Errorf("failed to wait for containerd task: %w", err) } nric, err := nri.New() if err != nil { @@ -141,13 +156,13 @@ func (c *criService) StartContainer(ctx context.Context, r *runtime.StartContain Labels: sandbox.Config.Labels, } if _, err := nric.InvokeWithSandbox(ctx, task, v1.Create, nriSB); err != nil { - return nil, errors.Wrap(err, "nri invoke") + return nil, fmt.Errorf("nri invoke: %w", err) } } // Start containerd task. if err := task.Start(ctx); err != nil { - return nil, errors.Wrapf(err, "failed to start containerd task %q", id) + return nil, fmt.Errorf("failed to start containerd task %q: %w", id, err) } // Update container start timestamp. @@ -156,12 +171,14 @@ func (c *criService) StartContainer(ctx context.Context, r *runtime.StartContain status.StartedAt = time.Now().UnixNano() return status, nil }); err != nil { - return nil, errors.Wrapf(err, "failed to update container %q state", id) + return nil, fmt.Errorf("failed to update container %q state: %w", id, err) } // It handles the TaskExit event and update container state after this. c.eventMonitor.startContainerExitMonitor(context.Background(), id, task.Pid(), exitCh) + containerStartTimer.WithValues(info.Runtime.Name).UpdateSince(start) + return &runtime.StartContainerResponse{}, nil } @@ -171,7 +188,7 @@ func setContainerStarting(container containerstore.Container) error { return container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) { // Return error if container is not in created state. 
if status.State() != runtime.ContainerState_CONTAINER_CREATED { - return status, errors.Errorf("container is in %s state", criContainerStateToString(status.State())) + return status, fmt.Errorf("container is in %s state", criContainerStateToString(status.State())) } // Do not start the container when there is a removal in progress. if status.Removing { @@ -200,7 +217,7 @@ func (c *criService) createContainerLoggers(logPath string, tty bool) (stdout io // Only generate container log when log path is specified. f, err := openLogFile(logPath) if err != nil { - return nil, nil, errors.Wrap(err, "failed to create and open log file") + return nil, nil, fmt.Errorf("failed to create and open log file: %w", err) } defer func() { if err != nil { diff --git a/pkg/cri/server/container_stats.go b/pkg/cri/server/container_stats.go index 22607cd..c2a8296 100644 --- a/pkg/cri/server/container_stats.go +++ b/pkg/cri/server/container_stats.go @@ -17,10 +17,11 @@ package server import ( + "fmt" + tasks "github.com/containerd/containerd/api/services/tasks/v1" - "github.com/pkg/errors" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // ContainerStats returns stats of the container. 
If the container does not @@ -28,20 +29,20 @@ import ( func (c *criService) ContainerStats(ctx context.Context, in *runtime.ContainerStatsRequest) (*runtime.ContainerStatsResponse, error) { cntr, err := c.containerStore.Get(in.GetContainerId()) if err != nil { - return nil, errors.Wrap(err, "failed to find container") + return nil, fmt.Errorf("failed to find container: %w", err) } request := &tasks.MetricsRequest{Filters: []string{"id==" + cntr.ID}} resp, err := c.client.TaskService().Metrics(ctx, request) if err != nil { - return nil, errors.Wrap(err, "failed to fetch metrics for task") + return nil, fmt.Errorf("failed to fetch metrics for task: %w", err) } if len(resp.Metrics) != 1 { - return nil, errors.Errorf("unexpected metrics response: %+v", resp.Metrics) + return nil, fmt.Errorf("unexpected metrics response: %+v", resp.Metrics) } cs, err := c.containerMetrics(cntr.Metadata, resp.Metrics[0]) if err != nil { - return nil, errors.Wrap(err, "failed to decode container metrics") + return nil, fmt.Errorf("failed to decode container metrics: %w", err) } return &runtime.ContainerStatsResponse{Stats: cs}, nil } diff --git a/pkg/cri/server/container_stats_list.go b/pkg/cri/server/container_stats_list.go index 777d105..bc796fb 100644 --- a/pkg/cri/server/container_stats_list.go +++ b/pkg/cri/server/container_stats_list.go @@ -17,11 +17,12 @@ package server import ( + "fmt" + tasks "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/api/types" - "github.com/pkg/errors" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" containerstore "github.com/containerd/containerd/pkg/cri/store/container" ) @@ -33,15 +34,15 @@ func (c *criService) ListContainerStats( ) (*runtime.ListContainerStatsResponse, error) { request, containers, err := c.buildTaskMetricsRequest(in) if err != nil { - return nil, errors.Wrap(err, "failed to build metrics request") + return nil, 
fmt.Errorf("failed to build metrics request: %w", err) } resp, err := c.client.TaskService().Metrics(ctx, &request) if err != nil { - return nil, errors.Wrap(err, "failed to fetch metrics for tasks") + return nil, fmt.Errorf("failed to fetch metrics for tasks: %w", err) } criStats, err := c.toCRIContainerStats(resp.Metrics, containers) if err != nil { - return nil, errors.Wrap(err, "failed to convert to cri containerd stats format") + return nil, fmt.Errorf("failed to convert to cri containerd stats format: %w", err) } return criStats, nil } @@ -58,7 +59,7 @@ func (c *criService) toCRIContainerStats( for _, cntr := range containers { cs, err := c.containerMetrics(cntr.Metadata, statsMap[cntr.ID]) if err != nil { - return nil, errors.Wrapf(err, "failed to decode container metrics for %q", cntr.ID) + return nil, fmt.Errorf("failed to decode container metrics for %q: %w", cntr.ID, err) } containerStats.Stats = append(containerStats.Stats, cs) } @@ -81,7 +82,7 @@ func (c *criService) buildTaskMetricsRequest( ) (tasks.MetricsRequest, []containerstore.Container, error) { var req tasks.MetricsRequest if r.GetFilter() == nil { - return req, nil, nil + return req, c.containerStore.List(), nil } c.normalizeContainerStatsFilter(r.GetFilter()) var containers []containerstore.Container diff --git a/pkg/cri/server/container_stats_list_linux.go b/pkg/cri/server/container_stats_list_linux.go index 6ef881f..77372d1 100644 --- a/pkg/cri/server/container_stats_list_linux.go +++ b/pkg/cri/server/container_stats_list_linux.go @@ -17,14 +17,17 @@ package server import ( + "fmt" + "time" + "github.com/containerd/containerd/api/types" v1 "github.com/containerd/containerd/metrics/types/v1" v2 "github.com/containerd/containerd/metrics/types/v2" "github.com/containerd/typeurl" - "github.com/pkg/errors" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" containerstore "github.com/containerd/containerd/pkg/cri/store/container" + stats 
"github.com/containerd/containerd/pkg/cri/store/stats" ) func (c *criService) containerMetrics( @@ -58,47 +61,91 @@ func (c *criService) containerMetrics( if stats != nil { s, err := typeurl.UnmarshalAny(stats.Data) if err != nil { - return nil, errors.Wrap(err, "failed to extract container metrics") + return nil, fmt.Errorf("failed to extract container metrics: %w", err) } - switch metrics := s.(type) { - case *v1.Metrics: - if metrics.CPU != nil && metrics.CPU.Usage != nil { - cs.Cpu = &runtime.CpuUsage{ - Timestamp: stats.Timestamp.UnixNano(), - UsageCoreNanoSeconds: &runtime.UInt64Value{Value: metrics.CPU.Usage.Total}, - } - } - if metrics.Memory != nil && metrics.Memory.Usage != nil { - cs.Memory = &runtime.MemoryUsage{ - Timestamp: stats.Timestamp.UnixNano(), - WorkingSetBytes: &runtime.UInt64Value{ - Value: getWorkingSet(metrics.Memory), - }, - } - } - case *v2.Metrics: - if metrics.CPU != nil { - cs.Cpu = &runtime.CpuUsage{ - Timestamp: stats.Timestamp.UnixNano(), - UsageCoreNanoSeconds: &runtime.UInt64Value{Value: metrics.CPU.UsageUsec * 1000}, - } - } - if metrics.Memory != nil { - cs.Memory = &runtime.MemoryUsage{ - Timestamp: stats.Timestamp.UnixNano(), - WorkingSetBytes: &runtime.UInt64Value{ - Value: getWorkingSetV2(metrics.Memory), - }, - } - } - default: - return &cs, errors.Errorf("unexpected metrics type: %v", metrics) + + cpuStats, err := c.cpuContainerStats(meta.ID, false /* isSandbox */, s, stats.Timestamp) + if err != nil { + return nil, fmt.Errorf("failed to obtain cpu stats: %w", err) } + cs.Cpu = cpuStats + + memoryStats, err := c.memoryContainerStats(meta.ID, s, stats.Timestamp) + if err != nil { + return nil, fmt.Errorf("failed to obtain memory stats: %w", err) + } + cs.Memory = memoryStats } return &cs, nil } +func (c *criService) getUsageNanoCores(containerID string, isSandbox bool, currentUsageCoreNanoSeconds uint64, currentTimestamp time.Time) (uint64, error) { + var oldStats *stats.ContainerStats + + if isSandbox { + sandbox, err := 
c.sandboxStore.Get(containerID) + if err != nil { + return 0, fmt.Errorf("failed to get sandbox container: %s: %w", containerID, err) + } + oldStats = sandbox.Stats + } else { + container, err := c.containerStore.Get(containerID) + if err != nil { + return 0, fmt.Errorf("failed to get container ID: %s: %w", containerID, err) + } + oldStats = container.Stats + } + + if oldStats == nil { + newStats := &stats.ContainerStats{ + UsageCoreNanoSeconds: currentUsageCoreNanoSeconds, + Timestamp: currentTimestamp, + } + if isSandbox { + err := c.sandboxStore.UpdateContainerStats(containerID, newStats) + if err != nil { + return 0, fmt.Errorf("failed to update sandbox stats container ID: %s: %w", containerID, err) + } + } else { + err := c.containerStore.UpdateContainerStats(containerID, newStats) + if err != nil { + return 0, fmt.Errorf("failed to update container stats ID: %s: %w", containerID, err) + } + } + return 0, nil + } + + nanoSeconds := currentTimestamp.UnixNano() - oldStats.Timestamp.UnixNano() + + // zero or negative interval + if nanoSeconds <= 0 { + return 0, nil + } + + newUsageNanoCores := uint64(float64(currentUsageCoreNanoSeconds-oldStats.UsageCoreNanoSeconds) / + float64(nanoSeconds) * float64(time.Second/time.Nanosecond)) + + newStats := &stats.ContainerStats{ + UsageCoreNanoSeconds: currentUsageCoreNanoSeconds, + Timestamp: currentTimestamp, + } + if isSandbox { + err := c.sandboxStore.UpdateContainerStats(containerID, newStats) + if err != nil { + return 0, fmt.Errorf("failed to update sandbox container stats: %s: %w", containerID, err) + } + + } else { + err := c.containerStore.UpdateContainerStats(containerID, newStats) + if err != nil { + return 0, fmt.Errorf("failed to update container stats ID: %s: %w", containerID, err) + } + } + + return newUsageNanoCores, nil +} + // getWorkingSet calculates workingset memory from cgroup memory stats. // The caller should make sure memory is not nil. 
// workingset = usage - total_inactive_file @@ -123,3 +170,109 @@ func getWorkingSetV2(memory *v2.MemoryStat) uint64 { } return workingSet } + +func isMemoryUnlimited(v uint64) bool { + // Size after which we consider memory to be "unlimited". This is not + // MaxInt64 due to rounding by the kernel. + // TODO: k8s or cadvisor should export this https://github.com/google/cadvisor/blob/2b6fbacac7598e0140b5bc8428e3bdd7d86cf5b9/metrics/prometheus.go#L1969-L1971 + const maxMemorySize = uint64(1 << 62) + + return v > maxMemorySize +} + +// https://github.com/kubernetes/kubernetes/blob/b47f8263e18c7b13dba33fba23187e5e0477cdbd/pkg/kubelet/stats/helper.go#L68-L71 +func getAvailableBytes(memory *v1.MemoryStat, workingSetBytes uint64) uint64 { + // memory limit - working set bytes + if !isMemoryUnlimited(memory.Usage.Limit) { + return memory.Usage.Limit - workingSetBytes + } + return 0 +} + +func getAvailableBytesV2(memory *v2.MemoryStat, workingSetBytes uint64) uint64 { + // memory limit (memory.max) for cgroupv2 - working set bytes + if !isMemoryUnlimited(memory.UsageLimit) { + return memory.UsageLimit - workingSetBytes + } + return 0 +} + +func (c *criService) cpuContainerStats(ID string, isSandbox bool, stats interface{}, timestamp time.Time) (*runtime.CpuUsage, error) { + switch metrics := stats.(type) { + case *v1.Metrics: + if metrics.CPU != nil && metrics.CPU.Usage != nil { + + usageNanoCores, err := c.getUsageNanoCores(ID, isSandbox, metrics.CPU.Usage.Total, timestamp) + if err != nil { + return nil, fmt.Errorf("failed to get usage nano cores, containerID: %s: %w", ID, err) + } + + return &runtime.CpuUsage{ + Timestamp: timestamp.UnixNano(), + UsageCoreNanoSeconds: &runtime.UInt64Value{Value: metrics.CPU.Usage.Total}, + UsageNanoCores: &runtime.UInt64Value{Value: usageNanoCores}, + }, nil + } + case *v2.Metrics: + if metrics.CPU != nil { + // convert to nano seconds + usageCoreNanoSeconds := metrics.CPU.UsageUsec * 1000 + + usageNanoCores, err := 
c.getUsageNanoCores(ID, isSandbox, usageCoreNanoSeconds, timestamp) + if err != nil { + return nil, fmt.Errorf("failed to get usage nano cores, containerID: %s: %w", ID, err) + } + + return &runtime.CpuUsage{ + Timestamp: timestamp.UnixNano(), + UsageCoreNanoSeconds: &runtime.UInt64Value{Value: usageCoreNanoSeconds}, + UsageNanoCores: &runtime.UInt64Value{Value: usageNanoCores}, + }, nil + } + default: + return nil, fmt.Errorf("unexpected metrics type: %v", metrics) + } + return nil, nil +} + +func (c *criService) memoryContainerStats(ID string, stats interface{}, timestamp time.Time) (*runtime.MemoryUsage, error) { + switch metrics := stats.(type) { + case *v1.Metrics: + if metrics.Memory != nil && metrics.Memory.Usage != nil { + workingSetBytes := getWorkingSet(metrics.Memory) + + return &runtime.MemoryUsage{ + Timestamp: timestamp.UnixNano(), + WorkingSetBytes: &runtime.UInt64Value{ + Value: workingSetBytes, + }, + AvailableBytes: &runtime.UInt64Value{Value: getAvailableBytes(metrics.Memory, workingSetBytes)}, + UsageBytes: &runtime.UInt64Value{Value: metrics.Memory.Usage.Usage}, + RssBytes: &runtime.UInt64Value{Value: metrics.Memory.TotalRSS}, + PageFaults: &runtime.UInt64Value{Value: metrics.Memory.TotalPgFault}, + MajorPageFaults: &runtime.UInt64Value{Value: metrics.Memory.TotalPgMajFault}, + }, nil + } + case *v2.Metrics: + if metrics.Memory != nil { + workingSetBytes := getWorkingSetV2(metrics.Memory) + + return &runtime.MemoryUsage{ + Timestamp: timestamp.UnixNano(), + WorkingSetBytes: &runtime.UInt64Value{ + Value: workingSetBytes, + }, + AvailableBytes: &runtime.UInt64Value{Value: getAvailableBytesV2(metrics.Memory, workingSetBytes)}, + UsageBytes: &runtime.UInt64Value{Value: metrics.Memory.Usage}, + // Use Anon memory for RSS as cAdvisor on cgroupv2 + // see https://github.com/google/cadvisor/blob/a9858972e75642c2b1914c8d5428e33e6392c08a/container/libcontainer/handler.go#L799 + RssBytes: &runtime.UInt64Value{Value: metrics.Memory.Anon}, + PageFaults: 
&runtime.UInt64Value{Value: metrics.Memory.Pgfault}, + MajorPageFaults: &runtime.UInt64Value{Value: metrics.Memory.Pgmajfault}, + }, nil + } + default: + return nil, fmt.Errorf("unexpected metrics type: %v", metrics) + } + return nil, nil +} diff --git a/pkg/cri/server/container_stats_list_linux_test.go b/pkg/cri/server/container_stats_list_linux_test.go index a35b5f2..dba05fe 100644 --- a/pkg/cri/server/container_stats_list_linux_test.go +++ b/pkg/cri/server/container_stats_list_linux_test.go @@ -17,10 +17,15 @@ package server import ( + "math" "testing" + "time" v1 "github.com/containerd/cgroups/stats/v1" + v2 "github.com/containerd/cgroups/v2/stats" + containerstore "github.com/containerd/containerd/pkg/cri/store/container" "github.com/stretchr/testify/assert" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestGetWorkingSet(t *testing.T) { @@ -53,3 +58,273 @@ func TestGetWorkingSet(t *testing.T) { }) } } + +func TestGetWorkingSetV2(t *testing.T) { + for desc, test := range map[string]struct { + memory *v2.MemoryStat + expected uint64 + }{ + "nil memory usage": { + memory: &v2.MemoryStat{}, + expected: 0, + }, + "memory usage higher than inactive_total_file": { + memory: &v2.MemoryStat{ + InactiveFile: 1000, + Usage: 2000, + }, + expected: 1000, + }, + "memory usage lower than inactive_total_file": { + memory: &v2.MemoryStat{ + InactiveFile: 2000, + Usage: 1000, + }, + expected: 0, + }, + } { + t.Run(desc, func(t *testing.T) { + got := getWorkingSetV2(test.memory) + assert.Equal(t, test.expected, got) + }) + } +} + +func TestGetAvailableBytes(t *testing.T) { + for desc, test := range map[string]struct { + memory *v1.MemoryStat + workingSetBytes uint64 + expected uint64 + }{ + + "no limit": { + memory: &v1.MemoryStat{ + Usage: &v1.MemoryEntry{ + Limit: math.MaxUint64, // no limit + Usage: 1000, + }, + }, + workingSetBytes: 500, + expected: 0, + }, + "with limit": { + memory: &v1.MemoryStat{ + Usage: &v1.MemoryEntry{ + Limit: 5000, + Usage: 1000, + }, + }, + 
workingSetBytes: 500, + expected: 5000 - 500, + }, + } { + t.Run(desc, func(t *testing.T) { + got := getAvailableBytes(test.memory, test.workingSetBytes) + assert.Equal(t, test.expected, got) + }) + } +} + +func TestGetAvailableBytesV2(t *testing.T) { + for desc, test := range map[string]struct { + memory *v2.MemoryStat + workingSetBytes uint64 + expected uint64 + }{ + + "no limit": { + memory: &v2.MemoryStat{ + UsageLimit: math.MaxUint64, // no limit + Usage: 1000, + }, + workingSetBytes: 500, + expected: 0, + }, + "with limit": { + memory: &v2.MemoryStat{ + UsageLimit: 5000, + Usage: 1000, + }, + workingSetBytes: 500, + expected: 5000 - 500, + }, + } { + t.Run(desc, func(t *testing.T) { + got := getAvailableBytesV2(test.memory, test.workingSetBytes) + assert.Equal(t, test.expected, got) + }) + } +} + +func TestContainerMetricsCPU(t *testing.T) { + c := newTestCRIService() + timestamp := time.Now() + secondAfterTimeStamp := timestamp.Add(time.Second) + ID := "ID" + + for desc, test := range map[string]struct { + firstMetrics interface{} + secondMetrics interface{} + expectedFirst *runtime.CpuUsage + expectedSecond *runtime.CpuUsage + }{ + + "v1 metrics": { + firstMetrics: &v1.Metrics{ + CPU: &v1.CPUStat{ + Usage: &v1.CPUUsage{ + Total: 50, + }, + }, + }, + secondMetrics: &v1.Metrics{ + CPU: &v1.CPUStat{ + Usage: &v1.CPUUsage{ + Total: 500, + }, + }, + }, + expectedFirst: &runtime.CpuUsage{ + Timestamp: timestamp.UnixNano(), + UsageCoreNanoSeconds: &runtime.UInt64Value{Value: 50}, + UsageNanoCores: &runtime.UInt64Value{Value: 0}, + }, + expectedSecond: &runtime.CpuUsage{ + Timestamp: secondAfterTimeStamp.UnixNano(), + UsageCoreNanoSeconds: &runtime.UInt64Value{Value: 500}, + UsageNanoCores: &runtime.UInt64Value{Value: 450}, + }, + }, + } { + t.Run(desc, func(t *testing.T) { + container, err := containerstore.NewContainer( + containerstore.Metadata{ID: ID}, + ) + assert.NoError(t, err) + assert.Nil(t, container.Stats) + err = c.containerStore.Add(container) + 
assert.NoError(t, err) + + cpuUsage, err := c.cpuContainerStats(ID, false, test.firstMetrics, timestamp) + assert.NoError(t, err) + + container, err = c.containerStore.Get(ID) + assert.NoError(t, err) + assert.NotNil(t, container.Stats) + + assert.Equal(t, test.expectedFirst, cpuUsage) + + cpuUsage, err = c.cpuContainerStats(ID, false, test.secondMetrics, secondAfterTimeStamp) + assert.NoError(t, err) + assert.Equal(t, test.expectedSecond, cpuUsage) + + container, err = c.containerStore.Get(ID) + assert.NoError(t, err) + assert.NotNil(t, container.Stats) + }) + } + +} + +func TestContainerMetricsMemory(t *testing.T) { + c := newTestCRIService() + timestamp := time.Now() + + for desc, test := range map[string]struct { + metrics interface{} + expected *runtime.MemoryUsage + }{ + "v1 metrics - no memory limit": { + metrics: &v1.Metrics{ + Memory: &v1.MemoryStat{ + Usage: &v1.MemoryEntry{ + Limit: math.MaxUint64, // no limit + Usage: 1000, + }, + TotalRSS: 10, + TotalPgFault: 11, + TotalPgMajFault: 12, + TotalInactiveFile: 500, + }, + }, + expected: &runtime.MemoryUsage{ + Timestamp: timestamp.UnixNano(), + WorkingSetBytes: &runtime.UInt64Value{Value: 500}, + AvailableBytes: &runtime.UInt64Value{Value: 0}, + UsageBytes: &runtime.UInt64Value{Value: 1000}, + RssBytes: &runtime.UInt64Value{Value: 10}, + PageFaults: &runtime.UInt64Value{Value: 11}, + MajorPageFaults: &runtime.UInt64Value{Value: 12}, + }, + }, + "v1 metrics - memory limit": { + metrics: &v1.Metrics{ + Memory: &v1.MemoryStat{ + Usage: &v1.MemoryEntry{ + Limit: 5000, + Usage: 1000, + }, + TotalRSS: 10, + TotalPgFault: 11, + TotalPgMajFault: 12, + TotalInactiveFile: 500, + }, + }, + expected: &runtime.MemoryUsage{ + Timestamp: timestamp.UnixNano(), + WorkingSetBytes: &runtime.UInt64Value{Value: 500}, + AvailableBytes: &runtime.UInt64Value{Value: 4500}, + UsageBytes: &runtime.UInt64Value{Value: 1000}, + RssBytes: &runtime.UInt64Value{Value: 10}, + PageFaults: &runtime.UInt64Value{Value: 11}, + MajorPageFaults: 
&runtime.UInt64Value{Value: 12}, + }, + }, + "v2 metrics - memory limit": { + metrics: &v2.Metrics{ + Memory: &v2.MemoryStat{ + Usage: 1000, + UsageLimit: 5000, + InactiveFile: 0, + Pgfault: 11, + Pgmajfault: 12, + }, + }, + expected: &runtime.MemoryUsage{ + Timestamp: timestamp.UnixNano(), + WorkingSetBytes: &runtime.UInt64Value{Value: 1000}, + AvailableBytes: &runtime.UInt64Value{Value: 4000}, + UsageBytes: &runtime.UInt64Value{Value: 1000}, + RssBytes: &runtime.UInt64Value{Value: 0}, + PageFaults: &runtime.UInt64Value{Value: 11}, + MajorPageFaults: &runtime.UInt64Value{Value: 12}, + }, + }, + "v2 metrics - no memory limit": { + metrics: &v2.Metrics{ + Memory: &v2.MemoryStat{ + Usage: 1000, + UsageLimit: math.MaxUint64, // no limit + InactiveFile: 0, + Pgfault: 11, + Pgmajfault: 12, + }, + }, + expected: &runtime.MemoryUsage{ + Timestamp: timestamp.UnixNano(), + WorkingSetBytes: &runtime.UInt64Value{Value: 1000}, + AvailableBytes: &runtime.UInt64Value{Value: 0}, + UsageBytes: &runtime.UInt64Value{Value: 1000}, + RssBytes: &runtime.UInt64Value{Value: 0}, + PageFaults: &runtime.UInt64Value{Value: 11}, + MajorPageFaults: &runtime.UInt64Value{Value: 12}, + }, + }, + } { + t.Run(desc, func(t *testing.T) { + got, err := c.memoryContainerStats("ID", test.metrics, timestamp) + assert.NoError(t, err) + assert.Equal(t, test.expected, got) + }) + } +} diff --git a/pkg/cri/server/container_stats_list_other.go b/pkg/cri/server/container_stats_list_other.go index 1d5327e..bed0b6b 100644 --- a/pkg/cri/server/container_stats_list_other.go +++ b/pkg/cri/server/container_stats_list_other.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* @@ -19,10 +20,11 @@ package server import ( + "fmt" + "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" containerstore 
"github.com/containerd/containerd/pkg/cri/store/container" ) @@ -32,5 +34,5 @@ func (c *criService) containerMetrics( stats *types.Metric, ) (*runtime.ContainerStats, error) { var cs runtime.ContainerStats - return &cs, errors.Wrap(errdefs.ErrNotImplemented, "container metrics") + return &cs, fmt.Errorf("container metrics: %w", errdefs.ErrNotImplemented) } diff --git a/pkg/cri/server/container_stats_list_windows.go b/pkg/cri/server/container_stats_list_windows.go index e2c2927..e2ec191 100644 --- a/pkg/cri/server/container_stats_list_windows.go +++ b/pkg/cri/server/container_stats_list_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -19,11 +17,13 @@ package server import ( + "errors" + "fmt" + wstats "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats" "github.com/containerd/containerd/api/types" "github.com/containerd/typeurl" - "github.com/pkg/errors" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" containerstore "github.com/containerd/containerd/pkg/cri/store/container" ) @@ -59,7 +59,7 @@ func (c *criService) containerMetrics( if stats != nil { s, err := typeurl.UnmarshalAny(stats.Data) if err != nil { - return nil, errors.Wrap(err, "failed to extract container metrics") + return nil, fmt.Errorf("failed to extract container metrics: %w", err) } wstats := s.(*wstats.Statistics).GetWindows() if wstats == nil { diff --git a/pkg/cri/server/container_status.go b/pkg/cri/server/container_status.go index 6c9fa39..553ea96 100644 --- a/pkg/cri/server/container_status.go +++ b/pkg/cri/server/container_status.go @@ -18,21 +18,21 @@ package server import ( "encoding/json" + "fmt" + + "github.com/containerd/containerd/errdefs" + containerstore "github.com/containerd/containerd/pkg/cri/store/container" runtimespec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - - 
"github.com/containerd/containerd/pkg/cri/store" - containerstore "github.com/containerd/containerd/pkg/cri/store/container" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // ContainerStatus inspects the container and returns the status. func (c *criService) ContainerStatus(ctx context.Context, r *runtime.ContainerStatusRequest) (*runtime.ContainerStatusResponse, error) { container, err := c.containerStore.Get(r.GetContainerId()) if err != nil { - return nil, errors.Wrapf(err, "an error occurred when try to find container %q", r.GetContainerId()) + return nil, fmt.Errorf("an error occurred when try to find container %q: %w", r.GetContainerId(), err) } // TODO(random-liu): Clean up the following logic in CRI. @@ -44,8 +44,8 @@ func (c *criService) ContainerStatus(ctx context.Context, r *runtime.ContainerSt imageRef := container.ImageRef image, err := c.imageStore.Get(imageRef) if err != nil { - if err != store.ErrNotExist { - return nil, errors.Wrapf(err, "failed to get image %q", imageRef) + if !errdefs.IsNotFound(err) { + return nil, fmt.Errorf("failed to get image %q: %w", imageRef, err) } } else { repoTags, repoDigests := parseImageReferences(image.References) @@ -60,18 +60,19 @@ func (c *criService) ContainerStatus(ctx context.Context, r *runtime.ContainerSt } } status := toCRIContainerStatus(container, spec, imageRef) + if status.GetCreatedAt() == 0 { // CRI doesn't allow CreatedAt == 0. 
info, err := container.Container.Info(ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to get CreatedAt in %q state", status.State) + return nil, fmt.Errorf("failed to get CreatedAt in %q state: %w", status.State, err) } status.CreatedAt = info.CreatedAt.UnixNano() } info, err := toCRIContainerInfo(ctx, container, r.GetVerbose()) if err != nil { - return nil, errors.Wrap(err, "failed to get verbose container info") + return nil, fmt.Errorf("failed to get verbose container info: %w", err) } return &runtime.ContainerStatusResponse{ @@ -119,6 +120,7 @@ func toCRIContainerStatus(container containerstore.Container, spec *runtime.Imag Annotations: meta.Config.GetAnnotations(), Mounts: meta.Config.GetMounts(), LogPath: meta.LogPath, + Resources: status.Resources, } } @@ -156,26 +158,26 @@ func toCRIContainerInfo(ctx context.Context, container containerstore.Container, var err error ci.RuntimeSpec, err = container.Container.Spec(ctx) if err != nil { - return nil, errors.Wrap(err, "failed to get container runtime spec") + return nil, fmt.Errorf("failed to get container runtime spec: %w", err) } ctrInfo, err := container.Container.Info(ctx) if err != nil { - return nil, errors.Wrap(err, "failed to get container info") + return nil, fmt.Errorf("failed to get container info: %w", err) } ci.SnapshotKey = ctrInfo.SnapshotKey ci.Snapshotter = ctrInfo.Snapshotter runtimeOptions, err := getRuntimeOptions(ctrInfo) if err != nil { - return nil, errors.Wrap(err, "failed to get runtime options") + return nil, fmt.Errorf("failed to get runtime options: %w", err) } ci.RuntimeType = ctrInfo.Runtime.Name ci.RuntimeOptions = runtimeOptions infoBytes, err := json.Marshal(ci) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal info %v", ci) + return nil, fmt.Errorf("failed to marshal info %v: %w", ci, err) } return map[string]string{ "info": string(infoBytes), diff --git a/pkg/cri/server/container_status_test.go b/pkg/cri/server/container_status_test.go index 
956839f..6981de1 100644 --- a/pkg/cri/server/container_status_test.go +++ b/pkg/cri/server/container_status_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" containerstore "github.com/containerd/containerd/pkg/cri/store/container" imagestore "github.com/containerd/containerd/pkg/cri/store/image" diff --git a/pkg/cri/server/container_stop.go b/pkg/cri/server/container_stop.go index 56b1075..9948710 100644 --- a/pkg/cri/server/container_stop.go +++ b/pkg/cri/server/container_stop.go @@ -17,35 +17,42 @@ package server import ( + "fmt" "sync/atomic" "syscall" "time" - "github.com/containerd/containerd" eventtypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/pkg/errors" - "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - - "github.com/containerd/containerd/pkg/cri/store" containerstore "github.com/containerd/containerd/pkg/cri/store/container" ctrdutil "github.com/containerd/containerd/pkg/cri/util" + + "github.com/moby/sys/signal" + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // StopContainer stops a running container with a grace period (i.e., timeout). func (c *criService) StopContainer(ctx context.Context, r *runtime.StopContainerRequest) (*runtime.StopContainerResponse, error) { + start := time.Now() // Get container config from container store. 
container, err := c.containerStore.Get(r.GetContainerId()) if err != nil { - return nil, errors.Wrapf(err, "an error occurred when try to find container %q", r.GetContainerId()) + return nil, fmt.Errorf("an error occurred when try to find container %q: %w", r.GetContainerId(), err) } if err := c.stopContainer(ctx, container, time.Duration(r.GetTimeout())*time.Second); err != nil { return nil, err } + i, err := container.Container.Info(ctx) + if err != nil { + return nil, fmt.Errorf("get container info: %w", err) + } + + containerStopTimer.WithValues(i.Runtime.Name).UpdateSince(start) + return &runtime.StopContainerResponse{}, nil } @@ -66,11 +73,11 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore task, err := container.Container.Task(ctx, nil) if err != nil { if !errdefs.IsNotFound(err) { - return errors.Wrapf(err, "failed to get task for container %q", id) + return fmt.Errorf("failed to get task for container %q: %w", id, err) } // Don't return for unknown state, some cleanup needs to be done. if state == runtime.ContainerState_CONTAINER_UNKNOWN { - return cleanupUnknownContainer(ctx, id, container) + return cleanupUnknownContainer(ctx, id, container, c) } return nil } @@ -83,9 +90,9 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore exitCh, err := task.Wait(waitCtx) if err != nil { if !errdefs.IsNotFound(err) { - return errors.Wrapf(err, "failed to wait for task for %q", id) + return fmt.Errorf("failed to wait for task for %q: %w", id, err) } - return cleanupUnknownContainer(ctx, id, container) + return cleanupUnknownContainer(ctx, id, container, c) } exitCtx, exitCancel := context.WithCancel(context.Background()) @@ -116,8 +123,8 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore // TODO(random-liu): Remove this logic when containerd 1.2 is deprecated. 
image, err := c.imageStore.Get(container.ImageRef) if err != nil { - if err != store.ErrNotExist { - return errors.Wrapf(err, "failed to get image %q", container.ImageRef) + if !errdefs.IsNotFound(err) { + return fmt.Errorf("failed to get image %q: %w", container.ImageRef, err) } log.G(ctx).Warningf("Image %q not found, stop container with signal %q", container.ImageRef, stopSignal) } else { @@ -126,9 +133,9 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore } } } - sig, err := containerd.ParseSignal(stopSignal) + sig, err := signal.ParseSignal(stopSignal) if err != nil { - return errors.Wrapf(err, "failed to parse stop signal %q", stopSignal) + return fmt.Errorf("failed to parse stop signal %q: %w", stopSignal, err) } var sswt bool @@ -142,7 +149,7 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore if sswt { log.G(ctx).Infof("Stop container %q with signal %v", id, sig) if err = task.Kill(ctx, sig); err != nil && !errdefs.IsNotFound(err) { - return errors.Wrapf(err, "failed to stop container %q", id) + return fmt.Errorf("failed to stop container %q: %w", id, err) } } else { log.G(ctx).Infof("Skipping the sending of signal %v to container %q because a prior stop with timeout>0 request already sent the signal", sig, id) @@ -165,13 +172,13 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore log.G(ctx).Infof("Kill container %q", id) if err = task.Kill(ctx, syscall.SIGKILL); err != nil && !errdefs.IsNotFound(err) { - return errors.Wrapf(err, "failed to kill container %q", id) + return fmt.Errorf("failed to kill container %q: %w", id, err) } // Wait for a fixed timeout until container stop is observed by event monitor. 
err = c.waitContainerStop(ctx, container) if err != nil { - return errors.Wrapf(err, "an error occurs during waiting for container %q to be killed", id) + return fmt.Errorf("an error occurs during waiting for container %q to be killed: %w", id, err) } return nil } @@ -181,14 +188,14 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore func (c *criService) waitContainerStop(ctx context.Context, container containerstore.Container) error { select { case <-ctx.Done(): - return errors.Wrapf(ctx.Err(), "wait container %q", container.ID) + return fmt.Errorf("wait container %q: %w", container.ID, ctx.Err()) case <-container.Stopped(): return nil } } // cleanupUnknownContainer cleanup stopped container in unknown state. -func cleanupUnknownContainer(ctx context.Context, id string, cntr containerstore.Container) error { +func cleanupUnknownContainer(ctx context.Context, id string, cntr containerstore.Container, c *criService) error { // Reuse handleContainerExit to do the cleanup. return handleContainerExit(ctx, &eventtypes.TaskExit{ ContainerID: id, @@ -196,5 +203,5 @@ func cleanupUnknownContainer(ctx context.Context, id string, cntr containerstore Pid: 0, ExitStatus: unknownExitCode, ExitedAt: time.Now(), - }, cntr) + }, cntr, c) } diff --git a/pkg/cri/server/container_update_resources.go b/pkg/cri/server/container_update_resources.go new file mode 100644 index 0000000..48e6fd6 --- /dev/null +++ b/pkg/cri/server/container_update_resources.go @@ -0,0 +1,136 @@ +//go:build !darwin && !freebsd +// +build !darwin,!freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + gocontext "context" + "fmt" + + "github.com/containerd/containerd" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/typeurl" + runtimespec "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + + containerstore "github.com/containerd/containerd/pkg/cri/store/container" + ctrdutil "github.com/containerd/containerd/pkg/cri/util" +) + +// UpdateContainerResources updates ContainerConfig of the container. +func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (retRes *runtime.UpdateContainerResourcesResponse, retErr error) { + container, err := c.containerStore.Get(r.GetContainerId()) + if err != nil { + return nil, fmt.Errorf("failed to find container: %w", err) + } + // Update resources in status update transaction, so that: + // 1) There won't be race condition with container start. + // 2) There won't be concurrent resource update to the same container. 
+ if err := container.Status.UpdateSync(func(status containerstore.Status) (containerstore.Status, error) { + return c.updateContainerResources(ctx, container, r, status) + }); err != nil { + return nil, fmt.Errorf("failed to update resources: %w", err) + } + return &runtime.UpdateContainerResourcesResponse{}, nil +} + +func (c *criService) updateContainerResources(ctx context.Context, + cntr containerstore.Container, + r *runtime.UpdateContainerResourcesRequest, + status containerstore.Status) (newStatus containerstore.Status, retErr error) { + + newStatus = status + id := cntr.ID + // Do not update the container when there is a removal in progress. + if status.Removing { + return newStatus, fmt.Errorf("container %q is in removing state", id) + } + + // Update container spec. If the container is not started yet, updating + // spec makes sure that the resource limits are correct when start; + // if the container is already started, updating spec is still required, + // the spec will become our source of truth for resource limits. + oldSpec, err := cntr.Container.Spec(ctx) + if err != nil { + return newStatus, fmt.Errorf("failed to get container spec: %w", err) + } + newSpec, err := updateOCIResource(ctx, oldSpec, r, c.config) + if err != nil { + return newStatus, fmt.Errorf("failed to update resource in spec: %w", err) + } + + if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil { + return newStatus, err + } + defer func() { + if retErr != nil { + deferCtx, deferCancel := ctrdutil.DeferContext() + defer deferCancel() + // Reset spec on error. 
+ if err := updateContainerSpec(deferCtx, cntr.Container, oldSpec); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to update spec %+v for container %q", oldSpec, id) + } + } else { + // Update container status only when the spec is updated + newStatus = copyResourcesToStatus(newSpec, status) + } + }() + + // If container is not running, only update spec is enough, new resource + // limit will be applied when container start. + if status.State() != runtime.ContainerState_CONTAINER_RUNNING { + return newStatus, nil + } + + task, err := cntr.Container.Task(ctx, nil) + if err != nil { + if errdefs.IsNotFound(err) { + // Task exited already. + return newStatus, nil + } + return newStatus, fmt.Errorf("failed to get task: %w", err) + } + // newSpec.Linux / newSpec.Windows won't be nil + if err := task.Update(ctx, containerd.WithResources(getResources(newSpec))); err != nil { + if errdefs.IsNotFound(err) { + // Task exited already. + return newStatus, nil + } + return newStatus, fmt.Errorf("failed to update resources: %w", err) + } + return newStatus, nil +} + +// updateContainerSpec updates container spec. 
+func updateContainerSpec(ctx context.Context, cntr containerd.Container, spec *runtimespec.Spec) error { + any, err := typeurl.MarshalAny(spec) + if err != nil { + return fmt.Errorf("failed to marshal spec %+v: %w", spec, err) + } + if err := cntr.Update(ctx, func(ctx gocontext.Context, client *containerd.Client, c *containers.Container) error { + c.Spec = any + return nil + }); err != nil { + return fmt.Errorf("failed to update container spec: %w", err) + } + return nil +} diff --git a/pkg/cri/server/container_update_resources_linux.go b/pkg/cri/server/container_update_resources_linux.go index 787b7a6..a30d194 100644 --- a/pkg/cri/server/container_update_resources_linux.go +++ b/pkg/cri/server/container_update_resources_linux.go @@ -17,132 +17,35 @@ package server import ( - gocontext "context" + "fmt" - "github.com/containerd/containerd" - "github.com/containerd/containerd/containers" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/containerd/typeurl" runtimespec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + criconfig "github.com/containerd/containerd/pkg/cri/config" "github.com/containerd/containerd/pkg/cri/opts" - containerstore "github.com/containerd/containerd/pkg/cri/store/container" "github.com/containerd/containerd/pkg/cri/util" - ctrdutil "github.com/containerd/containerd/pkg/cri/util" ) -// UpdateContainerResources updates ContainerConfig of the container. 
-func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (retRes *runtime.UpdateContainerResourcesResponse, retErr error) { - container, err := c.containerStore.Get(r.GetContainerId()) - if err != nil { - return nil, errors.Wrap(err, "failed to find container") - } - // Update resources in status update transaction, so that: - // 1) There won't be race condition with container start. - // 2) There won't be concurrent resource update to the same container. - if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) { - return status, c.updateContainerResources(ctx, container, r.GetLinux(), status) - }); err != nil { - return nil, errors.Wrap(err, "failed to update resources") - } - return &runtime.UpdateContainerResourcesResponse{}, nil -} +// updateOCIResource updates container resource limit. +func updateOCIResource(ctx context.Context, spec *runtimespec.Spec, r *runtime.UpdateContainerResourcesRequest, + config criconfig.Config) (*runtimespec.Spec, error) { -func (c *criService) updateContainerResources(ctx context.Context, - cntr containerstore.Container, - resources *runtime.LinuxContainerResources, - status containerstore.Status) (retErr error) { - id := cntr.ID - // Do not update the container when there is a removal in progress. - if status.Removing { - return errors.Errorf("container %q is in removing state", id) - } - - // Update container spec. If the container is not started yet, updating - // spec makes sure that the resource limits are correct when start; - // if the container is already started, updating spec is still required, - // the spec will become our source of truth for resource limits. 
- oldSpec, err := cntr.Container.Spec(ctx) - if err != nil { - return errors.Wrap(err, "failed to get container spec") - } - newSpec, err := updateOCILinuxResource(ctx, oldSpec, resources, - c.config.TolerateMissingHugetlbController, c.config.DisableHugetlbController) - if err != nil { - return errors.Wrap(err, "failed to update resource in spec") - } - - if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil { - return err - } - defer func() { - if retErr != nil { - deferCtx, deferCancel := ctrdutil.DeferContext() - defer deferCancel() - // Reset spec on error. - if err := updateContainerSpec(deferCtx, cntr.Container, oldSpec); err != nil { - log.G(ctx).WithError(err).Errorf("Failed to update spec %+v for container %q", oldSpec, id) - } - } - }() - - // If container is not running, only update spec is enough, new resource - // limit will be applied when container start. - if status.State() != runtime.ContainerState_CONTAINER_RUNNING { - return nil - } - - task, err := cntr.Container.Task(ctx, nil) - if err != nil { - if errdefs.IsNotFound(err) { - // Task exited already. - return nil - } - return errors.Wrap(err, "failed to get task") - } - // newSpec.Linux won't be nil - if err := task.Update(ctx, containerd.WithResources(newSpec.Linux.Resources)); err != nil { - if errdefs.IsNotFound(err) { - // Task exited already. - return nil - } - return errors.Wrap(err, "failed to update resources") - } - return nil -} - -// updateContainerSpec updates container spec. 
-func updateContainerSpec(ctx context.Context, cntr containerd.Container, spec *runtimespec.Spec) error { - any, err := typeurl.MarshalAny(spec) - if err != nil { - return errors.Wrapf(err, "failed to marshal spec %+v", spec) - } - if err := cntr.Update(ctx, func(ctx gocontext.Context, client *containerd.Client, c *containers.Container) error { - c.Spec = any - return nil - }); err != nil { - return errors.Wrap(err, "failed to update container spec") - } - return nil -} - -// updateOCILinuxResource updates container resource limit. -func updateOCILinuxResource(ctx context.Context, spec *runtimespec.Spec, new *runtime.LinuxContainerResources, - tolerateMissingHugetlbController, disableHugetlbController bool) (*runtimespec.Spec, error) { // Copy to make sure old spec is not changed. var cloned runtimespec.Spec if err := util.DeepCopy(&cloned, spec); err != nil { - return nil, errors.Wrap(err, "failed to deep copy") + return nil, fmt.Errorf("failed to deep copy: %w", err) } if cloned.Linux == nil { cloned.Linux = &runtimespec.Linux{} } - if err := opts.WithResources(new, tolerateMissingHugetlbController, disableHugetlbController)(ctx, nil, nil, &cloned); err != nil { - return nil, errors.Wrap(err, "unable to set linux container resources") + if err := opts.WithResources(r.GetLinux(), config.TolerateMissingHugetlbController, config.DisableHugetlbController)(ctx, nil, nil, &cloned); err != nil { + return nil, fmt.Errorf("unable to set linux container resources: %w", err) } return &cloned, nil } + +func getResources(spec *runtimespec.Spec) interface{} { + return spec.Linux.Resources +} diff --git a/pkg/cri/server/container_update_resources_linux_test.go b/pkg/cri/server/container_update_resources_linux_test.go index a98ae09..33a6acf 100644 --- a/pkg/cri/server/container_update_resources_linux_test.go +++ b/pkg/cri/server/container_update_resources_linux_test.go @@ -20,19 +20,27 @@ import ( "context" "testing" - "github.com/golang/protobuf/proto" runtimespec 
"github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "google.golang.org/protobuf/proto" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + + criconfig "github.com/containerd/containerd/pkg/cri/config" + criopts "github.com/containerd/containerd/pkg/cri/opts" ) func TestUpdateOCILinuxResource(t *testing.T) { - t.Skip("It requires some privileges not achievable during the build.") oomscoreadj := new(int) *oomscoreadj = -500 + expectedSwap := func(swap int64) *int64 { + if criopts.SwapControllerAvailable() { + return &swap + } + return nil + } for desc, test := range map[string]struct { spec *runtimespec.Spec - resources *runtime.LinuxContainerResources + request *runtime.UpdateContainerResourcesRequest expected *runtimespec.Spec expectErr bool }{ @@ -49,23 +57,30 @@ func TestUpdateOCILinuxResource(t *testing.T) { Cpus: "0-1", Mems: "2-3", }, + Unified: map[string]string{"memory.min": "65536", "memory.swap.max": "1024"}, }, }, }, - resources: &runtime.LinuxContainerResources{ - CpuPeriod: 6666, - CpuQuota: 5555, - CpuShares: 4444, - MemoryLimitInBytes: 54321, - OomScoreAdj: 500, - CpusetCpus: "4-5", - CpusetMems: "6-7", + request: &runtime.UpdateContainerResourcesRequest{ + Linux: &runtime.LinuxContainerResources{ + CpuPeriod: 6666, + CpuQuota: 5555, + CpuShares: 4444, + MemoryLimitInBytes: 54321, + OomScoreAdj: 500, + CpusetCpus: "4-5", + CpusetMems: "6-7", + Unified: map[string]string{"memory.min": "1507328", "memory.swap.max": "0"}, + }, }, expected: &runtimespec.Spec{ Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj}, Linux: &runtimespec.Linux{ Resources: &runtimespec.LinuxResources{ - Memory: &runtimespec.LinuxMemory{Limit: proto.Int64(54321)}, + Memory: &runtimespec.LinuxMemory{ + Limit: proto.Int64(54321), + Swap: expectedSwap(54321), + }, CPU: &runtimespec.LinuxCPU{ Shares: proto.Uint64(4444), Quota: proto.Int64(5555), @@ -73,6 +88,7 @@ func 
TestUpdateOCILinuxResource(t *testing.T) { Cpus: "4-5", Mems: "6-7", }, + Unified: map[string]string{"memory.min": "1507328", "memory.swap.max": "0"}, }, }, }, @@ -90,21 +106,27 @@ func TestUpdateOCILinuxResource(t *testing.T) { Cpus: "0-1", Mems: "2-3", }, + Unified: map[string]string{"memory.min": "65536", "memory.swap.max": "1024"}, }, }, }, - resources: &runtime.LinuxContainerResources{ - CpuQuota: 5555, - CpuShares: 4444, - MemoryLimitInBytes: 54321, - OomScoreAdj: 500, - CpusetMems: "6-7", + request: &runtime.UpdateContainerResourcesRequest{ + Linux: &runtime.LinuxContainerResources{ + CpuQuota: 5555, + CpuShares: 4444, + MemoryLimitInBytes: 54321, + OomScoreAdj: 500, + CpusetMems: "6-7", + }, }, expected: &runtimespec.Spec{ Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj}, Linux: &runtimespec.Linux{ Resources: &runtimespec.LinuxResources{ - Memory: &runtimespec.LinuxMemory{Limit: proto.Int64(54321)}, + Memory: &runtimespec.LinuxMemory{ + Limit: proto.Int64(54321), + Swap: expectedSwap(54321), + }, CPU: &runtimespec.LinuxCPU{ Shares: proto.Uint64(4444), Quota: proto.Int64(5555), @@ -112,6 +134,7 @@ func TestUpdateOCILinuxResource(t *testing.T) { Cpus: "0-1", Mems: "6-7", }, + Unified: map[string]string{"memory.min": "65536", "memory.swap.max": "1024"}, }, }, }, @@ -125,20 +148,26 @@ func TestUpdateOCILinuxResource(t *testing.T) { }, }, }, - resources: &runtime.LinuxContainerResources{ - CpuPeriod: 6666, - CpuQuota: 5555, - CpuShares: 4444, - MemoryLimitInBytes: 54321, - OomScoreAdj: 500, - CpusetCpus: "4-5", - CpusetMems: "6-7", + request: &runtime.UpdateContainerResourcesRequest{ + Linux: &runtime.LinuxContainerResources{ + CpuPeriod: 6666, + CpuQuota: 5555, + CpuShares: 4444, + MemoryLimitInBytes: 54321, + OomScoreAdj: 500, + CpusetCpus: "4-5", + CpusetMems: "6-7", + Unified: map[string]string{"memory.min": "65536", "memory.swap.max": "1024"}, + }, }, expected: &runtimespec.Spec{ Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj}, Linux: 
&runtimespec.Linux{ Resources: &runtimespec.LinuxResources{ - Memory: &runtimespec.LinuxMemory{Limit: proto.Int64(54321)}, + Memory: &runtimespec.LinuxMemory{ + Limit: proto.Int64(54321), + Swap: expectedSwap(54321), + }, CPU: &runtimespec.LinuxCPU{ Shares: proto.Uint64(4444), Quota: proto.Int64(5555), @@ -146,13 +175,69 @@ func TestUpdateOCILinuxResource(t *testing.T) { Cpus: "4-5", Mems: "6-7", }, + Unified: map[string]string{"memory.min": "65536", "memory.swap.max": "1024"}, + }, + }, + }, + }, + "should be able to patch the unified map": { + spec: &runtimespec.Spec{ + Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj}, + Linux: &runtimespec.Linux{ + Resources: &runtimespec.LinuxResources{ + Memory: &runtimespec.LinuxMemory{Limit: proto.Int64(12345)}, + CPU: &runtimespec.LinuxCPU{ + Shares: proto.Uint64(1111), + Quota: proto.Int64(2222), + Period: proto.Uint64(3333), + Cpus: "0-1", + Mems: "2-3", + }, + Unified: map[string]string{"memory.min": "65536", "memory.max": "1507328"}, + }, + }, + }, + request: &runtime.UpdateContainerResourcesRequest{ + Linux: &runtime.LinuxContainerResources{ + CpuPeriod: 6666, + CpuQuota: 5555, + CpuShares: 4444, + MemoryLimitInBytes: 54321, + OomScoreAdj: 500, + CpusetCpus: "4-5", + CpusetMems: "6-7", + Unified: map[string]string{"memory.min": "1507328", "memory.swap.max": "1024"}, + }, + }, + expected: &runtimespec.Spec{ + Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj}, + Linux: &runtimespec.Linux{ + Resources: &runtimespec.LinuxResources{ + Memory: &runtimespec.LinuxMemory{ + Limit: proto.Int64(54321), + Swap: expectedSwap(54321), + }, + CPU: &runtimespec.LinuxCPU{ + Shares: proto.Uint64(4444), + Quota: proto.Int64(5555), + Period: proto.Uint64(6666), + Cpus: "4-5", + Mems: "6-7", + }, + Unified: map[string]string{"memory.min": "1507328", "memory.max": "1507328", "memory.swap.max": "1024"}, }, }, }, }, } { t.Logf("TestCase %q", desc) - got, err := updateOCILinuxResource(context.Background(), test.spec, 
test.resources, false, false) + config := criconfig.Config{ + PluginConfig: criconfig.PluginConfig{ + TolerateMissingHugetlbController: true, + DisableHugetlbController: false, + }, + } + got, err := updateOCIResource(context.Background(), test.spec, test.request, config) if test.expectErr { assert.Error(t, err) } else { diff --git a/pkg/cri/server/container_update_resources_other.go b/pkg/cri/server/container_update_resources_other.go index 88bb1ef..b59c470 100644 --- a/pkg/cri/server/container_update_resources_other.go +++ b/pkg/cri/server/container_update_resources_other.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* @@ -19,9 +20,10 @@ package server import ( - "github.com/pkg/errors" - "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "context" + "fmt" + + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" containerstore "github.com/containerd/containerd/pkg/cri/store/container" ) @@ -30,7 +32,7 @@ import ( func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (retRes *runtime.UpdateContainerResourcesResponse, retErr error) { container, err := c.containerStore.Get(r.GetContainerId()) if err != nil { - return nil, errors.Wrap(err, "failed to find container") + return nil, fmt.Errorf("failed to find container: %w", err) } // Update resources in status update transaction, so that: // 1) There won't be race condition with container start. 
@@ -38,7 +40,7 @@ func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.Up if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) { return status, nil }); err != nil { - return nil, errors.Wrap(err, "failed to update resources") + return nil, fmt.Errorf("failed to update resources: %w", err) } return &runtime.UpdateContainerResourcesResponse{}, nil } diff --git a/pkg/cri/server/container_update_resources_windows.go b/pkg/cri/server/container_update_resources_windows.go index 72d0d45..74ce4b3 100644 --- a/pkg/cri/server/container_update_resources_windows.go +++ b/pkg/cri/server/container_update_resources_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -19,13 +17,35 @@ package server import ( - "github.com/containerd/containerd/errdefs" + "fmt" + + runtimespec "github.com/opencontainers/runtime-spec/specs-go" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + + criconfig "github.com/containerd/containerd/pkg/cri/config" + "github.com/containerd/containerd/pkg/cri/opts" + "github.com/containerd/containerd/pkg/cri/util" ) -// UpdateContainerResources updates ContainerConfig of the container. -// TODO(windows): Figure out whether windows support this. -func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (*runtime.UpdateContainerResourcesResponse, error) { - return nil, errdefs.ErrNotImplemented +// updateOCIResource updates container resource limit. +func updateOCIResource(ctx context.Context, spec *runtimespec.Spec, r *runtime.UpdateContainerResourcesRequest, + config criconfig.Config) (*runtimespec.Spec, error) { + + // Copy to make sure old spec is not changed. 
+ var cloned runtimespec.Spec + if err := util.DeepCopy(&cloned, spec); err != nil { + return nil, fmt.Errorf("failed to deep copy: %w", err) + } + if cloned.Windows == nil { + cloned.Windows = &runtimespec.Windows{} + } + if err := opts.WithWindowsResources(r.GetWindows())(ctx, nil, nil, &cloned); err != nil { + return nil, fmt.Errorf("unable to set windows container resources: %w", err) + } + return &cloned, nil +} + +func getResources(spec *runtimespec.Spec) interface{} { + return spec.Windows.Resources } diff --git a/pkg/cri/server/events.go b/pkg/cri/server/events.go index 8d66319..6622f44 100644 --- a/pkg/cri/server/events.go +++ b/pkg/cri/server/events.go @@ -17,26 +17,26 @@ package server import ( + "errors" + "fmt" "sync" "time" "github.com/containerd/containerd" eventtypes "github.com/containerd/containerd/api/events" + apitasks "github.com/containerd/containerd/api/services/tasks/v1" containerdio "github.com/containerd/containerd/cio" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events" - "github.com/containerd/typeurl" - gogotypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/net/context" - "k8s.io/apimachinery/pkg/util/clock" - "github.com/containerd/containerd/pkg/cri/constants" - "github.com/containerd/containerd/pkg/cri/store" containerstore "github.com/containerd/containerd/pkg/cri/store/container" sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" ctrdutil "github.com/containerd/containerd/pkg/cri/util" + "github.com/containerd/typeurl" + gogotypes "github.com/gogo/protobuf/types" + "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "k8s.io/utils/clock" ) const ( @@ -137,12 +137,12 @@ func (em *eventMonitor) startSandboxExitMonitor(ctx context.Context, id string, sb, err := em.c.sandboxStore.Get(e.ID) if err == nil { - if err := handleSandboxExit(dctx, e, sb); err != nil { + if err := handleSandboxExit(dctx, e, sb, em.c); err != 
nil { return err } return nil - } else if err != store.ErrNotExist { - return errors.Wrapf(err, "failed to get sandbox %s", e.ID) + } else if !errdefs.IsNotFound(err) { + return fmt.Errorf("failed to get sandbox %s: %w", e.ID, err) } return nil }() @@ -188,12 +188,12 @@ func (em *eventMonitor) startContainerExitMonitor(ctx context.Context, id string cntr, err := em.c.containerStore.Get(e.ID) if err == nil { - if err := handleContainerExit(dctx, e, cntr); err != nil { + if err := handleContainerExit(dctx, e, cntr, em.c); err != nil { return err } return nil - } else if err != store.ErrNotExist { - return errors.Wrapf(err, "failed to get container %s", e.ID) + } else if !errdefs.IsNotFound(err) { + return fmt.Errorf("failed to get container %s: %w", e.ID, err) } return nil }() @@ -212,7 +212,7 @@ func convertEvent(e *gogotypes.Any) (string, interface{}, error) { id := "" evt, err := typeurl.UnmarshalAny(e) if err != nil { - return "", nil, errors.Wrap(err, "failed to unmarshalany") + return "", nil, fmt.Errorf("failed to unmarshalany: %w", err) } switch e := evt.(type) { @@ -235,11 +235,11 @@ func convertEvent(e *gogotypes.Any) (string, interface{}, error) { // event monitor. // // NOTE: -// 1. start must be called after subscribe. -// 2. The task exit event has been handled in individual startSandboxExitMonitor -// or startContainerExitMonitor goroutine at the first. If the goroutine fails, -// it puts the event into backoff retry queue and event monitor will handle -// it later. +// 1. start must be called after subscribe. +// 2. The task exit event has been handled in individual startSandboxExitMonitor +// or startContainerExitMonitor goroutine at the first. If the goroutine fails, +// it puts the event into backoff retry queue and event monitor will handle +// it later. 
func (em *eventMonitor) start() <-chan error { errCh := make(chan error) if em.ch == nil || em.errCh == nil { @@ -273,7 +273,7 @@ func (em *eventMonitor) start() <-chan error { case err := <-em.errCh: // Close errCh in defer directly if there is no error. if err != nil { - logrus.WithError(err).Errorf("Failed to handle event stream") + logrus.WithError(err).Error("Failed to handle event stream") errCh <- err } return @@ -314,21 +314,21 @@ func (em *eventMonitor) handleEvent(any interface{}) error { // Use ID instead of ContainerID to rule out TaskExit event for exec. cntr, err := em.c.containerStore.Get(e.ID) if err == nil { - if err := handleContainerExit(ctx, e, cntr); err != nil { - return errors.Wrap(err, "failed to handle container TaskExit event") + if err := handleContainerExit(ctx, e, cntr, em.c); err != nil { + return fmt.Errorf("failed to handle container TaskExit event: %w", err) } return nil - } else if err != store.ErrNotExist { - return errors.Wrap(err, "can't find container for TaskExit event") + } else if !errdefs.IsNotFound(err) { + return fmt.Errorf("can't find container for TaskExit event: %w", err) } sb, err := em.c.sandboxStore.Get(e.ID) if err == nil { - if err := handleSandboxExit(ctx, e, sb); err != nil { - return errors.Wrap(err, "failed to handle sandbox TaskExit event") + if err := handleSandboxExit(ctx, e, sb, em.c); err != nil { + return fmt.Errorf("failed to handle sandbox TaskExit event: %w", err) } return nil - } else if err != store.ErrNotExist { - return errors.Wrap(err, "can't find sandbox for TaskExit event") + } else if !errdefs.IsNotFound(err) { + return fmt.Errorf("can't find sandbox for TaskExit event: %w", err) } return nil case *eventtypes.TaskOOM: @@ -336,8 +336,8 @@ func (em *eventMonitor) handleEvent(any interface{}) error { // For TaskOOM, we only care which container it belongs to. 
cntr, err := em.c.containerStore.Get(e.ContainerID) if err != nil { - if err != store.ErrNotExist { - return errors.Wrap(err, "can't find container for TaskOOM event") + if !errdefs.IsNotFound(err) { + return fmt.Errorf("can't find container for TaskOOM event: %w", err) } return nil } @@ -346,7 +346,7 @@ func (em *eventMonitor) handleEvent(any interface{}) error { return status, nil }) if err != nil { - return errors.Wrap(err, "failed to update container status for TaskOOM event") + return fmt.Errorf("failed to update container status for TaskOOM event: %w", err) } case *eventtypes.ImageCreate: logrus.Infof("ImageCreate event %+v", e) @@ -363,7 +363,7 @@ func (em *eventMonitor) handleEvent(any interface{}) error { } // handleContainerExit handles TaskExit event for container. -func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr containerstore.Container) error { +func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr containerstore.Container, c *criService) error { // Attach container IO so that `Delete` could cleanup the stream properly. task, err := cntr.Container.Task(ctx, func(*containerdio.FIFOSet) (containerdio.IO, error) { @@ -381,17 +381,62 @@ func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr conta ) if err != nil { if !errdefs.IsNotFound(err) { - return errors.Wrapf(err, "failed to load task for container") + return fmt.Errorf("failed to load task for container: %w", err) } } else { // TODO(random-liu): [P1] This may block the loop, we may want to spawn a worker if _, err = task.Delete(ctx, WithNRISandboxDelete(cntr.SandboxID), containerd.WithProcessKill); err != nil { if !errdefs.IsNotFound(err) { - return errors.Wrap(err, "failed to stop container") + return fmt.Errorf("failed to stop container: %w", err) } // Move on to make sure container status is updated. } } + + // NOTE: Both sb.Container.Task and task.Delete interface always ensures + // that the status of target task. 
However, the interfaces return + // ErrNotFound, which doesn't mean that the shim instance doesn't exist. + // + // There are two caches for task in containerd: + // + // 1. io.containerd.service.v1.tasks-service + // 2. io.containerd.runtime.v2.task + // + // First one is to maintain the shim connection and shutdown the shim + // in Delete API. And the second one is to maintain the lifecycle of + // task in shim server. + // + // So, if the shim instance is running and task has been deleted in shim + // server, the sb.Container.Task and task.Delete will receive the + // ErrNotFound. If we don't delete the shim instance in io.containerd.service.v1.tasks-service, + // shim will be leaky. + // + // Based on containerd/containerd#7496 issue, when host is under IO + // pressure, the umount2 syscall will take more than 10 seconds so that + // the CRI plugin will cancel this task.Delete call. However, the shim + // server isn't aware about this. After return from umount2 syscall, the + // shim server continue delete the task record. And then CRI plugin + // retries to delete task and retrieves ErrNotFound and marks it as + // stopped. Therefore, The shim is leaky. + // + // It's hard to handle the connection lost or request canceled cases in + // shim server. We should call Delete API to io.containerd.service.v1.tasks-service + // to ensure that shim instance is shutdown. + // + // REF: + // 1. https://github.com/containerd/containerd/issues/7496#issuecomment-1671100968 + // 2. 
https://github.com/containerd/containerd/issues/8931 + if errdefs.IsNotFound(err) { + _, err = c.client.TaskService().Delete(ctx, &apitasks.DeleteTaskRequest{ContainerID: cntr.Container.ID()}) + if err != nil { + err = errdefs.FromGRPC(err) + if !errdefs.IsNotFound(err) { + return fmt.Errorf("failed to cleanup container %s in task-service: %w", cntr.Container.ID(), err) + } + } + logrus.Infof("Ensure that container %s in task-service has been cleanup successfully", cntr.Container.ID()) + } + err = cntr.Status.UpdateSync(func(status containerstore.Status) (containerstore.Status, error) { if status.FinishedAt == 0 { status.Pid = 0 @@ -408,7 +453,7 @@ func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr conta return status, nil }) if err != nil { - return errors.Wrap(err, "failed to update container state") + return fmt.Errorf("failed to update container state: %w", err) } // Using channel to propagate the information of container stop cntr.Stop() @@ -416,29 +461,73 @@ func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr conta } // handleSandboxExit handles TaskExit event for sandbox. -func handleSandboxExit(ctx context.Context, e *eventtypes.TaskExit, sb sandboxstore.Sandbox) error { +func handleSandboxExit(ctx context.Context, e *eventtypes.TaskExit, sb sandboxstore.Sandbox, c *criService) error { // No stream attached to sandbox container. 
task, err := sb.Container.Task(ctx, nil) if err != nil { if !errdefs.IsNotFound(err) { - return errors.Wrap(err, "failed to load task for sandbox") + return fmt.Errorf("failed to load task for sandbox: %w", err) } } else { // TODO(random-liu): [P1] This may block the loop, we may want to spawn a worker if _, err = task.Delete(ctx, WithNRISandboxDelete(sb.ID), containerd.WithProcessKill); err != nil { if !errdefs.IsNotFound(err) { - return errors.Wrap(err, "failed to stop sandbox") + return fmt.Errorf("failed to stop sandbox: %w", err) } // Move on to make sure container status is updated. } } + + // NOTE: Both sb.Container.Task and task.Delete interface always ensures + // that the status of target task. However, the interfaces return + // ErrNotFound, which doesn't mean that the shim instance doesn't exist. + // + // There are two caches for task in containerd: + // + // 1. io.containerd.service.v1.tasks-service + // 2. io.containerd.runtime.v2.task + // + // First one is to maintain the shim connection and shutdown the shim + // in Delete API. And the second one is to maintain the lifecycle of + // task in shim server. + // + // So, if the shim instance is running and task has been deleted in shim + // server, the sb.Container.Task and task.Delete will receive the + // ErrNotFound. If we don't delete the shim instance in io.containerd.service.v1.tasks-service, + // shim will be leaky. + // + // Based on containerd/containerd#7496 issue, when host is under IO + // pressure, the umount2 syscall will take more than 10 seconds so that + // the CRI plugin will cancel this task.Delete call. However, the shim + // server isn't aware about this. After return from umount2 syscall, the + // shim server continue delete the task record. And then CRI plugin + // retries to delete task and retrieves ErrNotFound and marks it as + // stopped. Therefore, The shim is leaky. + // + // It's hard to handle the connection lost or request canceled cases in + // shim server. 
We should call Delete API to io.containerd.service.v1.tasks-service + // to ensure that shim instance is shutdown. + // + // REF: + // 1. https://github.com/containerd/containerd/issues/7496#issuecomment-1671100968 + // 2. https://github.com/containerd/containerd/issues/8931 + if errdefs.IsNotFound(err) { + _, err = c.client.TaskService().Delete(ctx, &apitasks.DeleteTaskRequest{ContainerID: sb.Container.ID()}) + if err != nil { + err = errdefs.FromGRPC(err) + if !errdefs.IsNotFound(err) { + return fmt.Errorf("failed to cleanup sandbox %s in task-service: %w", sb.Container.ID(), err) + } + } + logrus.Infof("Ensure that sandbox %s in task-service has been cleanup successfully", sb.Container.ID()) + } err = sb.Status.Update(func(status sandboxstore.Status) (sandboxstore.Status, error) { status.State = sandboxstore.StateNotReady status.Pid = 0 return status, nil }) if err != nil { - return errors.Wrap(err, "failed to update sandbox state") + return fmt.Errorf("failed to update sandbox state: %w", err) } // Using channel to propagate the information of sandbox stop sb.Stop() diff --git a/pkg/cri/server/events_test.go b/pkg/cri/server/events_test.go index c7b49aa..163aefd 100644 --- a/pkg/cri/server/events_test.go +++ b/pkg/cri/server/events_test.go @@ -23,13 +23,13 @@ import ( eventtypes "github.com/containerd/containerd/api/events" "github.com/containerd/typeurl" "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/util/clock" + testingclock "k8s.io/utils/clock/testing" ) // TestBackOff tests the logic of backOff struct. 
func TestBackOff(t *testing.T) { testStartTime := time.Now() - testClock := clock.NewFakeClock(testStartTime) + testClock := testingclock.NewFakeClock(testStartTime) inputQueues := map[string]*backOffQueue{ "container1": { events: []interface{}{ diff --git a/pkg/cri/server/helpers.go b/pkg/cri/server/helpers.go index 8b400cb..4ee0ae8 100644 --- a/pkg/cri/server/helpers.go +++ b/pkg/cri/server/helpers.go @@ -23,28 +23,28 @@ import ( "strconv" "strings" - runhcsoptions "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" "github.com/containerd/containerd" "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/errdefs" clabels "github.com/containerd/containerd/labels" + criconfig "github.com/containerd/containerd/pkg/cri/config" + containerstore "github.com/containerd/containerd/pkg/cri/store/container" + imagestore "github.com/containerd/containerd/pkg/cri/store/image" + sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" + runtimeoptions "github.com/containerd/containerd/pkg/runtimeoptions/v1" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/reference/docker" "github.com/containerd/containerd/runtime/linux/runctypes" runcoptions "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/typeurl" + runtimespec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" + + runhcsoptions "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" imagedigest "github.com/opencontainers/go-digest" "github.com/pelletier/go-toml" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - - criconfig "github.com/containerd/containerd/pkg/cri/config" - "github.com/containerd/containerd/pkg/cri/store" - containerstore "github.com/containerd/containerd/pkg/cri/store/container" - imagestore "github.com/containerd/containerd/pkg/cri/store/image" - sandboxstore 
"github.com/containerd/containerd/pkg/cri/store/sandbox" - runtimeoptions "github.com/containerd/containerd/pkg/runtimeoptions/v1" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) const ( @@ -78,10 +78,6 @@ const ( containerKindSandbox = "sandbox" // containerKindContainer is a label value indicating container is application container containerKindContainer = "container" - // imageLabelKey is the label key indicating the image is managed by cri plugin. - imageLabelKey = criContainerdPrefix + ".image" - // imageLabelValue is the label value indicating the image is managed by cri plugin. - imageLabelValue = "managed" // sandboxMetadataExtension is an extension name that identify metadata of sandbox in CreateContainerRequest sandboxMetadataExtension = criContainerdPrefix + ".sandbox.metadata" // containerMetadataExtension is an extension name that identify metadata of container in CreateContainerRequest @@ -110,11 +106,11 @@ func makeSandboxName(s *runtime.PodSandboxMetadata) string { // unique. func makeContainerName(c *runtime.ContainerMetadata, s *runtime.PodSandboxMetadata) string { return strings.Join([]string{ - c.Name, // 0 + c.Name, // 0: container name s.Name, // 1: pod name s.Namespace, // 2: pod namespace s.Uid, // 3: pod uid - fmt.Sprintf("%d", c.Attempt), // 4 + fmt.Sprintf("%d", c.Attempt), // 4: attempt number of creating the container }, nameDelimiter) } @@ -196,7 +192,7 @@ func (c *criService) localResolve(refOrID string) (imagestore.Image, error) { func (c *criService) toContainerdImage(ctx context.Context, image imagestore.Image) (containerd.Image, error) { // image should always have at least one reference. 
if len(image.References) == 0 { - return nil, errors.Errorf("invalid image with no reference %q", image.ID) + return nil, fmt.Errorf("invalid image with no reference %q", image.ID) } return c.client.GetImage(ctx, image.References[0]) } @@ -224,8 +220,8 @@ func getUserFromImage(user string) (*int64, string) { // pulled yet, the function will pull the image. func (c *criService) ensureImageExists(ctx context.Context, ref string, config *runtime.PodSandboxConfig) (*imagestore.Image, error) { image, err := c.localResolve(ref) - if err != nil && err != store.ErrNotExist { - return nil, errors.Wrapf(err, "failed to get image %q", ref) + if err != nil && !errdefs.IsNotFound(err) { + return nil, fmt.Errorf("failed to get image %q: %w", ref, err) } if err == nil { return &image, nil @@ -233,13 +229,13 @@ func (c *criService) ensureImageExists(ctx context.Context, ref string, config * // Pull image to ensure the image exists resp, err := c.PullImage(ctx, &runtime.PullImageRequest{Image: &runtime.ImageSpec{Image: ref}, SandboxConfig: config}) if err != nil { - return nil, errors.Wrapf(err, "failed to pull image %q", ref) + return nil, fmt.Errorf("failed to pull image %q: %w", ref, err) } imageID := resp.GetImageRef() newImage, err := c.imageStore.Get(imageID) if err != nil { // It's still possible that someone removed the image right after it is pulled. 
- return nil, errors.Wrapf(err, "failed to get image %q after pulling", imageID) + return nil, fmt.Errorf("failed to get image %q after pulling: %w", imageID, err) } return &newImage, nil } @@ -251,18 +247,18 @@ func (c *criService) ensureImageExists(ctx context.Context, ref string, config * func (c *criService) validateTargetContainer(sandboxID, targetContainerID string) (containerstore.Container, error) { targetContainer, err := c.containerStore.Get(targetContainerID) if err != nil { - return containerstore.Container{}, errors.Wrapf(err, "container %q does not exist", targetContainerID) + return containerstore.Container{}, fmt.Errorf("container %q does not exist: %w", targetContainerID, err) } targetSandboxID := targetContainer.Metadata.SandboxID if targetSandboxID != sandboxID { return containerstore.Container{}, - errors.Errorf("container %q (sandbox %s) does not belong to sandbox %s", targetContainerID, targetSandboxID, sandboxID) + fmt.Errorf("container %q (sandbox %s) does not belong to sandbox %s", targetContainerID, targetSandboxID, sandboxID) } status := targetContainer.Status.Get() if state := status.State(); state != runtime.ContainerState_CONTAINER_RUNNING { - return containerstore.Container{}, errors.Errorf("container %q is not running - in state %s", targetContainerID, state) + return containerstore.Container{}, fmt.Errorf("container %q is not running - in state %s", targetContainerID, state) } return targetContainer, nil @@ -429,3 +425,87 @@ func getPassthroughAnnotations(podAnnotations map[string]string, } return passthroughAnnotations } + +// copyResourcesToStatus copys container resource contraints from spec to +// container status. +// This will need updates when new fields are added to ContainerResources. 
+func copyResourcesToStatus(spec *runtimespec.Spec, status containerstore.Status) containerstore.Status { + status.Resources = &runtime.ContainerResources{} + if spec.Linux != nil { + status.Resources.Linux = &runtime.LinuxContainerResources{} + + if spec.Process != nil && spec.Process.OOMScoreAdj != nil { + status.Resources.Linux.OomScoreAdj = int64(*spec.Process.OOMScoreAdj) + } + + if spec.Linux.Resources == nil { + return status + } + + if spec.Linux.Resources.CPU != nil { + if spec.Linux.Resources.CPU.Period != nil { + status.Resources.Linux.CpuPeriod = int64(*spec.Linux.Resources.CPU.Period) + } + if spec.Linux.Resources.CPU.Quota != nil { + status.Resources.Linux.CpuQuota = *spec.Linux.Resources.CPU.Quota + } + if spec.Linux.Resources.CPU.Shares != nil { + status.Resources.Linux.CpuShares = int64(*spec.Linux.Resources.CPU.Shares) + } + status.Resources.Linux.CpusetCpus = spec.Linux.Resources.CPU.Cpus + status.Resources.Linux.CpusetMems = spec.Linux.Resources.CPU.Mems + } + + if spec.Linux.Resources.Memory != nil { + if spec.Linux.Resources.Memory.Limit != nil { + status.Resources.Linux.MemoryLimitInBytes = *spec.Linux.Resources.Memory.Limit + } + if spec.Linux.Resources.Memory.Swap != nil { + status.Resources.Linux.MemorySwapLimitInBytes = *spec.Linux.Resources.Memory.Swap + } + } + + if spec.Linux.Resources.HugepageLimits != nil { + hugepageLimits := make([]*runtime.HugepageLimit, 0, len(spec.Linux.Resources.HugepageLimits)) + for _, l := range spec.Linux.Resources.HugepageLimits { + hugepageLimits = append(hugepageLimits, &runtime.HugepageLimit{ + PageSize: l.Pagesize, + Limit: l.Limit, + }) + } + status.Resources.Linux.HugepageLimits = hugepageLimits + } + + if spec.Linux.Resources.Unified != nil { + status.Resources.Linux.Unified = spec.Linux.Resources.Unified + } + } + + if spec.Windows != nil { + status.Resources.Windows = &runtime.WindowsContainerResources{} + if spec.Windows.Resources == nil { + return status + } + + if spec.Windows.Resources.CPU != 
nil { + if spec.Windows.Resources.CPU.Shares != nil { + status.Resources.Windows.CpuShares = int64(*spec.Windows.Resources.CPU.Shares) + } + if spec.Windows.Resources.CPU.Count != nil { + status.Resources.Windows.CpuCount = int64(*spec.Windows.Resources.CPU.Count) + } + if spec.Windows.Resources.CPU.Maximum != nil { + status.Resources.Windows.CpuMaximum = int64(*spec.Windows.Resources.CPU.Maximum) + } + } + + if spec.Windows.Resources.Memory != nil { + if spec.Windows.Resources.Memory.Limit != nil { + status.Resources.Windows.MemoryLimitInBytes = int64(*spec.Windows.Resources.Memory.Limit) + } + } + + // TODO: Figure out how to get RootfsSizeInBytes + } + return status +} diff --git a/pkg/cri/server/helpers_linux.go b/pkg/cri/server/helpers_linux.go index c542c0e..42b2d99 100644 --- a/pkg/cri/server/helpers_linux.go +++ b/pkg/cri/server/helpers_linux.go @@ -36,9 +36,8 @@ import ( "github.com/moby/sys/mountinfo" "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "golang.org/x/sys/unix" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) const ( @@ -131,7 +130,7 @@ func checkSelinuxLevel(level string) error { matched, err := regexp.MatchString(`^s\d(-s\d)??(:c\d{1,4}(\.c\d{1,4})?(,c\d{1,4}(\.c\d{1,4})?)*)?$`, level) if err != nil { - return errors.Wrapf(err, "the format of 'level' %q is not correct", level) + return fmt.Errorf("the format of 'level' %q is not correct: %w", level, err) } if !matched { return fmt.Errorf("the format of 'level' %q is not correct", level) @@ -241,7 +240,7 @@ func ensureRemoveAll(ctx context.Context, dir string) error { return err } if e := mount.Unmount(pe.Path, unix.MNT_DETACH); e != nil { - return errors.Wrapf(e, "error while removing %s", dir) + return fmt.Errorf("error while removing %s: %w", dir, e) } if exitOnErr[pe.Path] == maxRetry { @@ -271,7 +270,7 @@ func modifyProcessLabel(runtimeType string, spec 
*specs.Spec) error { } l, err := seutil.ChangeToKVM(spec.Process.SelinuxLabel) if err != nil { - return errors.Wrap(err, "failed to get selinux kvm label") + return fmt.Errorf("failed to get selinux kvm label: %w", err) } spec.Process.SelinuxLabel = l return nil diff --git a/pkg/cri/server/helpers_linux_test.go b/pkg/cri/server/helpers_linux_test.go index ca19c15..591cb4c 100644 --- a/pkg/cri/server/helpers_linux_test.go +++ b/pkg/cri/server/helpers_linux_test.go @@ -17,7 +17,6 @@ package server import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -66,15 +65,9 @@ func TestEnsureRemoveAllWithMount(t *testing.T) { t.Skip("skipping test that requires root") } - dir1, err := ioutil.TempDir("", "test-ensure-removeall-with-dir1") - if err != nil { - t.Fatal(err) - } - dir2, err := ioutil.TempDir("", "test-ensure-removeall-with-dir2") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir2) + var err error + dir1 := t.TempDir() + dir2 := t.TempDir() bindDir := filepath.Join(dir1, "bind") if err := os.MkdirAll(bindDir, 0755); err != nil { diff --git a/pkg/cri/server/helpers_other.go b/pkg/cri/server/helpers_other.go index 6a67375..99df63e 100644 --- a/pkg/cri/server/helpers_other.go +++ b/pkg/cri/server/helpers_other.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* diff --git a/pkg/cri/server/helpers_selinux_linux_test.go b/pkg/cri/server/helpers_selinux_linux_test.go index 1ec742a..6bccee2 100644 --- a/pkg/cri/server/helpers_selinux_linux_test.go +++ b/pkg/cri/server/helpers_selinux_linux_test.go @@ -21,7 +21,7 @@ import ( "github.com/opencontainers/selinux/go-selinux" "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestInitSelinuxOpts(t *testing.T) { diff --git a/pkg/cri/server/helpers_test.go b/pkg/cri/server/helpers_test.go index 8fa93cf..f73e174 100644 --- a/pkg/cri/server/helpers_test.go +++ b/pkg/cri/server/helpers_test.go @@ 
-18,26 +18,26 @@ package server import ( "context" - "io/ioutil" + "os" "strings" "testing" "time" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/oci" + criconfig "github.com/containerd/containerd/pkg/cri/config" + containerstore "github.com/containerd/containerd/pkg/cri/store/container" + imagestore "github.com/containerd/containerd/pkg/cri/store/image" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/reference/docker" "github.com/containerd/containerd/runtime/linux/runctypes" runcoptions "github.com/containerd/containerd/runtime/v2/runc/options" + imagedigest "github.com/opencontainers/go-digest" runtimespec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pelletier/go-toml" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - criconfig "github.com/containerd/containerd/pkg/cri/config" - "github.com/containerd/containerd/pkg/cri/store" - containerstore "github.com/containerd/containerd/pkg/cri/store/container" - imagestore "github.com/containerd/containerd/pkg/cri/store/image" ) // TestGetUserFromImage tests the logic of getting image uid or user name of image user. 
@@ -192,7 +192,7 @@ func TestLocalResolve(t *testing.T) { assert.Equal(t, image, img) } img, err := c.localResolve("randomid") - assert.Equal(t, store.ErrNotExist, err) + assert.Equal(t, errdefs.IsNotFound(err), true) assert.Equal(t, imagestore.Image{}, img) } @@ -492,17 +492,14 @@ func TestEnsureRemoveAllNotExist(t *testing.T) { } func TestEnsureRemoveAllWithDir(t *testing.T) { - dir, err := ioutil.TempDir("", "test-ensure-removeall-with-dir") - if err != nil { - t.Fatal(err) - } + dir := t.TempDir() if err := ensureRemoveAll(context.Background(), dir); err != nil { t.Fatal(err) } } func TestEnsureRemoveAllWithFile(t *testing.T) { - tmp, err := ioutil.TempFile("", "test-ensure-removeall-with-dir") + tmp, err := os.CreateTemp("", "test-ensure-removeall-with-dir") if err != nil { t.Fatal(err) } diff --git a/pkg/cri/server/helpers_windows.go b/pkg/cri/server/helpers_windows.go index f88f34b..1b89401 100644 --- a/pkg/cri/server/helpers_windows.go +++ b/pkg/cri/server/helpers_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. diff --git a/pkg/cri/server/image_list.go b/pkg/cri/server/image_list.go index dc6aeec..de4d1d0 100644 --- a/pkg/cri/server/image_list.go +++ b/pkg/cri/server/image_list.go @@ -18,7 +18,7 @@ package server import ( "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // ListImages lists existing images. 
diff --git a/pkg/cri/server/image_list_test.go b/pkg/cri/server/image_list_test.go index 32161ff..09e4e0f 100644 --- a/pkg/cri/server/image_list_test.go +++ b/pkg/cri/server/image_list_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" imagestore "github.com/containerd/containerd/pkg/cri/store/image" ) diff --git a/pkg/cri/server/image_pull.go b/pkg/cri/server/image_pull.go index 8a6a8bf..a9da89c 100644 --- a/pkg/cri/server/image_pull.go +++ b/pkg/cri/server/image_pull.go @@ -21,10 +21,10 @@ import ( "crypto/x509" "encoding/base64" "fmt" - "io/ioutil" "net" "net/http" "net/url" + "os" "path/filepath" "strings" "time" @@ -32,17 +32,17 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/errdefs" containerdimages "github.com/containerd/containerd/images" - "github.com/containerd/containerd/labels" "github.com/containerd/containerd/log" + crilabels "github.com/containerd/containerd/pkg/cri/labels" + snpkg "github.com/containerd/containerd/pkg/snapshotters" distribution "github.com/containerd/containerd/reference/docker" "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker/config" "github.com/containerd/imgcrypt" "github.com/containerd/imgcrypt/images/encryption" imagespec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" criconfig "github.com/containerd/containerd/pkg/cri/config" ) @@ -93,7 +93,7 @@ func (c *criService) PullImage(ctx context.Context, r *runtime.PullImageRequest) imageRef := r.GetImage().GetImage() namedRef, err := distribution.ParseDockerRef(imageRef) if err != nil { - return nil, errors.Wrapf(err, "failed to parse image reference %q", imageRef) 
+ return nil, fmt.Errorf("failed to parse image reference %q: %w", imageRef, err) } ref := namedRef.String() if ref != imageRef { @@ -114,20 +114,25 @@ func (c *criService) PullImage(ctx context.Context, r *runtime.PullImageRequest) } ) + labels := c.getLabels(ctx, ref) + pullOpts := []containerd.RemoteOpt{ containerd.WithSchema1Conversion, containerd.WithResolver(resolver), containerd.WithPullSnapshotter(c.config.ContainerdConfig.Snapshotter), containerd.WithPullUnpack, - containerd.WithPullLabel(imageLabelKey, imageLabelValue), + containerd.WithPullLabels(labels), containerd.WithMaxConcurrentDownloads(c.config.MaxConcurrentDownloads), containerd.WithImageHandler(imageHandler), + containerd.WithUnpackOpts([]containerd.UnpackOpt{ + containerd.WithUnpackDuplicationSuppressor(c.unpackDuplicationSuppressor), + }), } pullOpts = append(pullOpts, c.encryptedImagesPullOpts()...) if !c.config.ContainerdConfig.DisableSnapshotAnnotations { pullOpts = append(pullOpts, - containerd.WithImageHandlerWrapper(appendInfoHandlerWrapper(ref))) + containerd.WithImageHandlerWrapper(snpkg.AppendInfoHandlerWrapper(ref))) } if c.config.ContainerdConfig.DiscardUnpackedLayers { @@ -138,12 +143,12 @@ func (c *criService) PullImage(ctx context.Context, r *runtime.PullImageRequest) image, err := c.client.Pull(ctx, ref, pullOpts...) 
if err != nil { - return nil, errors.Wrapf(err, "failed to pull and unpack image %q", ref) + return nil, fmt.Errorf("failed to pull and unpack image %q: %w", ref, err) } configDesc, err := image.Config(ctx) if err != nil { - return nil, errors.Wrap(err, "get image config descriptor") + return nil, fmt.Errorf("get image config descriptor: %w", err) } imageID := configDesc.Digest.String() @@ -152,14 +157,14 @@ func (c *criService) PullImage(ctx context.Context, r *runtime.PullImageRequest) if r == "" { continue } - if err := c.createImageReference(ctx, r, image.Target()); err != nil { - return nil, errors.Wrapf(err, "failed to create image reference %q", r) + if err := c.createImageReference(ctx, r, image.Target(), labels); err != nil { + return nil, fmt.Errorf("failed to create image reference %q: %w", r, err) } // Update image store to reflect the newest state in containerd. // No need to use `updateImage`, because the image reference must // have been managed by the cri plugin. if err := c.imageStore.Update(ctx, r); err != nil { - return nil, errors.Wrapf(err, "failed to update image store %q", r) + return nil, fmt.Errorf("failed to update image store %q: %w", r, err) } } @@ -182,7 +187,7 @@ func ParseAuth(auth *runtime.AuthConfig, host string) (string, string, error) { // Do not return the auth info when server address doesn't match. 
u, err := url.Parse(auth.ServerAddress) if err != nil { - return "", "", errors.Wrap(err, "parse server address") + return "", "", fmt.Errorf("parse server address: %w", err) } if host != u.Host { return "", "", nil @@ -203,7 +208,7 @@ func ParseAuth(auth *runtime.AuthConfig, host string) (string, string, error) { } fields := strings.SplitN(string(decoded), ":", 2) if len(fields) != 2 { - return "", "", errors.Errorf("invalid decoded auth: %q", decoded) + return "", "", fmt.Errorf("invalid decoded auth: %q", decoded) } user, passwd := fields[0], fields[1] return user, strings.Trim(passwd, "\x00"), nil @@ -217,12 +222,12 @@ func ParseAuth(auth *runtime.AuthConfig, host string) (string, string, error) { // Note that because create and update are not finished in one transaction, there could be race. E.g. // the image reference is deleted by someone else after create returns already exists, but before update // happens. -func (c *criService) createImageReference(ctx context.Context, name string, desc imagespec.Descriptor) error { +func (c *criService) createImageReference(ctx context.Context, name string, desc imagespec.Descriptor, labels map[string]string) error { img := containerdimages.Image{ Name: name, Target: desc, // Add a label to indicate that the image is managed by the cri plugin. - Labels: map[string]string{imageLabelKey: imageLabelValue}, + Labels: labels, } // TODO(random-liu): Figure out which is the more performant sequence create then update or // update then create. 
@@ -230,44 +235,63 @@ func (c *criService) createImageReference(ctx context.Context, name string, desc if err == nil || !errdefs.IsAlreadyExists(err) { return err } - if oldImg.Target.Digest == img.Target.Digest && oldImg.Labels[imageLabelKey] == imageLabelValue { + if oldImg.Target.Digest == img.Target.Digest && oldImg.Labels[crilabels.ImageLabelKey] == labels[crilabels.ImageLabelKey] { return nil } - _, err = c.client.ImageService().Update(ctx, img, "target", "labels") + _, err = c.client.ImageService().Update(ctx, img, "target", "labels."+crilabels.ImageLabelKey) return err } +// getLabels gets image labels to be added on CRI image +func (c *criService) getLabels(ctx context.Context, name string) map[string]string { + labels := map[string]string{crilabels.ImageLabelKey: crilabels.ImageLabelValue} + configSandboxImage := c.config.SandboxImage + // parse sandbox image + sandboxNamedRef, err := distribution.ParseDockerRef(configSandboxImage) + if err != nil { + log.G(ctx).Errorf("failed to parse sandbox image from config %s", sandboxNamedRef) + return nil + } + sandboxRef := sandboxNamedRef.String() + // Adding pinned image label to sandbox image + if sandboxRef == name { + labels[crilabels.PinnedImageLabelKey] = crilabels.PinnedImageLabelValue + } + return labels +} + // updateImage updates image store to reflect the newest state of an image reference // in containerd. If the reference is not managed by the cri plugin, the function also // generates necessary metadata for the image and makes it managed. 
func (c *criService) updateImage(ctx context.Context, r string) error { img, err := c.client.GetImage(ctx, r) if err != nil && !errdefs.IsNotFound(err) { - return errors.Wrap(err, "get image by reference") + return fmt.Errorf("get image by reference: %w", err) } - if err == nil && img.Labels()[imageLabelKey] != imageLabelValue { + if err == nil && img.Labels()[crilabels.ImageLabelKey] != crilabels.ImageLabelValue { // Make sure the image has the image id as its unique // identifier that references the image in its lifetime. configDesc, err := img.Config(ctx) if err != nil { - return errors.Wrap(err, "get image id") + return fmt.Errorf("get image id: %w", err) } id := configDesc.Digest.String() - if err := c.createImageReference(ctx, id, img.Target()); err != nil { - return errors.Wrapf(err, "create image id reference %q", id) + labels := c.getLabels(ctx, id) + if err := c.createImageReference(ctx, id, img.Target(), labels); err != nil { + return fmt.Errorf("create image id reference %q: %w", id, err) } if err := c.imageStore.Update(ctx, id); err != nil { - return errors.Wrapf(err, "update image store for %q", id) + return fmt.Errorf("update image store for %q: %w", id, err) } // The image id is ready, add the label to mark the image as managed. - if err := c.createImageReference(ctx, r, img.Target()); err != nil { - return errors.Wrap(err, "create managed label") + if err := c.createImageReference(ctx, r, img.Target(), labels); err != nil { + return fmt.Errorf("create managed label: %w", err) } } // If the image is not found, we should continue updating the cache, // so that the image can be removed from the cache. 
if err := c.imageStore.Update(ctx, r); err != nil { - return errors.Wrapf(err, "update image store for %q", r) + return fmt.Errorf("update image store for %q: %w", r, err) } return nil } @@ -280,30 +304,30 @@ func (c *criService) getTLSConfig(registryTLSConfig criconfig.TLSConfig) (*tls.C err error ) if registryTLSConfig.CertFile != "" && registryTLSConfig.KeyFile == "" { - return nil, errors.Errorf("cert file %q was specified, but no corresponding key file was specified", registryTLSConfig.CertFile) + return nil, fmt.Errorf("cert file %q was specified, but no corresponding key file was specified", registryTLSConfig.CertFile) } if registryTLSConfig.CertFile == "" && registryTLSConfig.KeyFile != "" { - return nil, errors.Errorf("key file %q was specified, but no corresponding cert file was specified", registryTLSConfig.KeyFile) + return nil, fmt.Errorf("key file %q was specified, but no corresponding cert file was specified", registryTLSConfig.KeyFile) } if registryTLSConfig.CertFile != "" && registryTLSConfig.KeyFile != "" { cert, err = tls.LoadX509KeyPair(registryTLSConfig.CertFile, registryTLSConfig.KeyFile) if err != nil { - return nil, errors.Wrap(err, "failed to load cert file") + return nil, fmt.Errorf("failed to load cert file: %w", err) } if len(cert.Certificate) != 0 { tlsConfig.Certificates = []tls.Certificate{cert} } - tlsConfig.BuildNameToCertificate() // nolint:staticcheck + tlsConfig.BuildNameToCertificate() //nolint:staticcheck // TODO(thaJeztah): verify if we should ignore the deprecation; see https://github.com/containerd/containerd/pull/7349/files#r990644833 } if registryTLSConfig.CAFile != "" { caCertPool, err := x509.SystemCertPool() if err != nil { - return nil, errors.Wrap(err, "failed to get system cert pool") + return nil, fmt.Errorf("failed to get system cert pool: %w", err) } - caCert, err := ioutil.ReadFile(registryTLSConfig.CAFile) + caCert, err := os.ReadFile(registryTLSConfig.CAFile) if err != nil { - return nil, errors.Wrap(err, 
"failed to load CA file") + return nil, fmt.Errorf("failed to load CA file: %w", err) } caCertPool.AppendCertsFromPEM(caCert) tlsConfig.RootCAs = caCertPool @@ -354,12 +378,12 @@ func (c *criService) registryHosts(ctx context.Context, auth *runtime.AuthConfig endpoints, err := c.registryEndpoints(host) if err != nil { - return nil, errors.Wrap(err, "get registry endpoints") + return nil, fmt.Errorf("get registry endpoints: %w", err) } for _, e := range endpoints { u, err := url.Parse(e) if err != nil { - return nil, errors.Wrapf(err, "parse registry endpoint %q from mirrors", e) + return nil, fmt.Errorf("parse registry endpoint %q from mirrors: %w", e, err) } var ( @@ -371,7 +395,12 @@ func (c *criService) registryHosts(ctx context.Context, auth *runtime.AuthConfig if config.TLS != nil { transport.TLSClientConfig, err = c.getTLSConfig(*config.TLS) if err != nil { - return nil, errors.Wrapf(err, "get TLSConfig for registry %q", e) + return nil, fmt.Errorf("get TLSConfig for registry %q: %w", e, err) + } + } else if docker.IsLocalhost(host) && u.Scheme == "http" { + // Skipping TLS verification for localhost + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, } } @@ -406,10 +435,7 @@ func (c *criService) registryHosts(ctx context.Context, auth *runtime.AuthConfig // defaultScheme returns the default scheme for a registry host. 
func defaultScheme(host string) string { - if h, _, err := net.SplitHostPort(host); err == nil { - host = h - } - if host == "localhost" || host == "127.0.0.1" || host == "::1" { + if docker.IsLocalhost(host) { return "http" } return "https" @@ -441,19 +467,19 @@ func (c *criService) registryEndpoints(host string) ([]string, error) { } defaultHost, err := docker.DefaultHost(host) if err != nil { - return nil, errors.Wrap(err, "get default host") + return nil, fmt.Errorf("get default host: %w", err) } for i := range endpoints { en, err := addDefaultScheme(endpoints[i]) if err != nil { - return nil, errors.Wrap(err, "parse endpoint url") + return nil, fmt.Errorf("parse endpoint url: %w", err) } endpoints[i] = en } for _, e := range endpoints { u, err := url.Parse(e) if err != nil { - return nil, errors.Wrap(err, "parse endpoint url") + return nil, fmt.Errorf("parse endpoint url: %w", err) } if u.Host == host { // Do not add default if the endpoint already exists. @@ -491,73 +517,3 @@ func (c *criService) encryptedImagesPullOpts() []containerd.RemoteOpt { } return nil } - -const ( - // targetRefLabel is a label which contains image reference and will be passed - // to snapshotters. - targetRefLabel = "containerd.io/snapshot/cri.image-ref" - // targetManifestDigestLabel is a label which contains manifest digest and will be passed - // to snapshotters. - targetManifestDigestLabel = "containerd.io/snapshot/cri.manifest-digest" - // targetLayerDigestLabel is a label which contains layer digest and will be passed - // to snapshotters. - targetLayerDigestLabel = "containerd.io/snapshot/cri.layer-digest" - // targetImageLayersLabel is a label which contains layer digests contained in - // the target image and will be passed to snapshotters for preparing layers in - // parallel. Skipping some layers is allowed and only affects performance. 
- targetImageLayersLabel = "containerd.io/snapshot/cri.image-layers" -) - -// appendInfoHandlerWrapper makes a handler which appends some basic information -// of images like digests for manifest and their child layers as annotations during unpack. -// These annotations will be passed to snapshotters as labels. These labels will be -// used mainly by stargz-based snapshotters for querying image contents from the -// registry. -func appendInfoHandlerWrapper(ref string) func(f containerdimages.Handler) containerdimages.Handler { - return func(f containerdimages.Handler) containerdimages.Handler { - return containerdimages.HandlerFunc(func(ctx context.Context, desc imagespec.Descriptor) ([]imagespec.Descriptor, error) { - children, err := f.Handle(ctx, desc) - if err != nil { - return nil, err - } - switch desc.MediaType { - case imagespec.MediaTypeImageManifest, containerdimages.MediaTypeDockerSchema2Manifest: - for i := range children { - c := &children[i] - if containerdimages.IsLayerType(c.MediaType) { - if c.Annotations == nil { - c.Annotations = make(map[string]string) - } - c.Annotations[targetRefLabel] = ref - c.Annotations[targetLayerDigestLabel] = c.Digest.String() - c.Annotations[targetImageLayersLabel] = getLayers(ctx, targetImageLayersLabel, children[i:], labels.Validate) - c.Annotations[targetManifestDigestLabel] = desc.Digest.String() - } - } - } - return children, nil - }) - } -} - -// getLayers returns comma-separated digests based on the passed list of -// descriptors. The returned list contains as many digests as possible as well -// as meets the label validation. -func getLayers(ctx context.Context, key string, descs []imagespec.Descriptor, validate func(k, v string) error) (layers string) { - var item string - for _, l := range descs { - if containerdimages.IsLayerType(l.MediaType) { - item = l.Digest.String() - if layers != "" { - item = "," + item - } - // This avoids the label hits the size limitation. 
- if err := validate(key, layers+item); err != nil { - log.G(ctx).WithError(err).WithField("label", key).Debugf("%q is omitted in the layers list", l.Digest.String()) - break - } - layers += item - } - } - return -} diff --git a/pkg/cri/server/image_pull_test.go b/pkg/cri/server/image_pull_test.go index b6aee26..b66a1bb 100644 --- a/pkg/cri/server/image_pull_test.go +++ b/pkg/cri/server/image_pull_test.go @@ -19,16 +19,13 @@ package server import ( "context" "encoding/base64" - "fmt" - "strings" "testing" - digest "github.com/opencontainers/go-digest" - imagespec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" criconfig "github.com/containerd/containerd/pkg/cri/config" + "github.com/containerd/containerd/pkg/cri/labels" ) func TestParseAuth(t *testing.T) { @@ -332,48 +329,56 @@ func TestEncryptedImagePullOpts(t *testing.T) { assert.Equal(t, test.expectedOpts, got) } } +func TestImageGetLabels(t *testing.T) { -func TestImageLayersLabel(t *testing.T) { - sampleKey := "sampleKey" - sampleDigest, err := digest.Parse("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - assert.NoError(t, err) - sampleMaxSize := 300 - sampleValidate := func(k, v string) error { - if (len(k) + len(v)) > sampleMaxSize { - return fmt.Errorf("invalid: %q: %q", k, v) - } - return nil - } + criService := newTestCRIService() tests := []struct { - name string - layersNum int - wantNum int + name string + expectedLabel map[string]string + configSandboxImage string + pullImageName string }{ { - name: "valid number of layers", - layersNum: 2, - wantNum: 2, + name: "pinned image labels should get added on sandbox image", + expectedLabel: map[string]string{labels.ImageLabelKey: labels.ImageLabelValue, labels.PinnedImageLabelKey: labels.PinnedImageLabelValue}, + configSandboxImage: "k8s.gcr.io/pause:3.9", + pullImageName: 
"k8s.gcr.io/pause:3.9", }, { - name: "many layers", - layersNum: 5, // hits sampleMaxSize (300 chars). - wantNum: 4, // layers should be omitted for avoiding invalid label. + name: "pinned image labels should get added on sandbox image without tag", + expectedLabel: map[string]string{labels.ImageLabelKey: labels.ImageLabelValue, labels.PinnedImageLabelKey: labels.PinnedImageLabelValue}, + configSandboxImage: "k8s.gcr.io/pause", + pullImageName: "k8s.gcr.io/pause:latest", + }, + { + name: "pinned image labels should get added on sandbox image specified with tag and digest both", + expectedLabel: map[string]string{labels.ImageLabelKey: labels.ImageLabelValue, labels.PinnedImageLabelKey: labels.PinnedImageLabelValue}, + configSandboxImage: "k8s.gcr.io/pause:3.9@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2", + pullImageName: "k8s.gcr.io/pause@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2", + }, + + { + name: "pinned image labels should get added on sandbox image specified with digest", + expectedLabel: map[string]string{labels.ImageLabelKey: labels.ImageLabelValue, labels.PinnedImageLabelKey: labels.PinnedImageLabelValue}, + configSandboxImage: "k8s.gcr.io/pause@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2", + pullImageName: "k8s.gcr.io/pause@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2", + }, + + { + name: "pinned image labels should not get added on other image", + expectedLabel: map[string]string{labels.ImageLabelKey: labels.ImageLabelValue}, + configSandboxImage: "k8s.gcr.io/pause:3.9", + pullImageName: "k8s.gcr.io/random:latest", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var sampleLayers []imagespec.Descriptor - for i := 0; i < tt.layersNum; i++ { - sampleLayers = append(sampleLayers, imagespec.Descriptor{ - MediaType: imagespec.MediaTypeImageLayerGzip, - Digest: sampleDigest, - }) - } - gotS := getLayers(context.Background(), 
sampleKey, sampleLayers, sampleValidate) - got := len(strings.Split(gotS, ",")) - assert.Equal(t, tt.wantNum, got) + criService.config.SandboxImage = tt.configSandboxImage + labels := criService.getLabels(context.Background(), tt.pullImageName) + assert.Equal(t, tt.expectedLabel, labels) + }) } } diff --git a/pkg/cri/server/image_remove.go b/pkg/cri/server/image_remove.go index 1e0ce69..8821661 100644 --- a/pkg/cri/server/image_remove.go +++ b/pkg/cri/server/image_remove.go @@ -17,13 +17,13 @@ package server import ( + "fmt" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" - "github.com/pkg/errors" - "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - "github.com/containerd/containerd/pkg/cri/store" + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // RemoveImage removes the image. @@ -35,11 +35,11 @@ import ( func (c *criService) RemoveImage(ctx context.Context, r *runtime.RemoveImageRequest) (*runtime.RemoveImageResponse, error) { image, err := c.localResolve(r.GetImage().GetImage()) if err != nil { - if err == store.ErrNotExist { + if errdefs.IsNotFound(err) { // return empty without error when image not found. return &runtime.RemoveImageResponse{}, nil } - return nil, errors.Wrapf(err, "can not resolve %q locally", r.GetImage().GetImage()) + return nil, fmt.Errorf("can not resolve %q locally: %w", r.GetImage().GetImage(), err) } // Remove all image references. @@ -55,11 +55,11 @@ func (c *criService) RemoveImage(ctx context.Context, r *runtime.RemoveImageRequ if err == nil || errdefs.IsNotFound(err) { // Update image store to reflect the newest state in containerd. 
if err := c.imageStore.Update(ctx, ref); err != nil { - return nil, errors.Wrapf(err, "failed to update image reference %q for %q", ref, image.ID) + return nil, fmt.Errorf("failed to update image reference %q for %q: %w", ref, image.ID, err) } continue } - return nil, errors.Wrapf(err, "failed to delete image reference %q for %q", ref, image.ID) + return nil, fmt.Errorf("failed to delete image reference %q for %q: %w", ref, image.ID, err) } return &runtime.RemoveImageResponse{}, nil } diff --git a/pkg/cri/server/image_status.go b/pkg/cri/server/image_status.go index 5202f5d..884aca4 100644 --- a/pkg/cri/server/image_status.go +++ b/pkg/cri/server/image_status.go @@ -18,15 +18,15 @@ package server import ( "encoding/json" + "fmt" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - imagespec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - - "github.com/containerd/containerd/pkg/cri/store" imagestore "github.com/containerd/containerd/pkg/cri/store/image" + + imagespec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // ImageStatus returns the status of the image, returns nil if the image isn't present. @@ -35,11 +35,11 @@ import ( func (c *criService) ImageStatus(ctx context.Context, r *runtime.ImageStatusRequest) (*runtime.ImageStatusResponse, error) { image, err := c.localResolve(r.GetImage().GetImage()) if err != nil { - if err == store.ErrNotExist { + if errdefs.IsNotFound(err) { // return empty without error when image not found. return &runtime.ImageStatusResponse{}, nil } - return nil, errors.Wrapf(err, "can not resolve %q locally", r.GetImage().GetImage()) + return nil, fmt.Errorf("can not resolve %q locally: %w", r.GetImage().GetImage(), err) } // TODO(random-liu): [P0] Make sure corresponding snapshot exists. 
What if snapshot // doesn't exist? @@ -47,7 +47,7 @@ func (c *criService) ImageStatus(ctx context.Context, r *runtime.ImageStatusRequ runtimeImage := toCRIImage(image) info, err := c.toCRIImageInfo(ctx, &image, r.GetVerbose()) if err != nil { - return nil, errors.Wrap(err, "failed to generate image info") + return nil, fmt.Errorf("failed to generate image info: %w", err) } return &runtime.ImageStatusResponse{ @@ -59,12 +59,15 @@ func (c *criService) ImageStatus(ctx context.Context, r *runtime.ImageStatusRequ // toCRIImage converts internal image object to CRI runtime.Image. func toCRIImage(image imagestore.Image) *runtime.Image { repoTags, repoDigests := parseImageReferences(image.References) + runtimeImage := &runtime.Image{ Id: image.ID, RepoTags: repoTags, RepoDigests: repoDigests, Size_: uint64(image.Size), + Pinned: image.Pinned, } + uid, username := getUserFromImage(image.ImageSpec.Config.User) if uid != nil { runtimeImage.Uid = &runtime.Int64Value{Value: *uid} diff --git a/pkg/cri/server/image_status_test.go b/pkg/cri/server/image_status_test.go index 5ac029b..49cd046 100644 --- a/pkg/cri/server/image_status_test.go +++ b/pkg/cri/server/image_status_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" imagestore "github.com/containerd/containerd/pkg/cri/store/image" ) diff --git a/pkg/cri/server/imagefs_info.go b/pkg/cri/server/imagefs_info.go index 81dbd5c..c91404d 100644 --- a/pkg/cri/server/imagefs_info.go +++ b/pkg/cri/server/imagefs_info.go @@ -21,7 +21,7 @@ import ( "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // ImageFsInfo returns information of the filesystem that is used to store images. 
diff --git a/pkg/cri/server/imagefs_info_test.go b/pkg/cri/server/imagefs_info_test.go index d70231d..782c86a 100644 --- a/pkg/cri/server/imagefs_info_test.go +++ b/pkg/cri/server/imagefs_info_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" snapshotstore "github.com/containerd/containerd/pkg/cri/store/snapshot" ) diff --git a/pkg/cri/server/instrumented_service.go b/pkg/cri/server/instrumented_service.go index 893de98..fa8f53d 100644 --- a/pkg/cri/server/instrumented_service.go +++ b/pkg/cri/server/instrumented_service.go @@ -22,7 +22,8 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + runtime_alpha "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" ctrdutil "github.com/containerd/containerd/pkg/cri/util" ) @@ -36,6 +37,15 @@ func newInstrumentedService(c *criService) grpcServices { return &instrumentedService{c: c} } +// instrumentedAlphaService wraps service with containerd namespace and logs. +type instrumentedAlphaService struct { + c *criService +} + +func newInstrumentedAlphaService(c *criService) grpcAlphaServices { + return &instrumentedAlphaService{c: c} +} + // checkInitialized returns error if the server is not fully initialized. // GRPC service request handlers should return error before server is fully // initialized. @@ -47,11 +57,22 @@ func (in *instrumentedService) checkInitialized() error { return errors.New("server is not initialized yet") } +// checkInitialized returns error if the server is not fully initialized. +// GRPC service request handlers should return error before server is fully +// initialized. +// NOTE(random-liu): All following functions MUST check initialized at the beginning. 
+func (in *instrumentedAlphaService) checkInitialized() error { + if in.c.initialized.IsSet() { + return nil + } + return errors.New("server is not initialized yet") +} + func (in *instrumentedService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandboxRequest) (res *runtime.RunPodSandboxResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err } - log.G(ctx).Infof("RunPodsandbox for %+v", r.GetConfig().GetMetadata()) + log.G(ctx).Infof("RunPodSandbox for %+v", r.GetConfig().GetMetadata()) defer func() { if err != nil { log.G(ctx).WithError(err).Errorf("RunPodSandbox for %+v failed, error", r.GetConfig().GetMetadata()) @@ -63,6 +84,43 @@ func (in *instrumentedService) RunPodSandbox(ctx context.Context, r *runtime.Run return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) RunPodSandbox(ctx context.Context, r *runtime_alpha.RunPodSandboxRequest) (res *runtime_alpha.RunPodSandboxResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Infof("RunPodSandbox for %+v", r.GetConfig().GetMetadata()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("RunPodSandbox for %+v failed, error", r.GetConfig().GetMetadata()) + } else { + log.G(ctx).Infof("RunPodSandbox for %+v returns sandbox id %q", r.GetConfig().GetMetadata(), res.GetPodSandboxId()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.RunPodSandboxRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.RunPodSandboxResponse + v1res, err = in.c.RunPodSandbox(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.RunPodSandboxResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { 
+ // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("RunPodSandbox for %+v failed, error", r.GetConfig().GetMetadata()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) ListPodSandbox(ctx context.Context, r *runtime.ListPodSandboxRequest) (res *runtime.ListPodSandboxResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -79,6 +137,43 @@ func (in *instrumentedService) ListPodSandbox(ctx context.Context, r *runtime.Li return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) ListPodSandbox(ctx context.Context, r *runtime_alpha.ListPodSandboxRequest) (res *runtime_alpha.ListPodSandboxResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Tracef("ListPodSandbox with filter %+v", r.GetFilter()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Error("ListPodSandbox failed") + } else { + log.G(ctx).Tracef("ListPodSandbox returns pod sandboxes %+v", res.GetItems()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.ListPodSandboxRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.ListPodSandboxResponse + v1res, err = in.c.ListPodSandbox(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.ListPodSandboxResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precedence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Error("ListPodSandbox failed") + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) PodSandboxStatus(ctx context.Context, r *runtime.PodSandboxStatusRequest) (res
*runtime.PodSandboxStatusResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -95,6 +190,43 @@ func (in *instrumentedService) PodSandboxStatus(ctx context.Context, r *runtime. return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) PodSandboxStatus(ctx context.Context, r *runtime_alpha.PodSandboxStatusRequest) (res *runtime_alpha.PodSandboxStatusResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Tracef("PodSandboxStatus for %q", r.GetPodSandboxId()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("PodSandboxStatus for %q failed", r.GetPodSandboxId()) + } else { + log.G(ctx).Tracef("PodSandboxStatus for %q returns status %+v", r.GetPodSandboxId(), res.GetStatus()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.PodSandboxStatusRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.PodSandboxStatusResponse + v1res, err = in.c.PodSandboxStatus(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.PodSandboxStatusResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("PodSandboxStatus for %q failed", r.GetPodSandboxId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandboxRequest) (_ *runtime.StopPodSandboxResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -111,6 +243,43 @@ func (in *instrumentedService) StopPodSandbox(ctx context.Context, r *runtime.St return res, errdefs.ToGRPC(err) } 
+func (in *instrumentedAlphaService) StopPodSandbox(ctx context.Context, r *runtime_alpha.StopPodSandboxRequest) (res *runtime_alpha.StopPodSandboxResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Infof("StopPodSandbox for %q", r.GetPodSandboxId()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("StopPodSandbox for %q failed", r.GetPodSandboxId()) + } else { + log.G(ctx).Infof("StopPodSandbox for %q returns successfully", r.GetPodSandboxId()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.StopPodSandboxRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.StopPodSandboxResponse + v1res, err = in.c.StopPodSandbox(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.StopPodSandboxResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("StopPodSandbox for %q failed", r.GetPodSandboxId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodSandboxRequest) (_ *runtime.RemovePodSandboxResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -127,6 +296,43 @@ func (in *instrumentedService) RemovePodSandbox(ctx context.Context, r *runtime. 
return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) RemovePodSandbox(ctx context.Context, r *runtime_alpha.RemovePodSandboxRequest) (res *runtime_alpha.RemovePodSandboxResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Infof("RemovePodSandbox for %q", r.GetPodSandboxId()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("RemovePodSandbox for %q failed", r.GetPodSandboxId()) + } else { + log.G(ctx).Infof("RemovePodSandbox %q returns successfully", r.GetPodSandboxId()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.RemovePodSandboxRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.RemovePodSandboxResponse + v1res, err = in.c.RemovePodSandbox(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.RemovePodSandboxResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precedence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("RemovePodSandbox for %q failed", r.GetPodSandboxId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) PortForward(ctx context.Context, r *runtime.PortForwardRequest) (res *runtime.PortForwardResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -143,6 +349,43 @@ func (in *instrumentedService) PortForward(ctx context.Context, r *runtime.PortF return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) PortForward(ctx context.Context, r *runtime_alpha.PortForwardRequest) (res *runtime_alpha.PortForwardResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } +
log.G(ctx).Infof("Portforward for %q port %v", r.GetPodSandboxId(), r.GetPort()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("Portforward for %q failed", r.GetPodSandboxId()) + } else { + log.G(ctx).Infof("Portforward for %q returns URL %q", r.GetPodSandboxId(), res.GetUrl()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.PortForwardRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.PortForwardResponse + v1res, err = in.c.PortForward(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.PortForwardResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("Portforward for %q failed", r.GetPodSandboxId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) CreateContainer(ctx context.Context, r *runtime.CreateContainerRequest) (res *runtime.CreateContainerResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -162,6 +405,47 @@ func (in *instrumentedService) CreateContainer(ctx context.Context, r *runtime.C return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) CreateContainer(ctx context.Context, r *runtime_alpha.CreateContainerRequest) (res *runtime_alpha.CreateContainerResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Infof("CreateContainer within sandbox %q for container %+v", + r.GetPodSandboxId(), r.GetConfig().GetMetadata()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("CreateContainer within sandbox %q for %+v failed", + r.GetPodSandboxId(), 
r.GetConfig().GetMetadata()) + } else { + log.G(ctx).Infof("CreateContainer within sandbox %q for %+v returns container id %q", + r.GetPodSandboxId(), r.GetConfig().GetMetadata(), res.GetContainerId()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.CreateContainerRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.CreateContainerResponse + v1res, err = in.c.CreateContainer(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.CreateContainerResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("CreateContainer within sandbox %q for %+v failed", + r.GetPodSandboxId(), r.GetConfig().GetMetadata()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) StartContainer(ctx context.Context, r *runtime.StartContainerRequest) (_ *runtime.StartContainerResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -178,6 +462,43 @@ func (in *instrumentedService) StartContainer(ctx context.Context, r *runtime.St return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) StartContainer(ctx context.Context, r *runtime_alpha.StartContainerRequest) (res *runtime_alpha.StartContainerResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Infof("StartContainer for %q", r.GetContainerId()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("StartContainer for %q failed", r.GetContainerId()) + } else { + log.G(ctx).Infof("StartContainer for %q returns successfully", r.GetContainerId()) + } + }() + // converts 
request and response for earlier CRI version to call and get response from the current version + var v1r runtime.StartContainerRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.StartContainerResponse + v1res, err = in.c.StartContainer(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.StartContainerResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("StartContainer for %q failed", r.GetContainerId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) ListContainers(ctx context.Context, r *runtime.ListContainersRequest) (res *runtime.ListContainersResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -195,6 +516,44 @@ func (in *instrumentedService) ListContainers(ctx context.Context, r *runtime.Li return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) ListContainers(ctx context.Context, r *runtime_alpha.ListContainersRequest) (res *runtime_alpha.ListContainersResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Tracef("ListContainers with filter %+v", r.GetFilter()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("ListContainers with filter %+v failed", r.GetFilter()) + } else { + log.G(ctx).Tracef("ListContainers with filter %+v returns containers %+v", + r.GetFilter(), res.GetContainers()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.ListContainersRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res 
*runtime.ListContainersResponse + v1res, err = in.c.ListContainers(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.ListContainersResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("ListContainers with filter %+v failed", r.GetFilter()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) ContainerStatus(ctx context.Context, r *runtime.ContainerStatusRequest) (res *runtime.ContainerStatusResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -211,6 +570,43 @@ func (in *instrumentedService) ContainerStatus(ctx context.Context, r *runtime.C return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) ContainerStatus(ctx context.Context, r *runtime_alpha.ContainerStatusRequest) (res *runtime_alpha.ContainerStatusResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Tracef("ContainerStatus for %q", r.GetContainerId()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("ContainerStatus for %q failed", r.GetContainerId()) + } else { + log.G(ctx).Tracef("ContainerStatus for %q returns status %+v", r.GetContainerId(), res.GetStatus()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.ContainerStatusRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.ContainerStatusResponse + v1res, err = in.c.ContainerStatus(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.ContainerStatusResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } 
else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("ContainerStatus for %q failed", r.GetContainerId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) StopContainer(ctx context.Context, r *runtime.StopContainerRequest) (res *runtime.StopContainerResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -227,6 +623,43 @@ func (in *instrumentedService) StopContainer(ctx context.Context, r *runtime.Sto return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) StopContainer(ctx context.Context, r *runtime_alpha.StopContainerRequest) (res *runtime_alpha.StopContainerResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Infof("StopContainer for %q with timeout %d (s)", r.GetContainerId(), r.GetTimeout()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("StopContainer for %q failed", r.GetContainerId()) + } else { + log.G(ctx).Infof("StopContainer for %q returns successfully", r.GetContainerId()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.StopContainerRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.StopContainerResponse + v1res, err = in.c.StopContainer(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.StopContainerResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("StopContainer for %q failed", 
r.GetContainerId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) RemoveContainer(ctx context.Context, r *runtime.RemoveContainerRequest) (res *runtime.RemoveContainerResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -243,6 +676,43 @@ func (in *instrumentedService) RemoveContainer(ctx context.Context, r *runtime.R return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) RemoveContainer(ctx context.Context, r *runtime_alpha.RemoveContainerRequest) (res *runtime_alpha.RemoveContainerResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Infof("RemoveContainer for %q", r.GetContainerId()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("RemoveContainer for %q failed", r.GetContainerId()) + } else { + log.G(ctx).Infof("RemoveContainer for %q returns successfully", r.GetContainerId()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.RemoveContainerRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.RemoveContainerResponse + v1res, err = in.c.RemoveContainer(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.RemoveContainerResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("RemoveContainer for %q failed", r.GetContainerId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) ExecSync(ctx context.Context, r *runtime.ExecSyncRequest) (res *runtime.ExecSyncResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ 
-259,6 +729,43 @@ func (in *instrumentedService) ExecSync(ctx context.Context, r *runtime.ExecSync return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) ExecSync(ctx context.Context, r *runtime_alpha.ExecSyncRequest) (res *runtime_alpha.ExecSyncResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Debugf("ExecSync for %q with command %+v and timeout %d (s)", r.GetContainerId(), r.GetCmd(), r.GetTimeout()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("ExecSync for %q failed", r.GetContainerId()) + } else { + log.G(ctx).Debugf("ExecSync for %q returns with exit code %d", r.GetContainerId(), res.GetExitCode()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.ExecSyncRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.ExecSyncResponse + v1res, err = in.c.ExecSync(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.ExecSyncResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("ExecSync for %q failed", r.GetContainerId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) Exec(ctx context.Context, r *runtime.ExecRequest) (res *runtime.ExecResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -276,6 +783,44 @@ func (in *instrumentedService) Exec(ctx context.Context, r *runtime.ExecRequest) return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) Exec(ctx context.Context, r *runtime_alpha.ExecRequest) (res *runtime_alpha.ExecResponse, err error) { + if err := 
in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Debugf("Exec for %q with command %+v, tty %v and stdin %v", + r.GetContainerId(), r.GetCmd(), r.GetTty(), r.GetStdin()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("Exec for %q failed", r.GetContainerId()) + } else { + log.G(ctx).Debugf("Exec for %q returns URL %q", r.GetContainerId(), res.GetUrl()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.ExecRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.ExecResponse + v1res, err = in.c.Exec(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.ExecResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("Exec for %q failed", r.GetContainerId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) Attach(ctx context.Context, r *runtime.AttachRequest) (res *runtime.AttachResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -292,11 +837,48 @@ func (in *instrumentedService) Attach(ctx context.Context, r *runtime.AttachRequ return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) Attach(ctx context.Context, r *runtime_alpha.AttachRequest) (res *runtime_alpha.AttachResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Debugf("Attach for %q with tty %v and stdin %v", r.GetContainerId(), r.GetTty(), r.GetStdin()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("Attach for %q failed", r.GetContainerId()) + } else { + log.G(ctx).Debugf("Attach for %q 
returns URL %q", r.GetContainerId(), res.Url) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.AttachRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.AttachResponse + v1res, err = in.c.Attach(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.AttachResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("Attach for %q failed", r.GetContainerId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (res *runtime.UpdateContainerResourcesResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err } - log.G(ctx).Infof("UpdateContainerResources for %q with %+v", r.GetContainerId(), r.GetLinux()) + log.G(ctx).Infof("UpdateContainerResources for %q with Linux: %+v / Windows: %+v", r.GetContainerId(), r.GetLinux(), r.GetWindows()) defer func() { if err != nil { log.G(ctx).WithError(err).Errorf("UpdateContainerResources for %q failed", r.GetContainerId()) @@ -308,6 +890,43 @@ func (in *instrumentedService) UpdateContainerResources(ctx context.Context, r * return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) UpdateContainerResources(ctx context.Context, r *runtime_alpha.UpdateContainerResourcesRequest) (res *runtime_alpha.UpdateContainerResourcesResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Infof("UpdateContainerResources for %q with Linux: %+v / Windows: %+v", r.GetContainerId(), r.GetLinux(), r.GetWindows()) + defer 
func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("UpdateContainerResources for %q failed", r.GetContainerId()) + } else { + log.G(ctx).Infof("UpdateContainerResources for %q returns successfully", r.GetContainerId()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.UpdateContainerResourcesRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.UpdateContainerResourcesResponse + v1res, err = in.c.UpdateContainerResources(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.UpdateContainerResourcesResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("UpdateContainerResources for %q failed", r.GetContainerId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) PullImage(ctx context.Context, r *runtime.PullImageRequest) (res *runtime.PullImageResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -325,6 +944,44 @@ func (in *instrumentedService) PullImage(ctx context.Context, r *runtime.PullIma return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) PullImage(ctx context.Context, r *runtime_alpha.PullImageRequest) (res *runtime_alpha.PullImageResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Infof("PullImage %q", r.GetImage().GetImage()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("PullImage %q failed", r.GetImage().GetImage()) + } else { + log.G(ctx).Infof("PullImage %q returns image reference %q", + r.GetImage().GetImage(), res.GetImageRef()) + } + }() + // 
converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.PullImageRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.PullImageResponse + v1res, err = in.c.PullImage(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.PullImageResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("PullImage %q failed", r.GetImage().GetImage()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) ListImages(ctx context.Context, r *runtime.ListImagesRequest) (res *runtime.ListImagesResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -342,6 +999,44 @@ func (in *instrumentedService) ListImages(ctx context.Context, r *runtime.ListIm return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) ListImages(ctx context.Context, r *runtime_alpha.ListImagesRequest) (res *runtime_alpha.ListImagesResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Tracef("ListImages with filter %+v", r.GetFilter()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("ListImages with filter %+v failed", r.GetFilter()) + } else { + log.G(ctx).Tracef("ListImages with filter %+v returns image list %+v", + r.GetFilter(), res.GetImages()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.ListImagesRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.ListImagesResponse + v1res, err = 
in.c.ListImages(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.ListImagesResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("ListImages with filter %+v failed", r.GetFilter()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) ImageStatus(ctx context.Context, r *runtime.ImageStatusRequest) (res *runtime.ImageStatusResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -359,6 +1054,44 @@ func (in *instrumentedService) ImageStatus(ctx context.Context, r *runtime.Image return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) ImageStatus(ctx context.Context, r *runtime_alpha.ImageStatusRequest) (res *runtime_alpha.ImageStatusResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Tracef("ImageStatus for %q", r.GetImage().GetImage()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("ImageStatus for %q failed", r.GetImage().GetImage()) + } else { + log.G(ctx).Tracef("ImageStatus for %q returns image status %+v", + r.GetImage().GetImage(), res.GetImage()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.ImageStatusRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.ImageStatusResponse + v1res, err = in.c.ImageStatus(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.ImageStatusResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == 
nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("ImageStatus for %q failed", r.GetImage().GetImage()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) RemoveImage(ctx context.Context, r *runtime.RemoveImageRequest) (_ *runtime.RemoveImageResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -375,6 +1108,43 @@ func (in *instrumentedService) RemoveImage(ctx context.Context, r *runtime.Remov return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) RemoveImage(ctx context.Context, r *runtime_alpha.RemoveImageRequest) (res *runtime_alpha.RemoveImageResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Infof("RemoveImage %q", r.GetImage().GetImage()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("RemoveImage %q failed", r.GetImage().GetImage()) + } else { + log.G(ctx).Infof("RemoveImage %q returns successfully", r.GetImage().GetImage()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.RemoveImageRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.RemoveImageResponse + v1res, err = in.c.RemoveImage(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.RemoveImageResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("RemoveImage %q failed", r.GetImage().GetImage()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) ImageFsInfo(ctx context.Context, r 
*runtime.ImageFsInfoRequest) (res *runtime.ImageFsInfoResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -391,6 +1161,96 @@ func (in *instrumentedService) ImageFsInfo(ctx context.Context, r *runtime.Image return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) ImageFsInfo(ctx context.Context, r *runtime_alpha.ImageFsInfoRequest) (res *runtime_alpha.ImageFsInfoResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Debugf("ImageFsInfo") + defer func() { + if err != nil { + log.G(ctx).WithError(err).Error("ImageFsInfo failed") + } else { + log.G(ctx).Debugf("ImageFsInfo returns filesystem info %+v", res.ImageFilesystems) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.ImageFsInfoRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.ImageFsInfoResponse + v1res, err = in.c.ImageFsInfo(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.ImageFsInfoResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Error("ImageFsInfo failed") + } + } + } + return res, errdefs.ToGRPC(err) +} + +func (in *instrumentedService) PodSandboxStats(ctx context.Context, r *runtime.PodSandboxStatsRequest) (res *runtime.PodSandboxStatsResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Debugf("PodSandboxStats for %q", r.GetPodSandboxId()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("PodSandboxStats for %q failed", r.GetPodSandboxId()) + } else { + log.G(ctx).Debugf("PodSandboxStats for %q 
returns stats %+v", r.GetPodSandboxId(), res.GetStats()) + } + }() + res, err = in.c.PodSandboxStats(ctrdutil.WithNamespace(ctx), r) + return res, errdefs.ToGRPC(err) +} + +func (in *instrumentedAlphaService) PodSandboxStats(ctx context.Context, r *runtime_alpha.PodSandboxStatsRequest) (res *runtime_alpha.PodSandboxStatsResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Debugf("PodSandboxStats for %q", r.GetPodSandboxId()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("PodSandboxStats for %q failed", r.GetPodSandboxId()) + } else { + log.G(ctx).Debugf("PodSandboxStats for %q returns stats %+v", r.GetPodSandboxId(), res.GetStats()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.PodSandboxStatsRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.PodSandboxStatsResponse + v1res, err = in.c.PodSandboxStats(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.PodSandboxStatsResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("PodSandboxStats for %q failed", r.GetPodSandboxId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) ContainerStats(ctx context.Context, r *runtime.ContainerStatsRequest) (res *runtime.ContainerStatsResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err } @@ -407,6 +1267,96 @@ func (in *instrumentedService) ContainerStats(ctx context.Context, r *runtime.Co return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) ContainerStats(ctx context.Context, r 
*runtime_alpha.ContainerStatsRequest) (res *runtime_alpha.ContainerStatsResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Debugf("ContainerStats for %q", r.GetContainerId()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("ContainerStats for %q failed", r.GetContainerId()) + } else { + log.G(ctx).Debugf("ContainerStats for %q returns stats %+v", r.GetContainerId(), res.GetStats()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.ContainerStatsRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.ContainerStatsResponse + v1res, err = in.c.ContainerStats(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.ContainerStatsResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("ContainerStats for %q failed", r.GetContainerId()) + } + } + } + return res, errdefs.ToGRPC(err) +} + +func (in *instrumentedService) ListPodSandboxStats(ctx context.Context, r *runtime.ListPodSandboxStatsRequest) (res *runtime.ListPodSandboxStatsResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Tracef("ListPodSandboxStats with filter %+v", r.GetFilter()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Error("ListPodSandboxStats failed") + } else { + log.G(ctx).Tracef("ListPodSandboxStats returns stats %+v", res.GetStats()) + } + }() + res, err = in.c.ListPodSandboxStats(ctrdutil.WithNamespace(ctx), r) + return res, errdefs.ToGRPC(err) +} + +func (in *instrumentedAlphaService) ListPodSandboxStats(ctx context.Context, 
r *runtime_alpha.ListPodSandboxStatsRequest) (res *runtime_alpha.ListPodSandboxStatsResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Tracef("ListPodSandboxStats with filter %+v", r.GetFilter()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Error("ListPodSandboxStats failed") + } else { + log.G(ctx).Tracef("ListPodSandboxStats returns stats %+v", res.GetStats()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.ListPodSandboxStatsRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.ListPodSandboxStatsResponse + v1res, err = in.c.ListPodSandboxStats(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.ListPodSandboxStatsResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Error("ListPodSandboxStats failed") + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) ListContainerStats(ctx context.Context, r *runtime.ListContainerStatsRequest) (res *runtime.ListContainerStatsResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -423,6 +1373,43 @@ func (in *instrumentedService) ListContainerStats(ctx context.Context, r *runtim return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) ListContainerStats(ctx context.Context, r *runtime_alpha.ListContainerStatsRequest) (res *runtime_alpha.ListContainerStatsResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Tracef("ListContainerStats with filter %+v", r.GetFilter()) + defer func() { + if err != nil { 
+ log.G(ctx).WithError(err).Error("ListContainerStats failed") + } else { + log.G(ctx).Tracef("ListContainerStats returns stats %+v", res.GetStats()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.ListContainerStatsRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.ListContainerStatsResponse + v1res, err = in.c.ListContainerStats(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.ListContainerStatsResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Error("ListContainerStats failed") + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) Status(ctx context.Context, r *runtime.StatusRequest) (res *runtime.StatusResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -439,6 +1426,43 @@ func (in *instrumentedService) Status(ctx context.Context, r *runtime.StatusRequ return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) Status(ctx context.Context, r *runtime_alpha.StatusRequest) (res *runtime_alpha.StatusResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Tracef("Status") + defer func() { + if err != nil { + log.G(ctx).WithError(err).Error("Status failed") + } else { + log.G(ctx).Tracef("Status returns status %+v", res.GetStatus()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.StatusRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.StatusResponse + 
v1res, err = in.c.Status(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.StatusResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Error("Status failed") + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) Version(ctx context.Context, r *runtime.VersionRequest) (res *runtime.VersionResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -455,6 +1479,22 @@ func (in *instrumentedService) Version(ctx context.Context, r *runtime.VersionRe return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) Version(ctx context.Context, r *runtime_alpha.VersionRequest) (res *runtime_alpha.VersionResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Tracef("Version with client side version %q", r.GetVersion()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Error("Version failed") + } else { + log.G(ctx).Tracef("Version returns %+v", res) + } + }() + res, err = in.c.AlphaVersion(ctrdutil.WithNamespace(ctx), r) + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) UpdateRuntimeConfig(ctx context.Context, r *runtime.UpdateRuntimeConfigRequest) (res *runtime.UpdateRuntimeConfigResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -464,13 +1504,50 @@ func (in *instrumentedService) UpdateRuntimeConfig(ctx context.Context, r *runti if err != nil { log.G(ctx).WithError(err).Error("UpdateRuntimeConfig failed") } else { - log.G(ctx).Debug("UpdateRuntimeConfig returns returns successfully") + log.G(ctx).Debug("UpdateRuntimeConfig returns successfully") } }() res, err = 
in.c.UpdateRuntimeConfig(ctrdutil.WithNamespace(ctx), r) return res, errdefs.ToGRPC(err) } +func (in *instrumentedAlphaService) UpdateRuntimeConfig(ctx context.Context, r *runtime_alpha.UpdateRuntimeConfigRequest) (res *runtime_alpha.UpdateRuntimeConfigResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Debugf("UpdateRuntimeConfig with config %+v", r.GetRuntimeConfig()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Error("UpdateRuntimeConfig failed") + } else { + log.G(ctx).Debug("UpdateRuntimeConfig returns successfully") + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + var v1r runtime.UpdateRuntimeConfigRequest + if err := alphaReqToV1Req(r, &v1r); err != nil { + return nil, errdefs.ToGRPC(err) + } + var v1res *runtime.UpdateRuntimeConfigResponse + v1res, err = in.c.UpdateRuntimeConfig(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + resp := &runtime_alpha.UpdateRuntimeConfigResponse{} + perr := v1RespToAlphaResp(v1res, resp) + if perr == nil { + res = resp + } else { + // actual error has precidence on error returned vs parse error issues + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Error("UpdateRuntimeConfig failed") + } + } + } + return res, errdefs.ToGRPC(err) +} + func (in *instrumentedService) ReopenContainerLog(ctx context.Context, r *runtime.ReopenContainerLogRequest) (res *runtime.ReopenContainerLogResponse, err error) { if err := in.checkInitialized(); err != nil { return nil, err @@ -486,3 +1563,110 @@ func (in *instrumentedService) ReopenContainerLog(ctx context.Context, r *runtim res, err = in.c.ReopenContainerLog(ctrdutil.WithNamespace(ctx), r) return res, errdefs.ToGRPC(err) } + +func (in *instrumentedAlphaService) ReopenContainerLog(ctx context.Context, r *runtime_alpha.ReopenContainerLogRequest) (res 
*runtime_alpha.ReopenContainerLogResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + log.G(ctx).Debugf("ReopenContainerLog for %q", r.GetContainerId()) + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("ReopenContainerLog for %q failed", r.GetContainerId()) + } else { + log.G(ctx).Debugf("ReopenContainerLog for %q returns successfully", r.GetContainerId()) + } + }() + // converts request and response for earlier CRI version to call and get response from the current version + p, err := r.Marshal() + if err == nil { + var v1r runtime.ReopenContainerLogRequest + if err = v1r.Unmarshal(p); err == nil { + var v1res *runtime.ReopenContainerLogResponse + v1res, err = in.c.ReopenContainerLog(ctrdutil.WithNamespace(ctx), &v1r) + if v1res != nil { + p, perr := v1res.Marshal() + if perr == nil { + resp := &runtime_alpha.ReopenContainerLogResponse{} + if perr = resp.Unmarshal(p); perr == nil { + res = resp + } + } + // actual error has precidence on error returned vs parse error issues + if perr != nil { + if err == nil { + err = perr + } else { + // extra log entry if convert response parse error and request error + log.G(ctx).WithError(perr).Errorf("ReopenContainerLog for %q failed", r.GetContainerId()) + } + } + } + } + } + return res, errdefs.ToGRPC(err) +} + +func (in *instrumentedService) CheckpointContainer(ctx context.Context, r *runtime.CheckpointContainerRequest) (res *runtime.CheckpointContainerResponse, err error) { + if err := in.checkInitialized(); err != nil { + return nil, err + } + + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("CheckpointContainer failed, error") + } else { + log.G(ctx).Debug("CheckpointContainer returns successfully") + } + }() + + res, err = in.c.CheckpointContainer(ctx, r) + return res, errdefs.ToGRPC(err) +} + +func (in *instrumentedService) GetContainerEvents(r *runtime.GetEventsRequest, s runtime.RuntimeService_GetContainerEventsServer) (err error) { + 
if err := in.checkInitialized(); err != nil { + return err + } + + ctx := s.Context() + defer func() { + if err != nil { + log.G(ctx).WithError(err).Errorf("GetContainerEvents failed, error") + } else { + log.G(ctx).Debug("GetContainerEvents returns successfully") + } + }() + + err = in.c.GetContainerEvents(r, s) + return errdefs.ToGRPC(err) +} + +func alphaReqToV1Req( + alphar interface{ Marshal() ([]byte, error) }, + v1r interface{ Unmarshal(_ []byte) error }, +) error { + p, err := alphar.Marshal() + if err != nil { + return err + } + + if err = v1r.Unmarshal(p); err != nil { + return err + } + return nil +} + +func v1RespToAlphaResp( + v1res interface{ Marshal() ([]byte, error) }, + alphares interface{ Unmarshal(_ []byte) error }, +) error { + p, err := v1res.Marshal() + if err != nil { + return err + } + + if err = alphares.Unmarshal(p); err != nil { + return err + } + return nil +} diff --git a/pkg/cri/server/metrics.go b/pkg/cri/server/metrics.go new file mode 100644 index 0000000..0e285fe --- /dev/null +++ b/pkg/cri/server/metrics.go @@ -0,0 +1,74 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package server + +import ( + metrics "github.com/docker/go-metrics" +) + +var ( + sandboxListTimer metrics.Timer + sandboxCreateNetworkTimer metrics.Timer + sandboxDeleteNetwork metrics.Timer + + sandboxRuntimeCreateTimer metrics.LabeledTimer + sandboxRuntimeStopTimer metrics.LabeledTimer + sandboxRemoveTimer metrics.LabeledTimer + + containerListTimer metrics.Timer + containerRemoveTimer metrics.LabeledTimer + containerCreateTimer metrics.LabeledTimer + containerStopTimer metrics.LabeledTimer + containerStartTimer metrics.LabeledTimer + + networkPluginOperations metrics.LabeledCounter + networkPluginOperationsErrors metrics.LabeledCounter + networkPluginOperationsLatency metrics.LabeledTimer +) + +func init() { + // these CRI metrics record latencies for successful operations around a sandbox and container's lifecycle. + ns := metrics.NewNamespace("containerd", "cri", nil) + + sandboxListTimer = ns.NewTimer("sandbox_list", "time to list sandboxes") + sandboxCreateNetworkTimer = ns.NewTimer("sandbox_create_network", "time to create the network for a sandbox") + sandboxDeleteNetwork = ns.NewTimer("sandbox_delete_network", "time to delete a sandbox's network") + + sandboxRuntimeCreateTimer = ns.NewLabeledTimer("sandbox_runtime_create", "time to create a sandbox in the runtime", "runtime") + sandboxRuntimeStopTimer = ns.NewLabeledTimer("sandbox_runtime_stop", "time to stop a sandbox", "runtime") + sandboxRemoveTimer = ns.NewLabeledTimer("sandbox_remove", "time to remove a sandbox", "runtime") + + containerListTimer = ns.NewTimer("container_list", "time to list containers") + containerRemoveTimer = ns.NewLabeledTimer("container_remove", "time to remove a container", "runtime") + containerCreateTimer = ns.NewLabeledTimer("container_create", "time to create a container", "runtime") + containerStopTimer = ns.NewLabeledTimer("container_stop", "time to stop a container", "runtime") + containerStartTimer = ns.NewLabeledTimer("container_start", "time to start a 
container", "runtime") + + networkPluginOperations = ns.NewLabeledCounter("network_plugin_operations_total", "cumulative number of network plugin operations by operation type", "operation_type") + networkPluginOperationsErrors = ns.NewLabeledCounter("network_plugin_operations_errors_total", "cumulative number of network plugin operations by operation type", "operation_type") + networkPluginOperationsLatency = ns.NewLabeledTimer("network_plugin_operations_duration_seconds", "latency in seconds of network plugin operations. Broken down by operation type", "operation_type") + + metrics.Register(ns) +} + +// for backwards compatibility with kubelet/dockershim metrics +// https://github.com/containerd/containerd/issues/7801 +const ( + networkStatusOp = "get_pod_network_status" + networkSetUpOp = "set_up_pod" + networkTearDownOp = "tear_down_pod" +) diff --git a/pkg/cri/server/rdt_linux.go b/pkg/cri/server/rdt_linux.go new file mode 100644 index 0000000..38a9395 --- /dev/null +++ b/pkg/cri/server/rdt_linux.go @@ -0,0 +1,51 @@ +//go:build !no_rdt +// +build !no_rdt + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "fmt" + + "github.com/containerd/containerd/services/tasks" + "github.com/intel/goresctrl/pkg/rdt" + "github.com/sirupsen/logrus" +) + +// rdtClassFromAnnotations examines container and pod annotations of a +// container and returns its effective RDT class. 
+func (c *criService) rdtClassFromAnnotations(containerName string, containerAnnotations, podAnnotations map[string]string) (string, error) { + cls, err := rdt.ContainerClassFromAnnotations(containerName, containerAnnotations, podAnnotations) + + if err == nil { + // Our internal check that RDT has been enabled + if cls != "" && !tasks.RdtEnabled() { + err = fmt.Errorf("RDT disabled, refusing to set RDT class of container %q to %q", containerName, cls) + } + } + + if err != nil { + if !tasks.RdtEnabled() && c.config.ContainerdConfig.IgnoreRdtNotEnabledErrors { + logrus.Debugf("continuing create container %s, ignoring rdt not enabled (%v)", containerName, err) + return "", nil + } + return "", err + } + + return cls, nil +} diff --git a/vendor/github.com/containerd/continuity/devices/mknod_unix.go b/pkg/cri/server/rdt_stub_linux.go similarity index 74% rename from vendor/github.com/containerd/continuity/devices/mknod_unix.go rename to pkg/cri/server/rdt_stub_linux.go index d9e7a7a..1794935 100644 --- a/vendor/github.com/containerd/continuity/devices/mknod_unix.go +++ b/pkg/cri/server/rdt_stub_linux.go @@ -1,4 +1,5 @@ -// +build linux darwin solaris +//go:build no_rdt +// +build no_rdt /* Copyright The containerd Authors. @@ -16,10 +17,8 @@ limitations under the License. 
*/ -package devices +package server -import "golang.org/x/sys/unix" - -func mknod(path string, mode uint32, dev uint64) (err error) { - return unix.Mknod(path, mode, int(dev)) +func (c *criService) rdtClassFromAnnotations(containerName string, containerAnnotations, podAnnotations map[string]string) (string, error) { + return "", nil } diff --git a/pkg/cri/server/restart.go b/pkg/cri/server/restart.go index 68e7e7e..f5f3e50 100644 --- a/pkg/cri/server/restart.go +++ b/pkg/cri/server/restart.go @@ -17,7 +17,7 @@ package server import ( - "io/ioutil" + "fmt" "os" "path/filepath" goruntime "runtime" @@ -30,9 +30,8 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/containerd/typeurl" - "github.com/pkg/errors" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" cio "github.com/containerd/containerd/pkg/cri/io" containerstore "github.com/containerd/containerd/pkg/cri/store/container" @@ -55,7 +54,7 @@ func (c *criService) recover(ctx context.Context) error { // Recover all sandboxes. 
sandboxes, err := c.client.Containers(ctx, filterLabel(containerKindLabel, containerKindSandbox)) if err != nil { - return errors.Wrap(err, "failed to list sandbox containers") + return fmt.Errorf("failed to list sandbox containers: %w", err) } for _, sandbox := range sandboxes { sb, err := c.loadSandbox(ctx, sandbox) @@ -65,17 +64,17 @@ func (c *criService) recover(ctx context.Context) error { } log.G(ctx).Debugf("Loaded sandbox %+v", sb) if err := c.sandboxStore.Add(sb); err != nil { - return errors.Wrapf(err, "failed to add sandbox %q to store", sandbox.ID()) + return fmt.Errorf("failed to add sandbox %q to store: %w", sandbox.ID(), err) } if err := c.sandboxNameIndex.Reserve(sb.Name, sb.ID); err != nil { - return errors.Wrapf(err, "failed to reserve sandbox name %q", sb.Name) + return fmt.Errorf("failed to reserve sandbox name %q: %w", sb.Name, err) } } // Recover all containers. containers, err := c.client.Containers(ctx, filterLabel(containerKindLabel, containerKindContainer)) if err != nil { - return errors.Wrap(err, "failed to list containers") + return fmt.Errorf("failed to list containers: %w", err) } for _, container := range containers { cntr, err := c.loadContainer(ctx, container) @@ -85,17 +84,17 @@ func (c *criService) recover(ctx context.Context) error { } log.G(ctx).Debugf("Loaded container %+v", cntr) if err := c.containerStore.Add(cntr); err != nil { - return errors.Wrapf(err, "failed to add container %q to store", container.ID()) + return fmt.Errorf("failed to add container %q to store: %w", container.ID(), err) } if err := c.containerNameIndex.Reserve(cntr.Name, cntr.ID); err != nil { - return errors.Wrapf(err, "failed to reserve container name %q", cntr.Name) + return fmt.Errorf("failed to reserve container name %q: %w", cntr.Name, err) } } // Recover all images. 
cImages, err := c.client.ListImages(ctx) if err != nil { - return errors.Wrap(err, "failed to list images") + return fmt.Errorf("failed to list images: %w", err) } c.loadImages(ctx, cImages) @@ -131,7 +130,7 @@ func (c *criService) recover(ctx context.Context) error { }, } { if err := cleanupOrphanedIDDirs(ctx, cleanup.cntrs, cleanup.base); err != nil { - return errors.Wrap(err, cleanup.errMsg) + return fmt.Errorf("%s: %w", cleanup.errMsg, err) } } return nil @@ -161,15 +160,15 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe // Load container metadata. exts, err := cntr.Extensions(ctx) if err != nil { - return container, errors.Wrap(err, "failed to get container extensions") + return container, fmt.Errorf("failed to get container extensions: %w", err) } ext, ok := exts[containerMetadataExtension] if !ok { - return container, errors.Errorf("metadata extension %q not found", containerMetadataExtension) + return container, fmt.Errorf("metadata extension %q not found", containerMetadataExtension) } data, err := typeurl.UnmarshalAny(&ext) if err != nil { - return container, errors.Wrapf(err, "failed to unmarshal metadata extension %q", ext) + return container, fmt.Errorf("failed to unmarshal metadata extension %q: %w", ext, err) } meta := data.(*containerstore.Metadata) @@ -209,7 +208,7 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe return containerIO, nil }) if err != nil && !errdefs.IsNotFound(err) { - return errors.Wrap(err, "failed to load task") + return fmt.Errorf("failed to load task: %w", err) } var s containerd.Status var notFound bool @@ -222,7 +221,7 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe if err != nil { // It's still possible that task is deleted during this window. 
if !errdefs.IsNotFound(err) { - return errors.Wrap(err, "failed to get task status") + return fmt.Errorf("failed to get task status: %w", err) } notFound = true } @@ -239,7 +238,7 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe cio.WithNewFIFOs(volatileContainerDir, meta.Config.GetTty(), meta.Config.GetStdin()), ) if err != nil { - return errors.Wrap(err, "failed to create container io") + return fmt.Errorf("failed to create container io: %w", err) } case runtime.ContainerState_CONTAINER_RUNNING: // Container was in running state, but its task has been deleted, @@ -258,17 +257,17 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe // gets restarted during container start. // Container must be in `CREATED` state. if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) { - return errors.Wrap(err, "failed to delete task") + return fmt.Errorf("failed to delete task: %w", err) } if status.State() != runtime.ContainerState_CONTAINER_CREATED { - return errors.Errorf("unexpected container state for created task: %q", status.State()) + return fmt.Errorf("unexpected container state for created task: %q", status.State()) } case containerd.Running: // Task is running. Container must be in `RUNNING` state, based on our assumption that // "task should not be started when containerd is down". 
switch status.State() { case runtime.ContainerState_CONTAINER_EXITED: - return errors.Errorf("unexpected container state for running task: %q", status.State()) + return fmt.Errorf("unexpected container state for running task: %q", status.State()) case runtime.ContainerState_CONTAINER_RUNNING: default: // This may happen if containerd gets restarted after task is started, but @@ -281,7 +280,7 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe exitCh, err := t.Wait(ctrdutil.NamespacedContext()) if err != nil { if !errdefs.IsNotFound(err) { - return errors.Wrap(err, "failed to wait for task") + return fmt.Errorf("failed to wait for task: %w", err) } // Container was in running state, but its task has been deleted, // set unknown exited state. @@ -295,12 +294,12 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe case containerd.Stopped: // Task is stopped. Updata status and delete the task. if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) { - return errors.Wrap(err, "failed to delete task") + return fmt.Errorf("failed to delete task: %w", err) } status.FinishedAt = s.ExitTime.UnixNano() status.ExitCode = int32(s.ExitStatus) default: - return errors.Errorf("unexpected task status %q", s.Status) + return fmt.Errorf("unexpected task status %q", s.Status) } } return nil @@ -330,15 +329,15 @@ func (c *criService) loadSandbox(ctx context.Context, cntr containerd.Container) // Load sandbox metadata. 
exts, err := cntr.Extensions(ctx) if err != nil { - return sandbox, errors.Wrap(err, "failed to get sandbox container extensions") + return sandbox, fmt.Errorf("failed to get sandbox container extensions: %w", err) } ext, ok := exts[sandboxMetadataExtension] if !ok { - return sandbox, errors.Errorf("metadata extension %q not found", sandboxMetadataExtension) + return sandbox, fmt.Errorf("metadata extension %q not found", sandboxMetadataExtension) } data, err := typeurl.UnmarshalAny(&ext) if err != nil { - return sandbox, errors.Wrapf(err, "failed to unmarshal metadata extension %q", ext) + return sandbox, fmt.Errorf("failed to unmarshal metadata extension %q: %w", ext, err) } meta := data.(*sandboxstore.Metadata) @@ -347,14 +346,14 @@ func (c *criService) loadSandbox(ctx context.Context, cntr containerd.Container) // Load sandbox created timestamp. info, err := cntr.Info(ctx) if err != nil { - return status, errors.Wrap(err, "failed to get sandbox container info") + return status, fmt.Errorf("failed to get sandbox container info: %w", err) } status.CreatedAt = info.CreatedAt // Load sandbox state. t, err := cntr.Task(ctx, nil) if err != nil && !errdefs.IsNotFound(err) { - return status, errors.Wrap(err, "failed to load task") + return status, fmt.Errorf("failed to load task: %w", err) } var taskStatus containerd.Status var notFound bool @@ -367,7 +366,7 @@ func (c *criService) loadSandbox(ctx context.Context, cntr containerd.Container) if err != nil { // It's still possible that task is deleted during this window. 
if !errdefs.IsNotFound(err) { - return status, errors.Wrap(err, "failed to get task status") + return status, fmt.Errorf("failed to get task status: %w", err) } notFound = true } @@ -382,7 +381,7 @@ func (c *criService) loadSandbox(ctx context.Context, cntr containerd.Container) exitCh, err := t.Wait(ctrdutil.NamespacedContext()) if err != nil { if !errdefs.IsNotFound(err) { - return status, errors.Wrap(err, "failed to wait for task") + return status, fmt.Errorf("failed to wait for task: %w", err) } status.State = sandboxstore.StateNotReady } else { @@ -394,7 +393,7 @@ func (c *criService) loadSandbox(ctx context.Context, cntr containerd.Container) } else { // Task is not running. Delete the task and set sandbox state as NOTREADY. if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) { - return status, errors.Wrap(err, "failed to delete task") + return status, fmt.Errorf("failed to delete task: %w", err) } status.State = sandboxstore.StateNotReady } @@ -414,6 +413,9 @@ func (c *criService) loadSandbox(ctx context.Context, cntr containerd.Container) // Don't need to load netns for host network sandbox. return sandbox, nil } + if goruntime.GOOS == "windows" && meta.Config.GetWindows().GetSecurityContext().GetHostProcess() { + return sandbox, nil + } sandbox.NetNS = netns.LoadNetNS(meta.NetNSPath) // It doesn't matter whether task is running or not. If it is running, sandbox @@ -455,9 +457,9 @@ func (c *criService) loadImages(ctx context.Context, cImages []containerd.Image) func cleanupOrphanedIDDirs(ctx context.Context, cntrs []containerd.Container, base string) error { // Cleanup orphaned id directories. 
- dirs, err := ioutil.ReadDir(base) + dirs, err := os.ReadDir(base) if err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "failed to read base directory") + return fmt.Errorf("failed to read base directory: %w", err) } idsMap := make(map[string]containerd.Container) for _, cntr := range cntrs { diff --git a/pkg/cri/server/sandbox_list.go b/pkg/cri/server/sandbox_list.go index a0835aa..b4f0722 100644 --- a/pkg/cri/server/sandbox_list.go +++ b/pkg/cri/server/sandbox_list.go @@ -17,14 +17,17 @@ package server import ( + "time" + "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" ) // ListPodSandbox returns a list of Sandbox. func (c *criService) ListPodSandbox(ctx context.Context, r *runtime.ListPodSandboxRequest) (*runtime.ListPodSandboxResponse, error) { + start := time.Now() // List all sandboxes from store. sandboxesInStore := c.sandboxStore.List() var sandboxes []*runtime.PodSandbox @@ -36,6 +39,8 @@ func (c *criService) ListPodSandbox(ctx context.Context, r *runtime.ListPodSandb } sandboxes = c.filterCRISandboxes(sandboxes, r.GetFilter()) + + sandboxListTimer.UpdateSince(start) return &runtime.ListPodSandboxResponse{Items: sandboxes}, nil } @@ -63,6 +68,12 @@ func (c *criService) normalizePodSandboxFilter(filter *runtime.PodSandboxFilter) } } +func (c *criService) normalizePodSandboxStatsFilter(filter *runtime.PodSandboxStatsFilter) { + if sb, err := c.sandboxStore.Get(filter.GetId()); err == nil { + filter.Id = sb.ID + } +} + // filterCRISandboxes filters CRISandboxes. 
func (c *criService) filterCRISandboxes(sandboxes []*runtime.PodSandbox, filter *runtime.PodSandboxFilter) []*runtime.PodSandbox { if filter == nil { diff --git a/pkg/cri/server/sandbox_list_test.go b/pkg/cri/server/sandbox_list_test.go index dd4ac21..302f887 100644 --- a/pkg/cri/server/sandbox_list_test.go +++ b/pkg/cri/server/sandbox_list_test.go @@ -21,7 +21,7 @@ import ( "time" "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" ) diff --git a/pkg/cri/server/sandbox_portforward.go b/pkg/cri/server/sandbox_portforward.go index 00fb292..29ffb58 100644 --- a/pkg/cri/server/sandbox_portforward.go +++ b/pkg/cri/server/sandbox_portforward.go @@ -17,9 +17,11 @@ package server import ( - "github.com/pkg/errors" + "errors" + "fmt" + "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" ) @@ -28,7 +30,7 @@ import ( func (c *criService) PortForward(ctx context.Context, r *runtime.PortForwardRequest) (retRes *runtime.PortForwardResponse, retErr error) { sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId()) if err != nil { - return nil, errors.Wrapf(err, "failed to find sandbox %q", r.GetPodSandboxId()) + return nil, fmt.Errorf("failed to find sandbox %q: %w", r.GetPodSandboxId(), err) } if sandbox.Status.Get().State != sandboxstore.StateReady { return nil, errors.New("sandbox container is not running") diff --git a/pkg/cri/server/sandbox_portforward_linux.go b/pkg/cri/server/sandbox_portforward_linux.go index 33e3e83..3eda97e 100644 --- a/pkg/cri/server/sandbox_portforward_linux.go +++ b/pkg/cri/server/sandbox_portforward_linux.go @@ -24,18 +24,17 @@ import ( "github.com/containerd/containerd/log" "github.com/containernetworking/plugins/pkg/ns" - "github.com/pkg/errors" 
"golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // portForward uses netns to enter the sandbox namespace, and forwards a stream inside the -// the namespace to a specific port. It keeps forwarding until it exits or client disconnect. +// namespace to a specific port. It keeps forwarding until it exits or client disconnect. func (c *criService) portForward(ctx context.Context, id string, port int32, stream io.ReadWriteCloser) error { s, err := c.sandboxStore.Get(id) if err != nil { - return errors.Wrapf(err, "failed to find sandbox %q in store", id) + return fmt.Errorf("failed to find sandbox %q in store: %w", id, err) } var netNSDo func(func(ns.NetNS) error) error @@ -45,9 +44,9 @@ func (c *criService) portForward(ctx context.Context, id string, port int32, str hostNet := securityContext.GetNamespaceOptions().GetNetwork() == runtime.NamespaceMode_NODE if !hostNet { if closed, err := s.NetNS.Closed(); err != nil { - return errors.Wrapf(err, "failed to check netwok namespace closed for sandbox %q", id) + return fmt.Errorf("failed to check netwok namespace closed for sandbox %q: %w", id, err) } else if closed { - return errors.Errorf("network namespace for sandbox %q is closed", id) + return fmt.Errorf("network namespace for sandbox %q is closed", id) } netNSDo = s.NetNS.Do netNSPath = s.NetNS.GetPath() @@ -84,7 +83,7 @@ func (c *criService) portForward(ctx context.Context, id string, port int32, str defer conn.Close() errCh := make(chan error, 2) - // Copy from the the namespace port connection to the client stream + // Copy from the namespace port connection to the client stream go func() { log.G(ctx).Debugf("PortForward copying data from namespace %q port %d to the client stream", id, port) _, err := io.Copy(stream, conn) @@ -130,7 +129,7 @@ func (c *criService) portForward(ctx context.Context, id string, port int32, str }) if err != nil { - return errors.Wrapf(err, "failed to execute 
portforward in network namespace %q", netNSPath) + return fmt.Errorf("failed to execute portforward in network namespace %q: %w", netNSPath, err) } log.G(ctx).Infof("Finish port forwarding for %q port %d", id, port) diff --git a/pkg/cri/server/sandbox_portforward_other.go b/pkg/cri/server/sandbox_portforward_other.go index 1b88170..2861ddc 100644 --- a/pkg/cri/server/sandbox_portforward_other.go +++ b/pkg/cri/server/sandbox_portforward_other.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* @@ -19,15 +20,15 @@ package server import ( + "fmt" "io" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" "golang.org/x/net/context" ) // portForward uses netns to enter the sandbox namespace, and forwards a stream inside the -// the namespace to a specific port. It keeps forwarding until it exits or client disconnect. +// namespace to a specific port. It keeps forwarding until it exits or client disconnect. func (c *criService) portForward(ctx context.Context, id string, port int32, stream io.ReadWriteCloser) error { - return errors.Wrap(errdefs.ErrNotImplemented, "port forward") + return fmt.Errorf("port forward: %w", errdefs.ErrNotImplemented) } diff --git a/pkg/cri/server/sandbox_portforward_windows.go b/pkg/cri/server/sandbox_portforward_windows.go index 70f1ef8..d5bf776 100644 --- a/pkg/cri/server/sandbox_portforward_windows.go +++ b/pkg/cri/server/sandbox_portforward_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -23,24 +21,23 @@ import ( "fmt" "io" - "github.com/pkg/errors" "golang.org/x/net/context" "k8s.io/utils/exec" sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" - "github.com/containerd/containerd/pkg/ioutil" + cioutil "github.com/containerd/containerd/pkg/ioutil" ) func (c *criService) portForward(ctx context.Context, id string, port int32, stream io.ReadWriter) error { - stdout := ioutil.NewNopWriteCloser(stream) + stdout := cioutil.NewNopWriteCloser(stream) stderrBuffer := new(bytes.Buffer) - stderr := ioutil.NewNopWriteCloser(stderrBuffer) + stderr := cioutil.NewNopWriteCloser(stderrBuffer) // localhost is resolved to 127.0.0.1 in ipv4, and ::1 in ipv6. // Explicitly using ipv4 IP address in here to avoid flakiness. cmd := []string{"wincat.exe", "127.0.0.1", fmt.Sprint(port)} err := c.execInSandbox(ctx, id, cmd, stream, stdout, stderr) if err != nil { - return errors.Wrapf(err, "failed to execute port forward in sandbox: %s", stderrBuffer.String()) + return fmt.Errorf("failed to execute port forward in sandbox: %s: %w", stderrBuffer.String(), err) } return nil } @@ -49,13 +46,13 @@ func (c *criService) execInSandbox(ctx context.Context, sandboxID string, cmd [] // Get sandbox from our sandbox store. 
sb, err := c.sandboxStore.Get(sandboxID) if err != nil { - return errors.Wrapf(err, "failed to find sandbox %q in store", sandboxID) + return fmt.Errorf("failed to find sandbox %q in store: %w", sandboxID, err) } // Check the sandbox state state := sb.Status.Get().State if state != sandboxstore.StateReady { - return errors.Errorf("sandbox is in %s state", fmt.Sprint(state)) + return fmt.Errorf("sandbox is in %s state", fmt.Sprint(state)) } opts := execOptions{ @@ -68,13 +65,13 @@ func (c *criService) execInSandbox(ctx context.Context, sandboxID string, cmd [] } exitCode, err := c.execInternal(ctx, sb.Container, sandboxID, opts) if err != nil { - return errors.Wrap(err, "failed to exec in sandbox") + return fmt.Errorf("failed to exec in sandbox: %w", err) } if *exitCode == 0 { return nil } return &exec.CodeExitError{ - Err: errors.Errorf("error executing command %v, exit code %d", cmd, *exitCode), + Err: fmt.Errorf("error executing command %v, exit code %d", cmd, *exitCode), Code: int(*exitCode), } } diff --git a/pkg/cri/server/sandbox_remove.go b/pkg/cri/server/sandbox_remove.go index 25572af..4f5a4f7 100644 --- a/pkg/cri/server/sandbox_remove.go +++ b/pkg/cri/server/sandbox_remove.go @@ -17,26 +17,27 @@ package server import ( + "fmt" + "time" + "github.com/containerd/containerd" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - - "github.com/containerd/containerd/pkg/cri/store" - sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // RemovePodSandbox removes the sandbox. If there are running containers in the // sandbox, they should be forcibly removed. 
func (c *criService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodSandboxRequest) (*runtime.RemovePodSandboxResponse, error) { + start := time.Now() sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId()) if err != nil { - if err != store.ErrNotExist { - return nil, errors.Wrapf(err, "an error occurred when try to find sandbox %q", - r.GetPodSandboxId()) + if !errdefs.IsNotFound(err) { + return nil, fmt.Errorf("an error occurred when try to find sandbox %q: %w", + r.GetPodSandboxId(), err) } // Do not return error if the id doesn't exist. log.G(ctx).Tracef("RemovePodSandbox called for sandbox %q that does not exist", @@ -46,22 +47,21 @@ func (c *criService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodS // Use the full sandbox id. id := sandbox.ID - // If the sandbox is still running or in an unknown state, forcibly stop it. - state := sandbox.Status.Get().State - if state == sandboxstore.StateReady || state == sandboxstore.StateUnknown { - logrus.Infof("Forcibly stopping sandbox %q", id) - if err := c.stopPodSandbox(ctx, sandbox); err != nil { - return nil, errors.Wrapf(err, "failed to forcibly stop sandbox %q", id) - } + // If the sandbox is still running, not ready, or in an unknown state, forcibly stop it. + // Even if it's in a NotReady state, this will close its network namespace, if open. + // This can happen if the task process associated with the Pod died or it was killed. + logrus.Infof("Forcibly stopping sandbox %q", id) + if err := c.stopPodSandbox(ctx, sandbox); err != nil { + return nil, fmt.Errorf("failed to forcibly stop sandbox %q: %w", id, err) } // Return error if sandbox network namespace is not closed yet. 
if sandbox.NetNS != nil { nsPath := sandbox.NetNS.GetPath() if closed, err := sandbox.NetNS.Closed(); err != nil { - return nil, errors.Wrapf(err, "failed to check sandbox network namespace %q closed", nsPath) + return nil, fmt.Errorf("failed to check sandbox network namespace %q closed: %w", nsPath, err) } else if !closed { - return nil, errors.Errorf("sandbox network namespace %q is not fully closed", nsPath) + return nil, fmt.Errorf("sandbox network namespace %q is not fully closed", nsPath) } } @@ -77,26 +77,26 @@ func (c *criService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodS } _, err = c.RemoveContainer(ctx, &runtime.RemoveContainerRequest{ContainerId: cntr.ID}) if err != nil { - return nil, errors.Wrapf(err, "failed to remove container %q", cntr.ID) + return nil, fmt.Errorf("failed to remove container %q: %w", cntr.ID, err) } } // Cleanup the sandbox root directories. sandboxRootDir := c.getSandboxRootDir(id) if err := ensureRemoveAll(ctx, sandboxRootDir); err != nil { - return nil, errors.Wrapf(err, "failed to remove sandbox root directory %q", - sandboxRootDir) + return nil, fmt.Errorf("failed to remove sandbox root directory %q: %w", + sandboxRootDir, err) } volatileSandboxRootDir := c.getVolatileSandboxRootDir(id) if err := ensureRemoveAll(ctx, volatileSandboxRootDir); err != nil { - return nil, errors.Wrapf(err, "failed to remove volatile sandbox root directory %q", - volatileSandboxRootDir) + return nil, fmt.Errorf("failed to remove volatile sandbox root directory %q: %w", + volatileSandboxRootDir, err) } // Delete sandbox container. 
if err := sandbox.Container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { if !errdefs.IsNotFound(err) { - return nil, errors.Wrapf(err, "failed to delete sandbox container %q", id) + return nil, fmt.Errorf("failed to delete sandbox container %q: %w", id, err) } log.G(ctx).Tracef("Remove called for sandbox container %q that does not exist", id) } @@ -111,5 +111,7 @@ func (c *criService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodS // Release the sandbox name reserved for the sandbox. c.sandboxNameIndex.ReleaseByKey(id) + sandboxRemoveTimer.WithValues(sandbox.RuntimeHandler).UpdateSince(start) + return &runtime.RemovePodSandboxResponse{}, nil } diff --git a/pkg/cri/server/sandbox_run.go b/pkg/cri/server/sandbox_run.go index 5060d57..5f19ffb 100644 --- a/pkg/cri/server/sandbox_run.go +++ b/pkg/cri/server/sandbox_run.go @@ -18,24 +18,27 @@ package server import ( "encoding/json" + "errors" + "fmt" "math" "path/filepath" goruntime "runtime" "strings" + "time" "github.com/containerd/containerd" containerdio "github.com/containerd/containerd/cio" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/snapshots" cni "github.com/containerd/go-cni" "github.com/containerd/nri" v1 "github.com/containerd/nri/types/v1" "github.com/containerd/typeurl" "github.com/davecgh/go-spew/spew" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/annotations" criconfig "github.com/containerd/containerd/pkg/cri/config" @@ -45,7 +48,6 @@ import ( "github.com/containerd/containerd/pkg/cri/util" ctrdutil "github.com/containerd/containerd/pkg/cri/util" "github.com/containerd/containerd/pkg/netns" - "github.com/containerd/containerd/snapshots" selinux "github.com/opencontainers/selinux/go-selinux" ) @@ -67,15 +69,23 @@ func (c *criService) 
RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox return nil, errors.New("sandbox config must include metadata") } name := makeSandboxName(metadata) - log.G(ctx).Debugf("Generated id %q for sandbox %q", id, name) + log.G(ctx).WithField("podsandboxid", id).Debugf("generated id for sandbox name %q", name) + + // cleanupErr records the last error returned by the critical cleanup operations in deferred functions, + // like CNI teardown and stopping the running sandbox task. + // If cleanup is not completed for some reason, the CRI-plugin will leave the sandbox + // in a not-ready state, which can later be cleaned up by the next execution of the kubelet's syncPod workflow. + var cleanupErr error + // Reserve the sandbox name to avoid concurrent `RunPodSandbox` request starting the // same sandbox. if err := c.sandboxNameIndex.Reserve(name, id); err != nil { - return nil, errors.Wrapf(err, "failed to reserve sandbox name %q", name) + return nil, fmt.Errorf("failed to reserve sandbox name %q: %w", name, err) } defer func() { - // Release the name if the function returns with an error. - if retErr != nil { + // Release the name if the function returns with an error and all the resource cleanup is done. + // When cleanupErr != nil, the name will be cleaned in sandbox_remove. + if retErr != nil && cleanupErr == nil { c.sandboxNameIndex.ReleaseByName(name) } }() @@ -96,78 +106,30 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox // Ensure sandbox container image snapshot. 
image, err := c.ensureImageExists(ctx, c.config.SandboxImage, config) if err != nil { - return nil, errors.Wrapf(err, "failed to get sandbox image %q", c.config.SandboxImage) + return nil, fmt.Errorf("failed to get sandbox image %q: %w", c.config.SandboxImage, err) } containerdImage, err := c.toContainerdImage(ctx, *image) if err != nil { - return nil, errors.Wrapf(err, "failed to get image from containerd %q", image.ID) + return nil, fmt.Errorf("failed to get image from containerd %q: %w", image.ID, err) } ociRuntime, err := c.getSandboxRuntime(config, r.GetRuntimeHandler()) if err != nil { - return nil, errors.Wrap(err, "failed to get sandbox runtime") - } - log.G(ctx).Debugf("Use OCI %+v for sandbox %q", ociRuntime, id) - - podNetwork := true - // Pod network is always needed on windows. - if goruntime.GOOS != "windows" && - config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtime.NamespaceMode_NODE { - // Pod network is not needed on linux with host network. - podNetwork = false - } - if podNetwork { - // If it is not in host network namespace then create a namespace and set the sandbox - // handle. NetNSPath in sandbox metadata and NetNS is non empty only for non host network - // namespaces. If the pod is in host network namespace then both are empty and should not - // be used. - var netnsMountDir string = "/var/run/netns" - if c.config.NetNSMountsUnderStateDir { - netnsMountDir = filepath.Join(c.config.StateDir, "netns") - } - sandbox.NetNS, err = netns.NewNetNS(netnsMountDir) - if err != nil { - return nil, errors.Wrapf(err, "failed to create network namespace for sandbox %q", id) - } - sandbox.NetNSPath = sandbox.NetNS.GetPath() - defer func() { - if retErr != nil { - deferCtx, deferCancel := ctrdutil.DeferContext() - defer deferCancel() - // Teardown network if an error is returned. 
- if err := c.teardownPodNetwork(deferCtx, sandbox); err != nil { - log.G(ctx).WithError(err).Errorf("Failed to destroy network for sandbox %q", id) - } - - if err := sandbox.NetNS.Remove(); err != nil { - log.G(ctx).WithError(err).Errorf("Failed to remove network namespace %s for sandbox %q", sandbox.NetNSPath, id) - } - sandbox.NetNSPath = "" - } - }() - - // Setup network for sandbox. - // Certain VM based solutions like clear containers (Issue containerd/cri-containerd#524) - // rely on the assumption that CRI shim will not be querying the network namespace to check the - // network states such as IP. - // In future runtime implementation should avoid relying on CRI shim implementation details. - // In this case however caching the IP will add a subtle performance enhancement by avoiding - // calls to network namespace of the pod to query the IP of the veth interface on every - // SandboxStatus request. - if err := c.setupPodNetwork(ctx, &sandbox); err != nil { - return nil, errors.Wrapf(err, "failed to setup network for sandbox %q", id) - } + return nil, fmt.Errorf("failed to get sandbox runtime: %w", err) } + log.G(ctx).WithField("podsandboxid", id).Debugf("use OCI runtime %+v", ociRuntime) + runtimeStart := time.Now() // Create sandbox container. // NOTE: sandboxContainerSpec SHOULD NOT have side // effect, e.g. accessing/creating files, so that we can test // it safely. 
- spec, err := c.sandboxContainerSpec(id, config, &image.ImageSpec.Config, sandbox.NetNSPath, ociRuntime.PodAnnotations) + // NOTE: the network namespace path will be created later and update through updateNetNamespacePath function + spec, err := c.sandboxContainerSpec(id, config, &image.ImageSpec.Config, "", ociRuntime.PodAnnotations) if err != nil { - return nil, errors.Wrap(err, "failed to generate sandbox container spec") + return nil, fmt.Errorf("failed to generate sandbox container spec: %w", err) } - log.G(ctx).Debugf("Sandbox container %q spec: %#+v", id, spew.NewFormatter(spec)) + log.G(ctx).WithField("podsandboxid", id).Debugf("sandbox container spec: %#+v", spew.NewFormatter(spec)) sandbox.ProcessLabel = spec.Process.SelinuxLabel defer func() { if retErr != nil { @@ -189,16 +151,15 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox // Generate spec options that will be applied to the spec later. specOpts, err := c.sandboxContainerSpecOpts(config, &image.ImageSpec.Config) if err != nil { - return nil, errors.Wrap(err, "failed to generate sanbdox container spec options") + return nil, fmt.Errorf("failed to generate sanbdox container spec options: %w", err) } sandboxLabels := buildLabels(config.Labels, image.ImageSpec.Config.Labels, containerKindSandbox) runtimeOpts, err := generateRuntimeOptions(ociRuntime, c.config) if err != nil { - return nil, errors.Wrap(err, "failed to generate runtime options") + return nil, fmt.Errorf("failed to generate runtime options: %w", err) } - snapshotterOpt := snapshots.WithLabels(snapshots.FilterInheritedLabels(config.Annotations)) opts := []containerd.NewContainerOpts{ containerd.WithSnapshotter(c.config.ContainerdConfig.Snapshotter), @@ -210,14 +171,29 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox container, err := c.client.NewContainer(ctx, id, opts...) 
if err != nil { - return nil, errors.Wrap(err, "failed to create containerd container") + return nil, fmt.Errorf("failed to create containerd container: %w", err) } + + // Add container into sandbox store in INIT state. + sandbox.Container = container + defer func() { - if retErr != nil { + // Put the sandbox into sandbox store when the some resource fails to be cleaned. + if retErr != nil && cleanupErr != nil { + log.G(ctx).WithError(cleanupErr).Errorf("encountered an error cleaning up failed sandbox %q, marking sandbox state as SANDBOX_UNKNOWN", id) + if err := c.sandboxStore.Add(sandbox); err != nil { + log.G(ctx).WithError(err).Errorf("failed to add sandbox %+v into store", sandbox) + } + } + }() + + defer func() { + // Delete container only if all the resource cleanup is done. + if retErr != nil && cleanupErr == nil { deferCtx, deferCancel := ctrdutil.DeferContext() defer deferCancel() - if err := container.Delete(deferCtx, containerd.WithSnapshotCleanup); err != nil { - log.G(ctx).WithError(err).Errorf("Failed to delete containerd container %q", id) + if cleanupErr = container.Delete(deferCtx, containerd.WithSnapshotCleanup); cleanupErr != nil { + log.G(ctx).WithError(cleanupErr).Errorf("Failed to delete containerd container %q", id) } } }() @@ -225,8 +201,8 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox // Create sandbox container root directories. 
sandboxRootDir := c.getSandboxRootDir(id) if err := c.os.MkdirAll(sandboxRootDir, 0755); err != nil { - return nil, errors.Wrapf(err, "failed to create sandbox root directory %q", - sandboxRootDir) + return nil, fmt.Errorf("failed to create sandbox root directory %q: %w", + sandboxRootDir, err) } defer func() { if retErr != nil { @@ -239,8 +215,8 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox }() volatileSandboxRootDir := c.getVolatileSandboxRootDir(id) if err := c.os.MkdirAll(volatileSandboxRootDir, 0755); err != nil { - return nil, errors.Wrapf(err, "failed to create volatile sandbox root directory %q", - volatileSandboxRootDir) + return nil, fmt.Errorf("failed to create volatile sandbox root directory %q: %w", + volatileSandboxRootDir, err) } defer func() { if retErr != nil { @@ -254,7 +230,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox // Setup files required for the sandbox. if err = c.setupSandboxFiles(id, config); err != nil { - return nil, errors.Wrapf(err, "failed to setup sandbox files") + return nil, fmt.Errorf("failed to setup sandbox files: %w", err) } defer func() { if retErr != nil { @@ -268,7 +244,97 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox // Update sandbox created timestamp. info, err := container.Info(ctx) if err != nil { - return nil, errors.Wrap(err, "failed to get sandbox container info") + return nil, fmt.Errorf("failed to get sandbox container info: %w", err) + } + + podNetwork := true + + if goruntime.GOOS != "windows" && + config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtime.NamespaceMode_NODE { + // Pod network is not needed on linux with host network. 
+ podNetwork = false + } + if goruntime.GOOS == "windows" && + config.GetWindows().GetSecurityContext().GetHostProcess() { + // Windows HostProcess pods can only run on the host network + podNetwork = false + } + + if podNetwork { + netStart := time.Now() + + // If it is not in host network namespace then create a namespace and set the sandbox + // handle. NetNSPath in sandbox metadata and NetNS is non empty only for non host network + // namespaces. If the pod is in host network namespace then both are empty and should not + // be used. + var netnsMountDir = "/var/run/netns" + if c.config.NetNSMountsUnderStateDir { + netnsMountDir = filepath.Join(c.config.StateDir, "netns") + } + sandbox.NetNS, err = netns.NewNetNS(netnsMountDir) + if err != nil { + return nil, fmt.Errorf("failed to create network namespace for sandbox %q: %w", id, err) + } + sandbox.NetNSPath = sandbox.NetNS.GetPath() + + defer func() { + // Remove the network namespace only if all the resource cleanup is done. + if retErr != nil && cleanupErr == nil { + if cleanupErr = sandbox.NetNS.Remove(); cleanupErr != nil { + log.G(ctx).WithError(cleanupErr).Errorf("Failed to remove network namespace %s for sandbox %q", sandbox.NetNSPath, id) + return + } + sandbox.NetNSPath = "" + } + }() + + // Update network namespace in the container's spec + c.updateNetNamespacePath(spec, sandbox.NetNSPath) + + if err := container.Update(ctx, + // Update spec of the container + containerd.UpdateContainerOpts(containerd.WithSpec(spec)), + // Update sandbox metadata to include NetNS info + containerd.UpdateContainerOpts(containerd.WithContainerExtension(sandboxMetadataExtension, &sandbox.Metadata)), + ); err != nil { + return nil, fmt.Errorf("failed to update the network namespace for the sandbox container %q: %w", id, err) + } + + // Define this defer to teardownPodNetwork prior to the setupPodNetwork function call. 
+ // This is because in setupPodNetwork the resource is allocated even if it returns error, unlike other resource creation functions. + defer func() { + // Teardown the network only if all the resource cleanup is done. + if retErr != nil && cleanupErr == nil { + deferCtx, deferCancel := ctrdutil.DeferContext() + defer deferCancel() + // Teardown network if an error is returned. + if cleanupErr = c.teardownPodNetwork(deferCtx, sandbox); cleanupErr != nil { + log.G(ctx).WithError(cleanupErr).Errorf("Failed to destroy network for sandbox %q", id) + } + } + }() + + // Setup network for sandbox. + // Certain VM based solutions like clear containers (Issue containerd/cri-containerd#524) + // rely on the assumption that CRI shim will not be querying the network namespace to check the + // network states such as IP. + // In future runtime implementation should avoid relying on CRI shim implementation details. + // In this case however caching the IP will add a subtle performance enhancement by avoiding + // calls to network namespace of the pod to query the IP of the veth interface on every + // SandboxStatus request. + if err := c.setupPodNetwork(ctx, &sandbox); err != nil { + return nil, fmt.Errorf("failed to setup network for sandbox %q: %w", id, err) + } + + // Update metadata here to save CNI result and pod IPs to disk. + if err := container.Update(ctx, + // Update sandbox metadata to include NetNS info + containerd.UpdateContainerOpts(containerd.WithContainerExtension(sandboxMetadataExtension, &sandbox.Metadata)), + ); err != nil { + return nil, fmt.Errorf("failed to update the network namespace for the sandbox container %q: %w", id, err) + } + + sandboxCreateNetworkTimer.UpdateSince(netStart) } // Create sandbox task in containerd. 
@@ -276,10 +342,13 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox id, name) taskOpts := c.taskOpts(ociRuntime.Type) + if ociRuntime.Path != "" { + taskOpts = append(taskOpts, containerd.WithRuntimePath(ociRuntime.Path)) + } // We don't need stdio for sandbox container. task, err := container.NewTask(ctx, containerdio.NullIO, taskOpts...) if err != nil { - return nil, errors.Wrap(err, "failed to create containerd task") + return nil, fmt.Errorf("failed to create containerd task: %w", err) } defer func() { if retErr != nil { @@ -288,6 +357,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox // Cleanup the sandbox container if an error is returned. if _, err := task.Delete(deferCtx, WithNRISandboxDelete(id), containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) { log.G(ctx).WithError(err).Errorf("Failed to delete sandbox container %q", id) + cleanupErr = err } } }() @@ -295,12 +365,12 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox // wait is a long running background request, no timeout needed. 
exitCh, err := task.Wait(ctrdutil.NamespacedContext()) if err != nil { - return nil, errors.Wrap(err, "failed to wait for sandbox container task") + return nil, fmt.Errorf("failed to wait for sandbox container task: %w", err) } nric, err := nri.New() if err != nil { - return nil, errors.Wrap(err, "unable to create nri client") + return nil, fmt.Errorf("unable to create nri client: %w", err) } if nric != nil { nriSB := &nri.Sandbox{ @@ -308,12 +378,12 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox Labels: config.Labels, } if _, err := nric.InvokeWithSandbox(ctx, task, v1.Create, nriSB); err != nil { - return nil, errors.Wrap(err, "nri invoke") + return nil, fmt.Errorf("nri invoke: %w", err) } } if err := task.Start(ctx); err != nil { - return nil, errors.Wrapf(err, "failed to start sandbox container task %q", id) + return nil, fmt.Errorf("failed to start sandbox container task %q: %w", id, err) } if err := sandbox.Status.Update(func(status sandboxstore.Status) (sandboxstore.Status, error) { @@ -323,14 +393,11 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox status.CreatedAt = info.CreatedAt return status, nil }); err != nil { - return nil, errors.Wrap(err, "failed to update sandbox status") + return nil, fmt.Errorf("failed to update sandbox status: %w", err) } - // Add sandbox into sandbox store in INIT state. - sandbox.Container = container - if err := c.sandboxStore.Add(sandbox); err != nil { - return nil, errors.Wrapf(err, "failed to add sandbox %+v into store", sandbox) + return nil, fmt.Errorf("failed to add sandbox %+v into store: %w", sandbox, err) } // start the monitor after adding sandbox into the store, this ensures @@ -340,37 +407,59 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox // but we don't care about sandbox TaskOOM right now, so it is fine. 
c.eventMonitor.startSandboxExitMonitor(context.Background(), id, task.Pid(), exitCh) + sandboxRuntimeCreateTimer.WithValues(ociRuntime.Type).UpdateSince(runtimeStart) + return &runtime.RunPodSandboxResponse{PodSandboxId: id}, nil } +// getNetworkPlugin returns the network plugin to be used by the runtime class +// defaults to the global CNI options in the CRI config +func (c *criService) getNetworkPlugin(runtimeClass string) cni.CNI { + if c.netPlugin == nil { + return nil + } + i, ok := c.netPlugin[runtimeClass] + if !ok { + if i, ok = c.netPlugin[defaultNetworkPlugin]; !ok { + return nil + } + } + return i +} + // setupPodNetwork setups up the network for a pod func (c *criService) setupPodNetwork(ctx context.Context, sandbox *sandboxstore.Sandbox) error { var ( - id = sandbox.ID - config = sandbox.Config - path = sandbox.NetNSPath + id = sandbox.ID + config = sandbox.Config + path = sandbox.NetNSPath + netPlugin = c.getNetworkPlugin(sandbox.RuntimeHandler) ) - if c.netPlugin == nil { + if netPlugin == nil { return errors.New("cni config not initialized") } opts, err := cniNamespaceOpts(id, config) if err != nil { - return errors.Wrap(err, "get cni namespace options") + return fmt.Errorf("get cni namespace options: %w", err) } - - result, err := c.netPlugin.Setup(ctx, id, path, opts...) + log.G(ctx).WithField("podsandboxid", id).Debugf("begin cni setup") + netStart := time.Now() + result, err := netPlugin.Setup(ctx, id, path, opts...) 
+ networkPluginOperations.WithValues(networkSetUpOp).Inc() + networkPluginOperationsLatency.WithValues(networkSetUpOp).UpdateSince(netStart) if err != nil { + networkPluginOperationsErrors.WithValues(networkSetUpOp).Inc() return err } logDebugCNIResult(ctx, id, result) // Check if the default interface has IP config if configs, ok := result.Interfaces[defaultIfName]; ok && len(configs.IPConfigs) > 0 { - sandbox.IP, sandbox.AdditionalIPs = selectPodIPs(configs.IPConfigs) + sandbox.IP, sandbox.AdditionalIPs = selectPodIPs(ctx, configs.IPConfigs, c.config.IPPreference) sandbox.CNIResult = result return nil } - return errors.Errorf("failed to find network info for sandbox %q", id) + return fmt.Errorf("failed to find network info for sandbox %q", id) } // cniNamespaceOpts get CNI namespace options from sandbox config. @@ -418,7 +507,7 @@ func toCNILabels(id string, config *runtime.PodSandboxConfig) map[string]string func toCNIBandWidth(annotations map[string]string) (*cni.BandWidth, error) { ingress, egress, err := bandwidth.ExtractPodBandwidthResources(annotations) if err != nil { - return nil, errors.Wrap(err, "reading pod bandwidth annotations") + return nil, fmt.Errorf("reading pod bandwidth annotations: %w", err) } if ingress == nil && egress == nil { @@ -469,28 +558,46 @@ func toCNIDNS(dns *runtime.DNSConfig) *cni.DNS { } } -// selectPodIPs select an ip from the ip list. It prefers ipv4 more than ipv6 -// and returns the additional ips -// TODO(random-liu): Revisit the ip order in the ipv6 beta stage. (cri#1278) -func selectPodIPs(ipConfigs []*cni.IPConfig) (string, []string) { - var ( - additionalIPs []string - ip string - ) - for _, c := range ipConfigs { - if c.IP.To4() != nil && ip == "" { - ip = c.IP.String() - } else { - additionalIPs = append(additionalIPs, c.IP.String()) +// selectPodIPs select an ip from the ip list. 
+func selectPodIPs(ctx context.Context, configs []*cni.IPConfig, preference string) (string, []string) { + if len(configs) == 1 { + return ipString(configs[0]), nil + } + toStrings := func(ips []*cni.IPConfig) (o []string) { + for _, i := range ips { + o = append(o, ipString(i)) } + return o } - if ip != "" { - return ip, additionalIPs + var extra []string + switch preference { + default: + if preference != "ipv4" && preference != "" { + log.G(ctx).WithField("ip_pref", preference).Warn("invalid ip_pref, falling back to ipv4") + } + for i, ip := range configs { + if ip.IP.To4() != nil { + return ipString(ip), append(extra, toStrings(configs[i+1:])...) + } + extra = append(extra, ipString(ip)) + } + case "ipv6": + for i, ip := range configs { + if ip.IP.To16() != nil { + return ipString(ip), append(extra, toStrings(configs[i+1:])...) + } + extra = append(extra, ipString(ip)) + } + case "cni": + // use func default return } - if len(ipConfigs) == 1 { - return additionalIPs[0], nil - } - return additionalIPs[0], additionalIPs[1:] + + all := toStrings(configs) + return all[0], all[1:] +} + +func ipString(ip *cni.IPConfig) string { + return ip.IP.String() } // untrustedWorkload returns true if the sandbox contains untrusted workload. 
@@ -542,7 +649,7 @@ func (c *criService) getSandboxRuntime(config *runtime.PodSandboxConfig, runtime handler, ok := c.config.ContainerdConfig.Runtimes[runtimeHandler] if !ok { - return criconfig.Runtime{}, errors.Errorf("no runtime for %q is configured", runtimeHandler) + return criconfig.Runtime{}, fmt.Errorf("no runtime for %q is configured", runtimeHandler) } return handler, nil } @@ -553,8 +660,8 @@ func logDebugCNIResult(ctx context.Context, sandboxID string, result *cni.Result } cniResult, err := json.Marshal(result) if err != nil { - log.G(ctx).WithError(err).Errorf("Failed to marshal CNI result for sandbox %q: %v", sandboxID, err) + log.G(ctx).WithField("podsandboxid", sandboxID).WithError(err).Errorf("Failed to marshal CNI result: %v", err) return } - log.G(ctx).Debugf("cni result for sandbox %q: %s", sandboxID, string(cniResult)) + log.G(ctx).WithField("podsandboxid", sandboxID).Debugf("cni result: %s", string(cniResult)) } diff --git a/pkg/cri/server/sandbox_run_linux.go b/pkg/cri/server/sandbox_run_linux.go index 85cfeb3..577d25e 100644 --- a/pkg/cri/server/sandbox_run_linux.go +++ b/pkg/cri/server/sandbox_run_linux.go @@ -19,6 +19,7 @@ package server import ( "fmt" "os" + "strconv" "strings" "github.com/containerd/containerd" @@ -27,13 +28,13 @@ import ( imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" selinux "github.com/opencontainers/selinux/go-selinux" - "github.com/pkg/errors" "golang.org/x/sys/unix" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/annotations" customopts "github.com/containerd/containerd/pkg/cri/opts" osinterface "github.com/containerd/containerd/pkg/os" + "github.com/containerd/containerd/pkg/userns" ) func (c *criService) sandboxContainerSpec(id string, config *runtime.PodSandboxConfig, @@ -54,7 +55,7 @@ func (c *criService) sandboxContainerSpec(id string, config 
*runtime.PodSandboxC if len(imageConfig.Entrypoint) == 0 && len(imageConfig.Cmd) == 0 { // Pause image must have entrypoint or cmd. - return nil, errors.Errorf("invalid empty entrypoint and cmd in image config %+v", imageConfig) + return nil, fmt.Errorf("invalid empty entrypoint and cmd in image config %+v", imageConfig) } specOpts = append(specOpts, oci.WithProcessArgs(append(imageConfig.Entrypoint, imageConfig.Cmd...)...)) @@ -118,7 +119,7 @@ func (c *criService) sandboxContainerSpec(id string, config *runtime.PodSandboxC processLabel, mountLabel, err := initLabelsFromOpt(securityContext.GetSelinuxOptions()) if err != nil { - return nil, errors.Wrapf(err, "failed to init selinux options %+v", securityContext.GetSelinuxOptions()) + return nil, fmt.Errorf("failed to init selinux options %+v: %w", securityContext.GetSelinuxOptions(), err) } defer func() { if retErr != nil { @@ -134,6 +135,19 @@ func (c *criService) sandboxContainerSpec(id string, config *runtime.PodSandboxC // Add sysctls sysctls := config.GetLinux().GetSysctls() + if sysctls == nil { + sysctls = make(map[string]string) + } + _, ipUnprivilegedPortStart := sysctls["net.ipv4.ip_unprivileged_port_start"] + _, pingGroupRange := sysctls["net.ipv4.ping_group_range"] + if nsOptions.GetNetwork() != runtime.NamespaceMode_NODE { + if c.config.EnableUnprivilegedPorts && !ipUnprivilegedPortStart { + sysctls["net.ipv4.ip_unprivileged_port_start"] = "0" + } + if c.config.EnableUnprivilegedICMP && !pingGroupRange && !userns.RunningInUserNS() { + sysctls["net.ipv4.ping_group_range"] = "0 2147483647" + } + } specOpts = append(specOpts, customopts.WithSysctls(sysctls)) // Note: LinuxSandboxSecurityContext does not currently provide an apparmor profile @@ -141,6 +155,15 @@ func (c *criService) sandboxContainerSpec(id string, config *runtime.PodSandboxC if !c.config.DisableCgroup { specOpts = append(specOpts, customopts.WithDefaultSandboxShares) } + + if res := config.GetLinux().GetResources(); res != nil { + specOpts 
= append(specOpts, + customopts.WithAnnotation(annotations.SandboxCPUPeriod, strconv.FormatInt(res.CpuPeriod, 10)), + customopts.WithAnnotation(annotations.SandboxCPUQuota, strconv.FormatInt(res.CpuQuota, 10)), + customopts.WithAnnotation(annotations.SandboxCPUShares, strconv.FormatInt(res.CpuShares, 10)), + customopts.WithAnnotation(annotations.SandboxMem, strconv.FormatInt(res.MemoryLimitInBytes, 10))) + } + specOpts = append(specOpts, customopts.WithPodOOMScoreAdj(int(defaultSandboxOOMAdj), c.config.RestrictOOMScoreAdj)) for pKey, pValue := range getPassthroughAnnotations(config.Annotations, @@ -152,6 +175,7 @@ func (c *criService) sandboxContainerSpec(id string, config *runtime.PodSandboxC customopts.WithAnnotation(annotations.ContainerType, annotations.ContainerTypeSandbox), customopts.WithAnnotation(annotations.SandboxID, id), customopts.WithAnnotation(annotations.SandboxNamespace, config.GetMetadata().GetNamespace()), + customopts.WithAnnotation(annotations.SandboxUID, config.GetMetadata().GetUid()), customopts.WithAnnotation(annotations.SandboxName, config.GetMetadata().GetName()), customopts.WithAnnotation(annotations.SandboxLogDir, config.GetLogDirectory()), ) @@ -173,7 +197,7 @@ func (c *criService) sandboxContainerSpecOpts(config *runtime.PodSandboxConfig, securityContext.GetSeccompProfilePath(), //nolint:staticcheck // Deprecated but we don't want to remove yet c.config.UnsetSeccompProfile) if err != nil { - return nil, errors.Wrap(err, "failed to generate seccomp spec opts") + return nil, fmt.Errorf("failed to generate seccomp spec opts: %w", err) } } seccompSpecOpts, err := c.generateSeccompSpecOpts( @@ -181,7 +205,7 @@ func (c *criService) sandboxContainerSpecOpts(config *runtime.PodSandboxConfig, securityContext.GetPrivileged(), c.seccompEnabled()) if err != nil { - return nil, errors.Wrap(err, "failed to generate seccomp spec opts") + return nil, fmt.Errorf("failed to generate seccomp spec opts: %w", err) } if seccompSpecOpts != nil { specOpts = 
append(specOpts, seccompSpecOpts) @@ -193,7 +217,7 @@ func (c *criService) sandboxContainerSpecOpts(config *runtime.PodSandboxConfig, securityContext.GetRunAsGroup(), ) if err != nil { - return nil, errors.Wrap(err, "failed to generate user string") + return nil, fmt.Errorf("failed to generate user string: %w", err) } if userstr == "" { // Lastly, since no user override was passed via CRI try to set via OCI @@ -215,17 +239,17 @@ func (c *criService) setupSandboxFiles(id string, config *runtime.PodSandboxConf var err error hostname, err = c.os.Hostname() if err != nil { - return errors.Wrap(err, "failed to get hostname") + return fmt.Errorf("failed to get hostname: %w", err) } } if err := c.os.WriteFile(sandboxEtcHostname, []byte(hostname+"\n"), 0644); err != nil { - return errors.Wrapf(err, "failed to write hostname to %q", sandboxEtcHostname) + return fmt.Errorf("failed to write hostname to %q: %w", sandboxEtcHostname, err) } // TODO(random-liu): Consider whether we should maintain /etc/hosts and /etc/resolv.conf in kubelet. sandboxEtcHosts := c.getSandboxHosts(id) if err := c.os.CopyFile(etcHosts, sandboxEtcHosts, 0644); err != nil { - return errors.Wrapf(err, "failed to generate sandbox hosts file %q", sandboxEtcHosts) + return fmt.Errorf("failed to generate sandbox hosts file %q: %w", sandboxEtcHosts, err) } // Set DNS options. Maintain a resolv.conf for the sandbox. 
@@ -234,7 +258,7 @@ func (c *criService) setupSandboxFiles(id string, config *runtime.PodSandboxConf if dnsConfig := config.GetDnsConfig(); dnsConfig != nil { resolvContent, err = parseDNSOptions(dnsConfig.Servers, dnsConfig.Searches, dnsConfig.Options) if err != nil { - return errors.Wrapf(err, "failed to parse sandbox DNSConfig %+v", dnsConfig) + return fmt.Errorf("failed to parse sandbox DNSConfig %+v: %w", dnsConfig, err) } } resolvPath := c.getResolvPath(id) @@ -242,28 +266,28 @@ func (c *criService) setupSandboxFiles(id string, config *runtime.PodSandboxConf // copy host's resolv.conf to resolvPath err = c.os.CopyFile(resolvConfPath, resolvPath, 0644) if err != nil { - return errors.Wrapf(err, "failed to copy host's resolv.conf to %q", resolvPath) + return fmt.Errorf("failed to copy host's resolv.conf to %q: %w", resolvPath, err) } } else { err = c.os.WriteFile(resolvPath, []byte(resolvContent), 0644) if err != nil { - return errors.Wrapf(err, "failed to write resolv content to %q", resolvPath) + return fmt.Errorf("failed to write resolv content to %q: %w", resolvPath, err) } } // Setup sandbox /dev/shm. 
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetIpc() == runtime.NamespaceMode_NODE { if _, err := c.os.Stat(devShm); err != nil { - return errors.Wrapf(err, "host %q is not available for host ipc", devShm) + return fmt.Errorf("host %q is not available for host ipc: %w", devShm, err) } } else { sandboxDevShm := c.getSandboxDevShm(id) if err := c.os.MkdirAll(sandboxDevShm, 0700); err != nil { - return errors.Wrap(err, "failed to create sandbox shm") + return fmt.Errorf("failed to create sandbox shm: %w", err) } shmproperty := fmt.Sprintf("mode=1777,size=%d", defaultShmSize) if err := c.os.(osinterface.UNIX).Mount("shm", sandboxDevShm, "tmpfs", uintptr(unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV), shmproperty); err != nil { - return errors.Wrap(err, "failed to mount sandbox shm") + return fmt.Errorf("failed to mount sandbox shm: %w", err) } } @@ -296,10 +320,10 @@ func (c *criService) cleanupSandboxFiles(id string, config *runtime.PodSandboxCo if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetIpc() != runtime.NamespaceMode_NODE { path, err := c.os.FollowSymlinkInScope(c.getSandboxDevShm(id), "/") if err != nil { - return errors.Wrap(err, "failed to follow symlink") + return fmt.Errorf("failed to follow symlink: %w", err) } if err := c.os.(osinterface.UNIX).Unmount(path); err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "failed to unmount %q", path) + return fmt.Errorf("failed to unmount %q: %w", path, err) } } return nil @@ -320,3 +344,12 @@ func (c *criService) taskOpts(runtimeType string) []containerd.NewTaskOpts { return taskOpts } + +func (c *criService) updateNetNamespacePath(spec *runtimespec.Spec, nsPath string) { + for i := range spec.Linux.Namespaces { + if spec.Linux.Namespaces[i].Type == runtimespec.NetworkNamespace { + spec.Linux.Namespaces[i].Path = nsPath + break + } + } +} diff --git a/pkg/cri/server/sandbox_run_linux_test.go b/pkg/cri/server/sandbox_run_linux_test.go index f4b3160..6bb2f07 100644 --- 
a/pkg/cri/server/sandbox_run_linux_test.go +++ b/pkg/cri/server/sandbox_run_linux_test.go @@ -19,6 +19,7 @@ package server import ( "os" "path/filepath" + "strconv" "testing" imagespec "github.com/opencontainers/image-spec/specs-go/v1" @@ -26,7 +27,8 @@ import ( "github.com/opencontainers/selinux/go-selinux" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + v1 "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/annotations" "github.com/containerd/containerd/pkg/cri/opts" @@ -76,6 +78,9 @@ func getRunPodSandboxTestData() (*runtime.PodSandboxConfig, *imagespec.ImageConf assert.Contains(t, spec.Annotations, annotations.SandboxNamespace) assert.EqualValues(t, spec.Annotations[annotations.SandboxNamespace], "test-ns") + assert.Contains(t, spec.Annotations, annotations.SandboxUID) + assert.EqualValues(t, spec.Annotations[annotations.SandboxUID], "test-uid") + assert.Contains(t, spec.Annotations, annotations.SandboxName) assert.EqualValues(t, spec.Annotations[annotations.SandboxName], "test-name") @@ -115,6 +120,8 @@ func TestLinuxSandboxContainerSpec(t *testing.T) { assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{ Type: runtimespec.IPCNamespace, }) + assert.Contains(t, spec.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], "0") + assert.Contains(t, spec.Linux.Sysctl["net.ipv4.ping_group_range"], "0 2147483647") }, }, "host namespace": { @@ -142,6 +149,8 @@ func TestLinuxSandboxContainerSpec(t *testing.T) { assert.NotContains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{ Type: runtimespec.IPCNamespace, }) + assert.NotContains(t, spec.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], "0") + assert.NotContains(t, spec.Linux.Sysctl["net.ipv4.ping_group_range"], "0 2147483647") }, }, "should set supplemental groups correctly": { @@ -156,9 +165,83 @@ func TestLinuxSandboxContainerSpec(t 
*testing.T) { assert.Contains(t, spec.Process.User.AdditionalGids, uint32(2222)) }, }, + "should overwrite default sysctls": { + configChange: func(c *runtime.PodSandboxConfig) { + c.Linux.Sysctls = map[string]string{ + "net.ipv4.ip_unprivileged_port_start": "500", + "net.ipv4.ping_group_range": "1 1000", + } + }, + specCheck: func(t *testing.T, spec *runtimespec.Spec) { + require.NotNil(t, spec.Process) + assert.Contains(t, spec.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], "500") + assert.Contains(t, spec.Linux.Sysctl["net.ipv4.ping_group_range"], "1 1000") + }, + }, + "sandbox sizing annotations should be set if LinuxContainerResources were provided": { + configChange: func(c *runtime.PodSandboxConfig) { + c.Linux.Resources = &v1.LinuxContainerResources{ + CpuPeriod: 100, + CpuQuota: 200, + CpuShares: 5000, + MemoryLimitInBytes: 1024, + } + }, + specCheck: func(t *testing.T, spec *runtimespec.Spec) { + value, ok := spec.Annotations[annotations.SandboxCPUPeriod] + assert.True(t, ok) + assert.EqualValues(t, strconv.FormatInt(100, 10), value) + assert.EqualValues(t, "100", value) + + value, ok = spec.Annotations[annotations.SandboxCPUQuota] + assert.True(t, ok) + assert.EqualValues(t, "200", value) + + value, ok = spec.Annotations[annotations.SandboxCPUShares] + assert.True(t, ok) + assert.EqualValues(t, "5000", value) + + value, ok = spec.Annotations[annotations.SandboxMem] + assert.True(t, ok) + assert.EqualValues(t, "1024", value) + }, + }, + "sandbox sizing annotations should not be set if LinuxContainerResources were not provided": { + specCheck: func(t *testing.T, spec *runtimespec.Spec) { + _, ok := spec.Annotations[annotations.SandboxCPUPeriod] + assert.False(t, ok) + _, ok = spec.Annotations[annotations.SandboxCPUQuota] + assert.False(t, ok) + _, ok = spec.Annotations[annotations.SandboxCPUShares] + assert.False(t, ok) + _, ok = spec.Annotations[annotations.SandboxMem] + assert.False(t, ok) + }, + }, + "sandbox sizing annotations are zero if the 
resources are set to 0": { + configChange: func(c *runtime.PodSandboxConfig) { + c.Linux.Resources = &v1.LinuxContainerResources{} + }, + specCheck: func(t *testing.T, spec *runtimespec.Spec) { + value, ok := spec.Annotations[annotations.SandboxCPUPeriod] + assert.True(t, ok) + assert.EqualValues(t, "0", value) + value, ok = spec.Annotations[annotations.SandboxCPUQuota] + assert.True(t, ok) + assert.EqualValues(t, "0", value) + value, ok = spec.Annotations[annotations.SandboxCPUShares] + assert.True(t, ok) + assert.EqualValues(t, "0", value) + value, ok = spec.Annotations[annotations.SandboxMem] + assert.True(t, ok) + assert.EqualValues(t, "0", value) + }, + }, } { t.Logf("TestCase %q", desc) c := newTestCRIService() + c.config.EnableUnprivilegedICMP = true + c.config.EnableUnprivilegedPorts = true config, imageConfig, specCheck := getRunPodSandboxTestData() if test.configChange != nil { test.configChange(config) diff --git a/pkg/cri/server/sandbox_run_other.go b/pkg/cri/server/sandbox_run_other.go index 61d3904..cd39d6f 100644 --- a/pkg/cri/server/sandbox_run_other.go +++ b/pkg/cri/server/sandbox_run_other.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* @@ -23,7 +24,7 @@ import ( "github.com/containerd/containerd/oci" imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func (c *criService) sandboxContainerSpec(id string, config *runtime.PodSandboxConfig, @@ -53,3 +54,6 @@ func (c *criService) cleanupSandboxFiles(id string, config *runtime.PodSandboxCo func (c *criService) taskOpts(runtimeType string) []containerd.NewTaskOpts { return []containerd.NewTaskOpts{} } + +func (c *criService) updateNetNamespacePath(spec *runtimespec.Spec, nsPath string) { +} diff --git a/pkg/cri/server/sandbox_run_other_test.go b/pkg/cri/server/sandbox_run_other_test.go index 
daf9039..b875089 100644 --- a/pkg/cri/server/sandbox_run_other_test.go +++ b/pkg/cri/server/sandbox_run_other_test.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* @@ -23,7 +24,7 @@ import ( imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func getRunPodSandboxTestData() (*runtime.PodSandboxConfig, *imagespec.ImageConfig, func(*testing.T, string, *runtimespec.Spec)) { diff --git a/pkg/cri/server/sandbox_run_test.go b/pkg/cri/server/sandbox_run_test.go index 75960b3..ea6e785 100644 --- a/pkg/cri/server/sandbox_run_test.go +++ b/pkg/cri/server/sandbox_run_test.go @@ -26,7 +26,8 @@ import ( imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/annotations" criconfig "github.com/containerd/containerd/pkg/cri/config" @@ -260,12 +261,26 @@ func TestSelectPodIP(t *testing.T) { ips []string expectedIP string expectedAdditionalIPs []string + pref string }{ "ipv4 should be picked even if ipv6 comes first": { ips: []string{"2001:db8:85a3::8a2e:370:7334", "192.168.17.43"}, expectedIP: "192.168.17.43", expectedAdditionalIPs: []string{"2001:db8:85a3::8a2e:370:7334"}, }, + "ipv6 should be picked even if ipv4 comes first": { + ips: []string{"2001:db8:85a3::8a2e:370:7334", "192.168.17.43"}, + expectedIP: "2001:db8:85a3::8a2e:370:7334", + expectedAdditionalIPs: []string{"192.168.17.43"}, + pref: "ipv6", + }, + "order should reflect ip selection": { + ips: []string{"2001:db8:85a3::8a2e:370:7334", "192.168.17.43"}, + expectedIP: "2001:db8:85a3::8a2e:370:7334", + expectedAdditionalIPs: []string{"192.168.17.43"}, 
+ pref: "cni", + }, + "ipv4 should be picked when there is only ipv4": { ips: []string{"192.168.17.43"}, expectedIP: "192.168.17.43", @@ -289,7 +304,7 @@ func TestSelectPodIP(t *testing.T) { IP: net.ParseIP(ip), }) } - ip, additionalIPs := selectPodIPs(ipConfigs) + ip, additionalIPs := selectPodIPs(context.Background(), ipConfigs, test.pref) assert.Equal(t, test.expectedIP, ip) assert.Equal(t, test.expectedAdditionalIPs, additionalIPs) } diff --git a/pkg/cri/server/sandbox_run_windows.go b/pkg/cri/server/sandbox_run_windows.go index 0bc4b78..10c8e5c 100644 --- a/pkg/cri/server/sandbox_run_windows.go +++ b/pkg/cri/server/sandbox_run_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -19,12 +17,14 @@ package server import ( + "fmt" + "strconv" + "github.com/containerd/containerd" "github.com/containerd/containerd/oci" imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/annotations" customopts "github.com/containerd/containerd/pkg/cri/opts" @@ -43,7 +43,7 @@ func (c *criService) sandboxContainerSpec(id string, config *runtime.PodSandboxC if len(imageConfig.Entrypoint) == 0 && len(imageConfig.Cmd) == 0 { // Pause image must have entrypoint or cmd. - return nil, errors.Errorf("invalid empty entrypoint and cmd in image config %+v", imageConfig) + return nil, fmt.Errorf("invalid empty entrypoint and cmd in image config %+v", imageConfig) } specOpts = append(specOpts, oci.WithProcessArgs(append(imageConfig.Entrypoint, imageConfig.Cmd...)...)) @@ -56,6 +56,25 @@ func (c *criService) sandboxContainerSpec(id string, config *runtime.PodSandboxC specOpts = append(specOpts, customopts.WithWindowsDefaultSandboxShares) + // Start with the image config user and override below if RunAsUsername is not "". 
+ username := imageConfig.User + + runAsUser := config.GetWindows().GetSecurityContext().GetRunAsUsername() + if runAsUser != "" { + username = runAsUser + } + + cs := config.GetWindows().GetSecurityContext().GetCredentialSpec() + if cs != "" { + specOpts = append(specOpts, customopts.WithWindowsCredentialSpec(cs)) + } + + // There really isn't a good Windows way to verify that the username is available in the + // image as early as here like there is for Linux. Later on in the stack hcsshim + // will handle the behavior of erroring out if the user isn't available in the image + // when trying to run the init process. + specOpts = append(specOpts, oci.WithUser(username)) + for pKey, pValue := range getPassthroughAnnotations(config.Annotations, runtimePodAnnotations) { specOpts = append(specOpts, customopts.WithAnnotation(pKey, pValue)) @@ -65,8 +84,10 @@ func (c *criService) sandboxContainerSpec(id string, config *runtime.PodSandboxC customopts.WithAnnotation(annotations.ContainerType, annotations.ContainerTypeSandbox), customopts.WithAnnotation(annotations.SandboxID, id), customopts.WithAnnotation(annotations.SandboxNamespace, config.GetMetadata().GetNamespace()), + customopts.WithAnnotation(annotations.SandboxUID, config.GetMetadata().GetUid()), customopts.WithAnnotation(annotations.SandboxName, config.GetMetadata().GetName()), customopts.WithAnnotation(annotations.SandboxLogDir, config.GetLogDirectory()), + customopts.WithAnnotation(annotations.WindowsHostProcess, strconv.FormatBool(config.GetWindows().GetSecurityContext().GetHostProcess())), ) return c.runtimeSpec(id, "", specOpts...) 
@@ -91,3 +112,7 @@ func (c *criService) cleanupSandboxFiles(id string, config *runtime.PodSandboxCo func (c *criService) taskOpts(runtimeType string) []containerd.NewTaskOpts { return nil } + +func (c *criService) updateNetNamespacePath(spec *runtimespec.Spec, nsPath string) { + spec.Windows.Network.NetworkNamespace = nsPath +} diff --git a/pkg/cri/server/sandbox_run_windows_test.go b/pkg/cri/server/sandbox_run_windows_test.go index d51e2d0..358f2ea 100644 --- a/pkg/cri/server/sandbox_run_windows_test.go +++ b/pkg/cri/server/sandbox_run_windows_test.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -24,7 +22,7 @@ import ( imagespec "github.com/opencontainers/image-spec/specs-go/v1" runtimespec "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/annotations" "github.com/containerd/containerd/pkg/cri/opts" @@ -42,12 +40,20 @@ func getRunPodSandboxTestData() (*runtime.PodSandboxConfig, *imagespec.ImageConf LogDirectory: "test-log-directory", Labels: map[string]string{"a": "b"}, Annotations: map[string]string{"c": "d"}, + Windows: &runtime.WindowsPodSandboxConfig{ + SecurityContext: &runtime.WindowsSandboxSecurityContext{ + RunAsUsername: "test-user", + CredentialSpec: "{\"test\": \"spec\"}", + HostProcess: false, + }, + }, } imageConfig := &imagespec.ImageConfig{ Env: []string{"a=b", "c=d"}, Entrypoint: []string{"/pause"}, Cmd: []string{"forever"}, WorkingDir: "/workspace", + User: "test-image-user", } specCheck := func(t *testing.T, id string, spec *runtimespec.Spec) { assert.Equal(t, "test-hostname", spec.Hostname) @@ -57,6 +63,13 @@ func getRunPodSandboxTestData() (*runtime.PodSandboxConfig, *imagespec.ImageConf assert.Equal(t, "/workspace", spec.Process.Cwd) assert.EqualValues(t, *spec.Windows.Resources.CPU.Shares, opts.DefaultSandboxCPUshares) + // Also checks 
if override of the image configs user is behaving. + t.Logf("Check username") + assert.Contains(t, spec.Process.User.Username, "test-user") + + t.Logf("Check credential spec") + assert.Contains(t, spec.Windows.CredentialSpec, "{\"test\": \"spec\"}") + t.Logf("Check PodSandbox annotations") assert.Contains(t, spec.Annotations, annotations.SandboxID) assert.EqualValues(t, spec.Annotations[annotations.SandboxID], id) @@ -67,11 +80,17 @@ func getRunPodSandboxTestData() (*runtime.PodSandboxConfig, *imagespec.ImageConf assert.Contains(t, spec.Annotations, annotations.SandboxNamespace) assert.EqualValues(t, spec.Annotations[annotations.SandboxNamespace], "test-ns") + assert.Contains(t, spec.Annotations, annotations.SandboxUID) + assert.EqualValues(t, spec.Annotations[annotations.SandboxUID], "test-uid") + assert.Contains(t, spec.Annotations, annotations.SandboxName) assert.EqualValues(t, spec.Annotations[annotations.SandboxName], "test-name") assert.Contains(t, spec.Annotations, annotations.SandboxLogDir) assert.EqualValues(t, spec.Annotations[annotations.SandboxLogDir], "test-log-directory") + + assert.Contains(t, spec.Annotations, annotations.WindowsHostProcess) + assert.EqualValues(t, spec.Annotations[annotations.WindowsHostProcess], "false") } return config, imageConfig, specCheck } diff --git a/pkg/cri/server/sandbox_stats.go b/pkg/cri/server/sandbox_stats.go new file mode 100644 index 0000000..6f831cf --- /dev/null +++ b/pkg/cri/server/sandbox_stats.go @@ -0,0 +1,47 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "fmt" + + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" +) + +func (c *criService) PodSandboxStats( + ctx context.Context, + r *runtime.PodSandboxStatsRequest, +) (*runtime.PodSandboxStatsResponse, error) { + + sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId()) + if err != nil { + return nil, fmt.Errorf("an error occurred when trying to find sandbox %s: %w", r.GetPodSandboxId(), err) + } + + metrics, err := metricsForSandbox(sandbox) + if err != nil { + return nil, fmt.Errorf("failed getting metrics for sandbox %s: %w", r.GetPodSandboxId(), err) + } + + podSandboxStats, err := c.podSandboxStats(ctx, sandbox, metrics) + if err != nil { + return nil, fmt.Errorf("failed to decode pod sandbox metrics %s: %w", r.GetPodSandboxId(), err) + } + + return &runtime.PodSandboxStatsResponse{Stats: podSandboxStats}, nil +} diff --git a/pkg/cri/server/sandbox_stats_linux.go b/pkg/cri/server/sandbox_stats_linux.go new file mode 100644 index 0000000..bd06cb5 --- /dev/null +++ b/pkg/cri/server/sandbox_stats_linux.go @@ -0,0 +1,177 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package server + +import ( + "fmt" + "time" + + "github.com/containernetworking/plugins/pkg/ns" + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + + "github.com/containerd/cgroups" + cgroupsv2 "github.com/containerd/cgroups/v2" + + "github.com/vishvananda/netlink" + + "github.com/containerd/containerd/log" + sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" +) + +func (c *criService) podSandboxStats( + ctx context.Context, + sandbox sandboxstore.Sandbox, + stats interface{}, +) (*runtime.PodSandboxStats, error) { + meta := sandbox.Metadata + + if sandbox.Status.Get().State != sandboxstore.StateReady { + return nil, fmt.Errorf("failed to get pod sandbox stats since sandbox container %q is not in ready state", meta.ID) + } + + var podSandboxStats runtime.PodSandboxStats + podSandboxStats.Attributes = &runtime.PodSandboxAttributes{ + Id: meta.ID, + Metadata: meta.Config.GetMetadata(), + Labels: meta.Config.GetLabels(), + Annotations: meta.Config.GetAnnotations(), + } + + podSandboxStats.Linux = &runtime.LinuxPodSandboxStats{} + + if stats != nil { + timestamp := time.Now() + + cpuStats, err := c.cpuContainerStats(meta.ID, true /* isSandbox */, stats, timestamp) + if err != nil { + return nil, fmt.Errorf("failed to obtain cpu stats: %w", err) + } + podSandboxStats.Linux.Cpu = cpuStats + + memoryStats, err := c.memoryContainerStats(meta.ID, stats, timestamp) + if err != nil { + return nil, fmt.Errorf("failed to obtain memory stats: %w", err) + } + podSandboxStats.Linux.Memory = memoryStats + + if sandbox.NetNSPath != "" { + rxBytes, rxErrors, txBytes, txErrors := getContainerNetIO(ctx, sandbox.NetNSPath) + podSandboxStats.Linux.Network = &runtime.NetworkUsage{ + DefaultInterface: &runtime.NetworkInterfaceUsage{ + Name: defaultIfName, + RxBytes: &runtime.UInt64Value{Value: rxBytes}, + RxErrors: &runtime.UInt64Value{Value: rxErrors}, + TxBytes: &runtime.UInt64Value{Value: txBytes}, + TxErrors: 
&runtime.UInt64Value{Value: txErrors}, + }, + } + } + + var pidCount uint64 + for _, cntr := range c.containerStore.List() { + if cntr.SandboxID != sandbox.ID { + continue + } + + state := cntr.Status.Get().State() + if state != runtime.ContainerState_CONTAINER_RUNNING { + continue + } + + task, err := cntr.Container.Task(ctx, nil) + if err != nil { + return nil, err + } + + processes, err := task.Pids(ctx) + if err != nil { + return nil, err + } + pidCount += uint64(len(processes)) + + } + podSandboxStats.Linux.Process = &runtime.ProcessUsage{ + Timestamp: timestamp.UnixNano(), + ProcessCount: &runtime.UInt64Value{Value: pidCount}, + } + + listContainerStatsRequest := &runtime.ListContainerStatsRequest{Filter: &runtime.ContainerStatsFilter{PodSandboxId: meta.ID}} + resp, err := c.ListContainerStats(ctx, listContainerStatsRequest) + if err != nil { + return nil, fmt.Errorf("failed to obtain container stats during podSandboxStats call: %w", err) + } + podSandboxStats.Linux.Containers = resp.GetStats() + } + + return &podSandboxStats, nil +} + +// https://github.com/cri-o/cri-o/blob/74a5cf8dffd305b311eb1c7f43a4781738c388c1/internal/oci/stats.go#L32 +func getContainerNetIO(ctx context.Context, netNsPath string) (rxBytes, rxErrors, txBytes, txErrors uint64) { + ns.WithNetNSPath(netNsPath, func(_ ns.NetNS) error { + link, err := netlink.LinkByName(defaultIfName) + if err != nil { + log.G(ctx).WithError(err).Errorf("unable to retrieve network namespace stats for netNsPath: %v, interface: %v", netNsPath, defaultIfName) + return err + } + attrs := link.Attrs() + if attrs != nil && attrs.Statistics != nil { + rxBytes = attrs.Statistics.RxBytes + rxErrors = attrs.Statistics.RxErrors + txBytes = attrs.Statistics.TxBytes + txErrors = attrs.Statistics.TxErrors + } + return nil + }) + + return rxBytes, rxErrors, txBytes, txErrors +} + +func metricsForSandbox(sandbox sandboxstore.Sandbox) (interface{}, error) { + cgroupPath := sandbox.Config.GetLinux().GetCgroupParent() + + if 
cgroupPath == "" { + return nil, fmt.Errorf("failed to get cgroup metrics for sandbox %v because cgroupPath is empty", sandbox.ID) + } + + var statsx interface{} + if cgroups.Mode() == cgroups.Unified { + cg, err := cgroupsv2.LoadManager("/sys/fs/cgroup", cgroupPath) + if err != nil { + return nil, fmt.Errorf("failed to load sandbox cgroup: %v: %w", cgroupPath, err) + } + stats, err := cg.Stat() + if err != nil { + return nil, fmt.Errorf("failed to get stats for cgroup: %v: %w", cgroupPath, err) + } + statsx = stats + + } else { + control, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(cgroupPath)) + if err != nil { + return nil, fmt.Errorf("failed to load sandbox cgroup %v: %w", cgroupPath, err) + } + stats, err := control.Stat(cgroups.IgnoreNotExist) + if err != nil { + return nil, fmt.Errorf("failed to get stats for cgroup %v: %w", cgroupPath, err) + } + statsx = stats + } + + return statsx, nil +} diff --git a/pkg/cri/server/sandbox_stats_list.go b/pkg/cri/server/sandbox_stats_list.go new file mode 100644 index 0000000..00eef3d --- /dev/null +++ b/pkg/cri/server/sandbox_stats_list.go @@ -0,0 +1,80 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "fmt" + + sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" +) + +// ListPodSandboxStats returns stats of all ready sandboxes. 
+func (c *criService) ListPodSandboxStats( + ctx context.Context, + r *runtime.ListPodSandboxStatsRequest, +) (*runtime.ListPodSandboxStatsResponse, error) { + sandboxes := c.sandboxesForListPodSandboxStatsRequest(r) + + podSandboxStats := new(runtime.ListPodSandboxStatsResponse) + for _, sandbox := range sandboxes { + metrics, err := metricsForSandbox(sandbox) + if err != nil { + return nil, fmt.Errorf("failed to obtain metrics for sandbox %q: %w", sandbox.ID, err) + } + + sandboxStats, err := c.podSandboxStats(ctx, sandbox, metrics) + if err != nil { + return nil, fmt.Errorf("failed to decode sandbox container metrics for sandbox %q: %w", sandbox.ID, err) + } + podSandboxStats.Stats = append(podSandboxStats.Stats, sandboxStats) + } + + return podSandboxStats, nil +} + +func (c *criService) sandboxesForListPodSandboxStatsRequest(r *runtime.ListPodSandboxStatsRequest) []sandboxstore.Sandbox { + sandboxesInStore := c.sandboxStore.List() + + if r.GetFilter() == nil { + return sandboxesInStore + } + + c.normalizePodSandboxStatsFilter(r.GetFilter()) + + var sandboxes []sandboxstore.Sandbox + for _, sandbox := range sandboxesInStore { + if r.GetFilter().GetId() != "" && sandbox.ID != r.GetFilter().GetId() { + continue + } + + if r.GetFilter().GetLabelSelector() != nil && + !matchLabelSelector(r.GetFilter().GetLabelSelector(), sandbox.Config.GetLabels()) { + continue + } + + // We can't obtain metrics for sandboxes that aren't in ready state + if sandbox.Status.Get().State != sandboxstore.StateReady { + continue + } + + sandboxes = append(sandboxes, sandbox) + } + + return sandboxes +} diff --git a/pkg/cri/server/sandbox_stats_other.go b/pkg/cri/server/sandbox_stats_other.go new file mode 100644 index 0000000..f21391e --- /dev/null +++ b/pkg/cri/server/sandbox_stats_other.go @@ -0,0 +1,38 @@ +//go:build !windows && !linux +// +build !windows,!linux + +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "fmt" + + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + + "github.com/containerd/containerd/errdefs" + sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" +) + +func (c *criService) podSandboxStats(ctx context.Context, sandbox sandboxstore.Sandbox, stats interface{}) (*runtime.PodSandboxStats, error) { + return nil, fmt.Errorf("pod sandbox stats not implemented: %w", errdefs.ErrNotImplemented) +} + +func metricsForSandbox(sandbox sandboxstore.Sandbox) (interface{}, error) { + return nil, fmt.Errorf("metrics for sandbox not implemented: %w", errdefs.ErrNotImplemented) +} diff --git a/pkg/cri/server/sandbox_stats_windows.go b/pkg/cri/server/sandbox_stats_windows.go new file mode 100644 index 0000000..d4b2373 --- /dev/null +++ b/pkg/cri/server/sandbox_stats_windows.go @@ -0,0 +1,35 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "fmt" + + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + + "github.com/containerd/containerd/errdefs" + sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" +) + +func (c *criService) podSandboxStats(ctx context.Context, sandbox sandboxstore.Sandbox, stats interface{}) (*runtime.PodSandboxStats, error) { + return nil, fmt.Errorf("pod sandbox stats not implemented on windows: %w", errdefs.ErrNotImplemented) +} + +func metricsForSandbox(sandbox sandboxstore.Sandbox) (interface{}, error) { + return nil, fmt.Errorf("metrics for sandbox not implemented on windows: %w", errdefs.ErrNotImplemented) +} diff --git a/pkg/cri/server/sandbox_status.go b/pkg/cri/server/sandbox_status.go index f669204..fd2afc4 100644 --- a/pkg/cri/server/sandbox_status.go +++ b/pkg/cri/server/sandbox_status.go @@ -18,15 +18,15 @@ package server import ( "encoding/json" + "fmt" goruntime "runtime" "github.com/containerd/containerd" "github.com/containerd/containerd/errdefs" cni "github.com/containerd/go-cni" runtimespec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" ) @@ -35,19 +35,19 @@ import ( func (c *criService) PodSandboxStatus(ctx context.Context, r *runtime.PodSandboxStatusRequest) (*runtime.PodSandboxStatusResponse, error) { sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId()) if err != nil { - return nil, errors.Wrap(err, "an error occurred when try to find sandbox") + return nil, fmt.Errorf("an error occurred when try to find sandbox: %w", err) } ip, additionalIPs, err := c.getIPs(sandbox) if err != nil { - return nil, errors.Wrap(err, "failed to get sandbox ip") + return nil, 
fmt.Errorf("failed to get sandbox ip: %w", err) } status := toCRISandboxStatus(sandbox.Metadata, sandbox.Status.Get(), ip, additionalIPs) if status.GetCreatedAt() == 0 { // CRI doesn't allow CreatedAt == 0. info, err := sandbox.Container.Info(ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to get CreatedAt for sandbox container in %q state", status.State) + return nil, fmt.Errorf("failed to get CreatedAt for sandbox container in %q state: %w", status.State, err) } status.CreatedAt = info.CreatedAt.UnixNano() } @@ -58,7 +58,7 @@ func (c *criService) PodSandboxStatus(ctx context.Context, r *runtime.PodSandbox // Generate verbose information. info, err := toCRISandboxInfo(ctx, sandbox) if err != nil { - return nil, errors.Wrap(err, "failed to get verbose sandbox container info") + return nil, fmt.Errorf("failed to get verbose sandbox container info: %w", err) } return &runtime.PodSandboxStatusResponse{ @@ -76,9 +76,12 @@ func (c *criService) getIPs(sandbox sandboxstore.Sandbox) (string, []string, err // responsible for reporting the IP. 
return "", nil, nil } + if goruntime.GOOS == "windows" && config.GetWindows().GetSecurityContext().GetHostProcess() { + return "", nil, nil + } if closed, err := sandbox.NetNS.Closed(); err != nil { - return "", nil, errors.Wrap(err, "check network namespace closed") + return "", nil, fmt.Errorf("check network namespace closed: %w", err) } else if closed { return "", nil, nil } @@ -147,17 +150,19 @@ func toCRISandboxInfo(ctx context.Context, sandbox sandboxstore.Sandbox) (map[st container := sandbox.Container task, err := container.Task(ctx, nil) if err != nil && !errdefs.IsNotFound(err) { - return nil, errors.Wrap(err, "failed to get sandbox container task") + return nil, fmt.Errorf("failed to get sandbox container task: %w", err) } var processStatus containerd.ProcessStatus if task != nil { - taskStatus, err := task.Status(ctx) - if err != nil { - return nil, errors.Wrap(err, "failed to get task status") + if taskStatus, err := task.Status(ctx); err != nil { + if !errdefs.IsNotFound(err) { + return nil, fmt.Errorf("failed to get task status: %w", err) + } + processStatus = containerd.Unknown + } else { + processStatus = taskStatus.Status } - - processStatus = taskStatus.Status } si := &SandboxInfo{ @@ -178,20 +183,20 @@ func toCRISandboxInfo(ctx context.Context, sandbox sandboxstore.Sandbox) (map[st // Add network closed information if sandbox is not using host network. 
closed, err := sandbox.NetNS.Closed() if err != nil { - return nil, errors.Wrap(err, "failed to check network namespace closed") + return nil, fmt.Errorf("failed to check network namespace closed: %w", err) } si.NetNSClosed = closed } spec, err := container.Spec(ctx) if err != nil { - return nil, errors.Wrap(err, "failed to get sandbox container runtime spec") + return nil, fmt.Errorf("failed to get sandbox container runtime spec: %w", err) } si.RuntimeSpec = spec ctrInfo, err := container.Info(ctx) if err != nil { - return nil, errors.Wrap(err, "failed to get sandbox container info") + return nil, fmt.Errorf("failed to get sandbox container info: %w", err) } // Do not use config.SandboxImage because the configuration might // be changed during restart. It may not reflect the actual image @@ -202,14 +207,14 @@ func toCRISandboxInfo(ctx context.Context, sandbox sandboxstore.Sandbox) (map[st runtimeOptions, err := getRuntimeOptions(ctrInfo) if err != nil { - return nil, errors.Wrap(err, "failed to get runtime options") + return nil, fmt.Errorf("failed to get runtime options: %w", err) } si.RuntimeType = ctrInfo.Runtime.Name si.RuntimeOptions = runtimeOptions infoBytes, err := json.Marshal(si) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal info %v", si) + return nil, fmt.Errorf("failed to marshal info %v: %w", si, err) } return map[string]string{ "info": string(infoBytes), diff --git a/pkg/cri/server/sandbox_status_test.go b/pkg/cri/server/sandbox_status_test.go index 2d79444..1617d06 100644 --- a/pkg/cri/server/sandbox_status_test.go +++ b/pkg/cri/server/sandbox_status_test.go @@ -21,7 +21,7 @@ import ( "time" "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" ) diff --git a/pkg/cri/server/sandbox_stop.go b/pkg/cri/server/sandbox_stop.go index 2732730..963307e 100644 --- 
a/pkg/cri/server/sandbox_stop.go +++ b/pkg/cri/server/sandbox_stop.go @@ -17,15 +17,16 @@ package server import ( + "errors" + "fmt" "syscall" "time" eventtypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/pkg/errors" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" sandboxstore "github.com/containerd/containerd/pkg/cri/store/sandbox" ctrdutil "github.com/containerd/containerd/pkg/cri/util" @@ -36,8 +37,8 @@ import ( func (c *criService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandboxRequest) (*runtime.StopPodSandboxResponse, error) { sandbox, err := c.sandboxStore.Get(r.GetPodSandboxId()) if err != nil { - return nil, errors.Wrapf(err, "an error occurred when try to find sandbox %q", - r.GetPodSandboxId()) + return nil, fmt.Errorf("an error occurred when try to find sandbox %q: %w", + r.GetPodSandboxId(), err) } if err := c.stopPodSandbox(ctx, sandbox); err != nil { @@ -54,6 +55,7 @@ func (c *criService) stopPodSandbox(ctx context.Context, sandbox sandboxstore.Sa // Stop all containers inside the sandbox. This terminates the container forcibly, // and container may still be created, so production should not rely on this behavior. // TODO(random-liu): Introduce a state in sandbox to avoid future container creation. + stop := time.Now() containers := c.containerStore.List() for _, container := range containers { if container.SandboxID != id { @@ -62,37 +64,40 @@ func (c *criService) stopPodSandbox(ctx context.Context, sandbox sandboxstore.Sa // Forcibly stop the container. Do not use `StopContainer`, because it introduces a race // if a container is removed after list. 
if err := c.stopContainer(ctx, container, 0); err != nil { - return errors.Wrapf(err, "failed to stop container %q", container.ID) + return fmt.Errorf("failed to stop container %q: %w", container.ID, err) } } if err := c.cleanupSandboxFiles(id, sandbox.Config); err != nil { - return errors.Wrap(err, "failed to cleanup sandbox files") + return fmt.Errorf("failed to cleanup sandbox files: %w", err) } // Only stop sandbox container when it's running or unknown. state := sandbox.Status.Get().State if state == sandboxstore.StateReady || state == sandboxstore.StateUnknown { if err := c.stopSandboxContainer(ctx, sandbox); err != nil { - return errors.Wrapf(err, "failed to stop sandbox container %q in %q state", id, state) + return fmt.Errorf("failed to stop sandbox container %q in %q state: %w", id, state, err) } } + sandboxRuntimeStopTimer.WithValues(sandbox.RuntimeHandler).UpdateSince(stop) // Teardown network for sandbox. if sandbox.NetNS != nil { + netStop := time.Now() // Use empty netns path if netns is not available. 
This is defined in: // https://github.com/containernetworking/cni/blob/v0.7.0-alpha1/SPEC.md if closed, err := sandbox.NetNS.Closed(); err != nil { - return errors.Wrap(err, "failed to check network namespace closed") + return fmt.Errorf("failed to check network namespace closed: %w", err) } else if closed { sandbox.NetNSPath = "" } if err := c.teardownPodNetwork(ctx, sandbox); err != nil { - return errors.Wrapf(err, "failed to destroy network for sandbox %q", id) + return fmt.Errorf("failed to destroy network for sandbox %q: %w", id, err) } if err := sandbox.NetNS.Remove(); err != nil { - return errors.Wrapf(err, "failed to remove network namespace for sandbox %q", id) + return fmt.Errorf("failed to remove network namespace for sandbox %q: %w", id, err) } + sandboxDeleteNetwork.UpdateSince(netStop) } log.G(ctx).Infof("TearDown network for sandbox %q successfully", id) @@ -110,11 +115,11 @@ func (c *criService) stopSandboxContainer(ctx context.Context, sandbox sandboxst task, err := container.Task(ctx, nil) if err != nil { if !errdefs.IsNotFound(err) { - return errors.Wrap(err, "failed to get sandbox container") + return fmt.Errorf("failed to get sandbox container: %w", err) } // Don't return for unknown state, some cleanup needs to be done. 
if state == sandboxstore.StateUnknown { - return cleanupUnknownSandbox(ctx, id, sandbox) + return cleanupUnknownSandbox(ctx, id, sandbox, c) } return nil } @@ -128,9 +133,9 @@ func (c *criService) stopSandboxContainer(ctx context.Context, sandbox sandboxst exitCh, err := task.Wait(waitCtx) if err != nil { if !errdefs.IsNotFound(err) { - return errors.Wrap(err, "failed to wait for task") + return fmt.Errorf("failed to wait for task: %w", err) } - return cleanupUnknownSandbox(ctx, id, sandbox) + return cleanupUnknownSandbox(ctx, id, sandbox, c) } exitCtx, exitCancel := context.WithCancel(context.Background()) @@ -146,7 +151,7 @@ func (c *criService) stopSandboxContainer(ctx context.Context, sandbox sandboxst // Kill the sandbox container. if err = task.Kill(ctx, syscall.SIGKILL); err != nil && !errdefs.IsNotFound(err) { - return errors.Wrap(err, "failed to kill sandbox container") + return fmt.Errorf("failed to kill sandbox container: %w", err) } return c.waitSandboxStop(ctx, sandbox) @@ -157,7 +162,7 @@ func (c *criService) stopSandboxContainer(ctx context.Context, sandbox sandboxst func (c *criService) waitSandboxStop(ctx context.Context, sandbox sandboxstore.Sandbox) error { select { case <-ctx.Done(): - return errors.Wrapf(ctx.Err(), "wait sandbox container %q", sandbox.ID) + return fmt.Errorf("wait sandbox container %q: %w", sandbox.ID, ctx.Err()) case <-sandbox.Stopped(): return nil } @@ -165,7 +170,8 @@ func (c *criService) waitSandboxStop(ctx context.Context, sandbox sandboxstore.S // teardownPodNetwork removes the network from the pod func (c *criService) teardownPodNetwork(ctx context.Context, sandbox sandboxstore.Sandbox) error { - if c.netPlugin == nil { + netPlugin := c.getNetworkPlugin(sandbox.RuntimeHandler) + if netPlugin == nil { return errors.New("cni config not initialized") } @@ -176,14 +182,22 @@ func (c *criService) teardownPodNetwork(ctx context.Context, sandbox sandboxstor ) opts, err := cniNamespaceOpts(id, config) if err != nil { - return 
errors.Wrap(err, "get cni namespace options") + return fmt.Errorf("get cni namespace options: %w", err) } - return c.netPlugin.Remove(ctx, id, path, opts...) + netStart := time.Now() + err = netPlugin.Remove(ctx, id, path, opts...) + networkPluginOperations.WithValues(networkTearDownOp).Inc() + networkPluginOperationsLatency.WithValues(networkTearDownOp).UpdateSince(netStart) + if err != nil { + networkPluginOperationsErrors.WithValues(networkTearDownOp).Inc() + return err + } + return nil } // cleanupUnknownSandbox cleanup stopped sandbox in unknown state. -func cleanupUnknownSandbox(ctx context.Context, id string, sandbox sandboxstore.Sandbox) error { +func cleanupUnknownSandbox(ctx context.Context, id string, sandbox sandboxstore.Sandbox, c *criService) error { // Reuse handleSandboxExit to do the cleanup. return handleSandboxExit(ctx, &eventtypes.TaskExit{ ContainerID: id, @@ -191,5 +205,5 @@ func cleanupUnknownSandbox(ctx context.Context, id string, sandbox sandboxstore. Pid: 0, ExitStatus: unknownExitCode, ExitedAt: time.Now(), - }, sandbox) + }, sandbox, c) } diff --git a/pkg/cri/server/service.go b/pkg/cri/server/service.go index cec15ff..4fd305d 100644 --- a/pkg/cri/server/service.go +++ b/pkg/cri/server/service.go @@ -23,17 +23,19 @@ import ( "net/http" "os" "path/filepath" + "sync" "time" "github.com/containerd/containerd" "github.com/containerd/containerd/oci" "github.com/containerd/containerd/pkg/cri/streaming" + "github.com/containerd/containerd/pkg/kmutex" "github.com/containerd/containerd/plugin" cni "github.com/containerd/go-cni" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "google.golang.org/grpc" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + runtime_alpha "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "github.com/containerd/containerd/pkg/cri/store/label" @@ -48,18 +50,26 @@ import ( "github.com/containerd/containerd/pkg/registrar" ) +// defaultNetworkPlugin is used for the default CNI 
configuration +const defaultNetworkPlugin = "default" + // grpcServices are all the grpc services provided by cri containerd. type grpcServices interface { runtime.RuntimeServiceServer runtime.ImageServiceServer } +type grpcAlphaServices interface { + runtime_alpha.RuntimeServiceServer + runtime_alpha.ImageServiceServer +} + // CRIService is the interface implement CRI remote service server. type CRIService interface { - Run() error + Run(ready func()) error // io.Closer is used by containerd to gracefully stop cri service. io.Closer - plugin.Service + Register(*grpc.Server) error grpcServices } @@ -86,7 +96,7 @@ type criService struct { // snapshotStore stores information of all snapshots. snapshotStore *snapshotstore.Store // netPlugin is used to setup and teardown network when run/stop pod sandbox. - netPlugin cni.CNI + netPlugin map[string]cni.CNI // client is an instance of the containerd client client *containerd.Client // streamServer is the streaming server serves container streaming request. @@ -98,12 +108,16 @@ type criService struct { initialized atomic.Bool // cniNetConfMonitor is used to reload cni network conf if there is // any valid fs change events from cni network conf dir. - cniNetConfMonitor *cniNetConfSyncer + cniNetConfMonitor map[string]*cniNetConfSyncer // baseOCISpecs contains cached OCI specs loaded via `Runtime.BaseRuntimeSpec` baseOCISpecs map[string]*oci.Spec // allCaps is the list of the capabilities. // When nil, parsed from CapEff of /proc/self/status. - allCaps []string // nolint + allCaps []string //nolint:nolintlint,unused // Ignore on non-Linux + // unpackDuplicationSuppressor is used to make sure that there is only + // one in-flight fetch request or unpack handler for a given descriptor's + // or chain ID. 
+ unpackDuplicationSuppressor kmutex.KeyedLocker } // NewCRIService returns a new instance of CRIService @@ -111,40 +125,54 @@ func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIServi var err error labels := label.NewStore() c := &criService{ - config: config, - client: client, - os: osinterface.RealOS{}, - sandboxStore: sandboxstore.NewStore(labels), - containerStore: containerstore.NewStore(labels), - imageStore: imagestore.NewStore(client), - snapshotStore: snapshotstore.NewStore(), - sandboxNameIndex: registrar.NewRegistrar(), - containerNameIndex: registrar.NewRegistrar(), - initialized: atomic.NewBool(false), + config: config, + client: client, + os: osinterface.RealOS{}, + sandboxStore: sandboxstore.NewStore(labels), + containerStore: containerstore.NewStore(labels), + imageStore: imagestore.NewStore(client), + snapshotStore: snapshotstore.NewStore(), + sandboxNameIndex: registrar.NewRegistrar(), + containerNameIndex: registrar.NewRegistrar(), + initialized: atomic.NewBool(false), + netPlugin: make(map[string]cni.CNI), + unpackDuplicationSuppressor: kmutex.New(), } if client.SnapshotService(c.config.ContainerdConfig.Snapshotter) == nil { - return nil, errors.Errorf("failed to find snapshotter %q", c.config.ContainerdConfig.Snapshotter) + return nil, fmt.Errorf("failed to find snapshotter %q", c.config.ContainerdConfig.Snapshotter) } c.imageFSPath = imageFSPath(config.ContainerdRootDir, config.ContainerdConfig.Snapshotter) logrus.Infof("Get image filesystem path %q", c.imageFSPath) if err := c.initPlatform(); err != nil { - return nil, errors.Wrap(err, "initialize platform") + return nil, fmt.Errorf("initialize platform: %w", err) } // prepare streaming server c.streamServer, err = newStreamServer(c, config.StreamServerAddress, config.StreamServerPort, config.StreamIdleTimeout) if err != nil { - return nil, errors.Wrap(err, "failed to create stream server") + return nil, fmt.Errorf("failed to create stream server: %w", err) } 
c.eventMonitor = newEventMonitor(c) - c.cniNetConfMonitor, err = newCNINetConfSyncer(c.config.NetworkPluginConfDir, c.netPlugin, c.cniLoadOptions()) - if err != nil { - return nil, errors.Wrap(err, "failed to create cni conf monitor") + c.cniNetConfMonitor = make(map[string]*cniNetConfSyncer) + for name, i := range c.netPlugin { + path := c.config.NetworkPluginConfDir + if name != defaultNetworkPlugin { + if rc, ok := c.config.Runtimes[name]; ok { + path = rc.NetworkPluginConfDir + } + } + if path != "" { + m, err := newCNINetConfSyncer(path, i, c.cniLoadOptions()) + if err != nil { + return nil, fmt.Errorf("failed to create cni conf monitor for %s: %w", name, err) + } + c.cniNetConfMonitor[name] = m + } } // Preload base OCI specs @@ -172,13 +200,13 @@ func (c *criService) RegisterTCP(s *grpc.Server) error { } // Run starts the CRI service. -func (c *criService) Run() error { +func (c *criService) Run(ready func()) error { logrus.Info("Start subscribing containerd event") c.eventMonitor.subscribe(c.client) logrus.Infof("Start recovering state") if err := c.recover(ctrdutil.NamespacedContext()); err != nil { - return errors.Wrap(err, "failed to recover state") + return fmt.Errorf("failed to recover state: %w", err) } // Start event handler. 
@@ -194,12 +222,20 @@ func (c *criService) Run() error { ) snapshotsSyncer.start() - // Start CNI network conf syncer - logrus.Info("Start cni network conf syncer") - cniNetConfMonitorErrCh := make(chan error, 1) + // Start CNI network conf syncers + cniNetConfMonitorErrCh := make(chan error, len(c.cniNetConfMonitor)) + var netSyncGroup sync.WaitGroup + for name, h := range c.cniNetConfMonitor { + netSyncGroup.Add(1) + logrus.Infof("Start cni network conf syncer for %s", name) + go func(h *cniNetConfSyncer) { + cniNetConfMonitorErrCh <- h.syncLoop() + netSyncGroup.Done() + }(h) + } go func() { - defer close(cniNetConfMonitorErrCh) - cniNetConfMonitorErrCh <- c.cniNetConfMonitor.syncLoop() + netSyncGroup.Wait() + close(cniNetConfMonitorErrCh) }() // Start streaming server. @@ -215,6 +251,7 @@ func (c *criService) Run() error { // Set the server as initialized. GRPC services could start serving traffic. c.initialized.Set() + ready() var eventMonitorErr, streamServerErr, cniNetConfMonitorErr error // Stop the whole CRI service if any of the critical service exits. @@ -224,7 +261,7 @@ func (c *criService) Run() error { case cniNetConfMonitorErr = <-cniNetConfMonitorErrCh: } if err := c.Close(); err != nil { - return errors.Wrap(err, "failed to stop cri service") + return fmt.Errorf("failed to stop cri service: %w", err) } // If the error is set above, err from channel must be nil here, because // the channel is supposed to be closed. Or else, we wait and set it. 
@@ -251,13 +288,13 @@ func (c *criService) Run() error { logrus.Errorf("Stream server is not stopped in %q", streamServerStopTimeout) } if eventMonitorErr != nil { - return errors.Wrap(eventMonitorErr, "event monitor error") + return fmt.Errorf("event monitor error: %w", eventMonitorErr) } if streamServerErr != nil { - return errors.Wrap(streamServerErr, "stream server error") + return fmt.Errorf("stream server error: %w", streamServerErr) } if cniNetConfMonitorErr != nil { - return errors.Wrap(cniNetConfMonitorErr, "cni network conf monitor error") + return fmt.Errorf("cni network conf monitor error: %w", cniNetConfMonitorErr) } return nil } @@ -266,12 +303,14 @@ func (c *criService) Run() error { // TODO(random-liu): Make close synchronous. func (c *criService) Close() error { logrus.Info("Stop CRI service") - if err := c.cniNetConfMonitor.stop(); err != nil { - logrus.WithError(err).Error("failed to stop cni network conf monitor") + for name, h := range c.cniNetConfMonitor { + if err := h.stop(); err != nil { + logrus.WithError(err).Errorf("failed to stop cni network conf monitor for %s", name) + } } c.eventMonitor.stop() if err := c.streamServer.Stop(); err != nil { - return errors.Wrap(err, "failed to stop stream server") + return fmt.Errorf("failed to stop stream server: %w", err) } return nil } @@ -280,6 +319,9 @@ func (c *criService) register(s *grpc.Server) error { instrumented := newInstrumentedService(c) runtime.RegisterRuntimeServiceServer(s, instrumented) runtime.RegisterImageServiceServer(s, instrumented) + instrumentedAlpha := newInstrumentedAlphaService(c) + runtime_alpha.RegisterRuntimeServiceServer(s, instrumentedAlpha) + runtime_alpha.RegisterImageServiceServer(s, instrumentedAlpha) return nil } @@ -292,13 +334,13 @@ func imageFSPath(rootDir, snapshotter string) string { func loadOCISpec(filename string) (*oci.Spec, error) { file, err := os.Open(filename) if err != nil { - return nil, errors.Wrapf(err, "failed to open base OCI spec: %s", 
filename) + return nil, fmt.Errorf("failed to open base OCI spec: %s: %w", filename, err) } defer file.Close() spec := oci.Spec{} if err := json.NewDecoder(file).Decode(&spec); err != nil { - return nil, errors.Wrap(err, "failed to parse base OCI spec file") + return nil, fmt.Errorf("failed to parse base OCI spec file: %w", err) } return &spec, nil @@ -318,7 +360,7 @@ func loadBaseOCISpecs(config *criconfig.Config) (map[string]*oci.Spec, error) { spec, err := loadOCISpec(cfg.BaseRuntimeSpec) if err != nil { - return nil, errors.Wrapf(err, "failed to load base OCI spec from file: %s", cfg.BaseRuntimeSpec) + return nil, fmt.Errorf("failed to load base OCI spec from file: %s: %w", cfg.BaseRuntimeSpec, err) } specs[cfg.BaseRuntimeSpec] = spec diff --git a/pkg/cri/server/service_linux.go b/pkg/cri/server/service_linux.go index 1cd2c6a..020e373 100644 --- a/pkg/cri/server/service_linux.go +++ b/pkg/cri/server/service_linux.go @@ -17,11 +17,12 @@ package server import ( + "fmt" + "github.com/containerd/containerd/pkg/cap" "github.com/containerd/containerd/pkg/userns" cni "github.com/containerd/go-cni" "github.com/opencontainers/selinux/go-selinux" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -30,9 +31,7 @@ import ( const networkAttachCount = 2 // initPlatform handles linux specific initialization for the CRI service. -func (c *criService) initPlatform() error { - var err error - +func (c *criService) initPlatform() (err error) { if userns.RunningInUserNS() { if !(c.config.DisableCgroup && !c.apparmorEnabled() && c.config.RestrictOOMScoreAdj) { logrus.Warn("Running containerd in a user namespace typically requires disable_cgroup, disable_apparmor, restrict_oom_score_adj set to be true") @@ -50,22 +49,41 @@ func (c *criService) initPlatform() error { selinux.SetDisabled() } - // Pod needs to attach to at least loopback network and a non host network, - // hence networkAttachCount is 2. 
If there are more network configs the - // pod will be attached to all the networks but we will only use the ip - // of the default network interface as the pod IP. - c.netPlugin, err = cni.New(cni.WithMinNetworkCount(networkAttachCount), - cni.WithPluginConfDir(c.config.NetworkPluginConfDir), - cni.WithPluginMaxConfNum(c.config.NetworkPluginMaxConfNum), - cni.WithPluginDir([]string{c.config.NetworkPluginBinDir})) - if err != nil { - return errors.Wrap(err, "failed to initialize cni") + pluginDirs := map[string]string{ + defaultNetworkPlugin: c.config.NetworkPluginConfDir, + } + for name, conf := range c.config.Runtimes { + if conf.NetworkPluginConfDir != "" { + pluginDirs[name] = conf.NetworkPluginConfDir + } + } + + c.netPlugin = make(map[string]cni.CNI) + for name, dir := range pluginDirs { + max := c.config.NetworkPluginMaxConfNum + if name != defaultNetworkPlugin { + if m := c.config.Runtimes[name].NetworkPluginMaxConfNum; m != 0 { + max = m + } + } + // Pod needs to attach to at least loopback network and a non host network, + // hence networkAttachCount is 2. If there are more network configs the + // pod will be attached to all the networks but we will only use the ip + // of the default network interface as the pod IP. 
+ i, err := cni.New(cni.WithMinNetworkCount(networkAttachCount), + cni.WithPluginConfDir(dir), + cni.WithPluginMaxConfNum(max), + cni.WithPluginDir([]string{c.config.NetworkPluginBinDir})) + if err != nil { + return fmt.Errorf("failed to initialize cni: %w", err) + } + c.netPlugin[name] = i } if c.allCaps == nil { c.allCaps, err = cap.Current() if err != nil { - return errors.Wrap(err, "failed to get caps") + return fmt.Errorf("failed to get caps: %w", err) } } diff --git a/pkg/cri/server/service_other.go b/pkg/cri/server/service_other.go index fc6cd51..35c5725 100644 --- a/pkg/cri/server/service_other.go +++ b/pkg/cri/server/service_other.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* diff --git a/pkg/cri/server/service_test.go b/pkg/cri/server/service_test.go index 50b0153..ad329d6 100644 --- a/pkg/cri/server/service_test.go +++ b/pkg/cri/server/service_test.go @@ -18,11 +18,11 @@ package server import ( "encoding/json" - "io/ioutil" "os" "testing" "github.com/containerd/containerd/oci" + "github.com/containerd/go-cni" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -55,7 +55,8 @@ func newTestCRIService() *criService { RootDir: testRootDir, StateDir: testStateDir, PluginConfig: criconfig.PluginConfig{ - SandboxImage: testSandboxImage, + SandboxImage: testSandboxImage, + TolerateMissingHugetlbController: true, }, }, imageFSPath: testImageFSPath, @@ -66,14 +67,16 @@ func newTestCRIService() *criService { sandboxNameIndex: registrar.NewRegistrar(), containerStore: containerstore.NewStore(labels), containerNameIndex: registrar.NewRegistrar(), - netPlugin: servertesting.NewFakeCNIPlugin(), + netPlugin: map[string]cni.CNI{ + defaultNetworkPlugin: servertesting.NewFakeCNIPlugin(), + }, } } func TestLoadBaseOCISpec(t *testing.T) { spec := oci.Spec{Version: "1.0.2", Hostname: "default"} - file, err := ioutil.TempFile("", "spec-test-") + file, err := os.CreateTemp("", "spec-test-") require.NoError(t, err) defer 
func() { diff --git a/pkg/cri/server/service_windows.go b/pkg/cri/server/service_windows.go index dff4759..2d660c1 100644 --- a/pkg/cri/server/service_windows.go +++ b/pkg/cri/server/service_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -19,8 +17,9 @@ package server import ( + "fmt" + cni "github.com/containerd/go-cni" - "github.com/pkg/errors" ) // windowsNetworkAttachCount is the minimum number of networks the PodSandbox @@ -29,18 +28,36 @@ const windowsNetworkAttachCount = 1 // initPlatform handles linux specific initialization for the CRI service. func (c *criService) initPlatform() error { - var err error - // For windows, the loopback network is added as default. - // There is no need to explicitly add one hence networkAttachCount is 1. - // If there are more network configs the pod will be attached to all the - // networks but we will only use the ip of the default network interface - // as the pod IP. - c.netPlugin, err = cni.New(cni.WithMinNetworkCount(windowsNetworkAttachCount), - cni.WithPluginConfDir(c.config.NetworkPluginConfDir), - cni.WithPluginMaxConfNum(c.config.NetworkPluginMaxConfNum), - cni.WithPluginDir([]string{c.config.NetworkPluginBinDir})) - if err != nil { - return errors.Wrap(err, "failed to initialize cni") + pluginDirs := map[string]string{ + defaultNetworkPlugin: c.config.NetworkPluginConfDir, + } + for name, conf := range c.config.Runtimes { + if conf.NetworkPluginConfDir != "" { + pluginDirs[name] = conf.NetworkPluginConfDir + } + } + + c.netPlugin = make(map[string]cni.CNI) + for name, dir := range pluginDirs { + max := c.config.NetworkPluginMaxConfNum + if name != defaultNetworkPlugin { + if m := c.config.Runtimes[name].NetworkPluginMaxConfNum; m != 0 { + max = m + } + } + // For windows, the loopback network is added as default. + // There is no need to explicitly add one hence networkAttachCount is 1. 
+ // If there are more network configs the pod will be attached to all the + // networks but we will only use the ip of the default network interface + // as the pod IP. + i, err := cni.New(cni.WithMinNetworkCount(windowsNetworkAttachCount), + cni.WithPluginConfDir(dir), + cni.WithPluginMaxConfNum(max), + cni.WithPluginDir([]string{c.config.NetworkPluginBinDir})) + if err != nil { + return fmt.Errorf("failed to initialize cni: %w", err) + } + c.netPlugin[name] = i } return nil diff --git a/pkg/cri/server/snapshots.go b/pkg/cri/server/snapshots.go index 10dd41b..ed67275 100644 --- a/pkg/cri/server/snapshots.go +++ b/pkg/cri/server/snapshots.go @@ -18,11 +18,11 @@ package server import ( "context" + "fmt" "time" "github.com/containerd/containerd/errdefs" snapshot "github.com/containerd/containerd/snapshots" - "github.com/pkg/errors" "github.com/sirupsen/logrus" snapshotstore "github.com/containerd/containerd/pkg/cri/store/snapshot" @@ -80,7 +80,7 @@ func (s *snapshotsSyncer) sync() error { snapshots = append(snapshots, info) return nil }); err != nil { - return errors.Wrap(err, "walk all snapshots failed") + return fmt.Errorf("walk all snapshots failed: %w", err) } for _, info := range snapshots { sn, err := s.store.Get(info.Name) diff --git a/pkg/cri/server/status.go b/pkg/cri/server/status.go index 4283a51..704ff4f 100644 --- a/pkg/cri/server/status.go +++ b/pkg/cri/server/status.go @@ -23,7 +23,7 @@ import ( "github.com/containerd/containerd/log" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // networkNotReadyReason is the reason reported when network is not ready. 
@@ -41,11 +41,14 @@ func (c *criService) Status(ctx context.Context, r *runtime.StatusRequest) (*run Type: runtime.NetworkReady, Status: true, } + netPlugin := c.netPlugin[defaultNetworkPlugin] // Check the status of the cni initialization - if err := c.netPlugin.Status(); err != nil { - networkCondition.Status = false - networkCondition.Reason = networkNotReadyReason - networkCondition.Message = fmt.Sprintf("Network plugin returns error: %v", err) + if netPlugin != nil { + if err := netPlugin.Status(); err != nil { + networkCondition.Status = false + networkCondition.Reason = networkNotReadyReason + networkCondition.Message = fmt.Sprintf("Network plugin returns error: %v", err) + } } resp := &runtime.StatusResponse{ @@ -67,17 +70,29 @@ func (c *criService) Status(ctx context.Context, r *runtime.StatusRequest) (*run } resp.Info["golang"] = string(versionByt) - cniConfig, err := json.Marshal(c.netPlugin.GetConfig()) - if err != nil { - log.G(ctx).WithError(err).Errorf("Failed to marshal CNI config %v", err) + if netPlugin != nil { + cniConfig, err := json.Marshal(netPlugin.GetConfig()) + if err != nil { + log.G(ctx).WithError(err).Errorf("Failed to marshal CNI config %v", err) + } + resp.Info["cniconfig"] = string(cniConfig) } - resp.Info["cniconfig"] = string(cniConfig) - lastCNILoadStatus := "OK" - if lerr := c.cniNetConfMonitor.lastStatus(); lerr != nil { - lastCNILoadStatus = lerr.Error() + defaultStatus := "OK" + for name, h := range c.cniNetConfMonitor { + s := "OK" + if h == nil { + continue + } + if lerr := h.lastStatus(); lerr != nil { + s = lerr.Error() + } + resp.Info[fmt.Sprintf("lastCNILoadStatus.%s", name)] = s + if name == defaultNetworkPlugin { + defaultStatus = s + } } - resp.Info["lastCNILoadStatus"] = lastCNILoadStatus + resp.Info["lastCNILoadStatus"] = defaultStatus } return resp, nil } diff --git a/pkg/cri/server/streaming.go b/pkg/cri/server/streaming.go index d195d8d..6c9cd4b 100644 --- a/pkg/cri/server/streaming.go +++ 
b/pkg/cri/server/streaming.go @@ -19,13 +19,14 @@ package server import ( "context" "crypto/tls" + "errors" + "fmt" "io" "math" "net" "os" "time" - "github.com/pkg/errors" k8snet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/remotecommand" @@ -70,7 +71,7 @@ func newStreamServer(c *criService, addr, port, streamIdleTimeout string) (strea if addr == "" { a, err := k8snet.ResolveBindAddress(nil) if err != nil { - return nil, errors.Wrap(err, "failed to get stream server address") + return nil, fmt.Errorf("failed to get stream server address: %w", err) } addr = a.String() } @@ -79,20 +80,20 @@ func newStreamServer(c *criService, addr, port, streamIdleTimeout string) (strea var err error config.StreamIdleTimeout, err = time.ParseDuration(streamIdleTimeout) if err != nil { - return nil, errors.Wrap(err, "invalid stream idle timeout") + return nil, fmt.Errorf("invalid stream idle timeout: %w", err) } } config.Addr = net.JoinHostPort(addr, port) run := newStreamRuntime(c) tlsMode, err := getStreamListenerMode(c) if err != nil { - return nil, errors.Wrapf(err, "invalid stream server configuration") + return nil, fmt.Errorf("invalid stream server configuration: %w", err) } switch tlsMode { case x509KeyPairTLS: tlsCert, err := tls.LoadX509KeyPair(c.config.X509KeyPairStreaming.TLSCertFile, c.config.X509KeyPairStreaming.TLSKeyFile) if err != nil { - return nil, errors.Wrap(err, "failed to load x509 key pair for stream server") + return nil, fmt.Errorf("failed to load x509 key pair for stream server: %w", err) } config.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{tlsCert}, @@ -101,7 +102,7 @@ func newStreamServer(c *criService, addr, port, streamIdleTimeout string) (strea case selfSignTLS: tlsCert, err := newTLSCert() if err != nil { - return nil, errors.Wrap(err, "failed to generate tls certificate for stream server") + return nil, fmt.Errorf("failed to generate tls certificate for stream server: %w", err) } 
config.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{tlsCert}, @@ -136,13 +137,13 @@ func (s *streamRuntime) Exec(containerID string, cmd []string, stdin io.Reader, resize: resize, }) if err != nil { - return errors.Wrap(err, "failed to exec in container") + return fmt.Errorf("failed to exec in container: %w", err) } if *exitCode == 0 { return nil } return &exec.CodeExitError{ - Err: errors.Errorf("error executing command %v, exit code %d", cmd, *exitCode), + Err: fmt.Errorf("error executing command %v, exit code %d", cmd, *exitCode), Code: int(*exitCode), } } @@ -154,7 +155,7 @@ func (s *streamRuntime) Attach(containerID string, in io.Reader, out, err io.Wri func (s *streamRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error { if port <= 0 || port > math.MaxUint16 { - return errors.Errorf("invalid port %d", port) + return fmt.Errorf("invalid port %d", port) } ctx := ctrdutil.NamespacedContext() return s.c.portForward(ctx, podSandboxID, port, stream) @@ -197,12 +198,12 @@ func newTLSCert() (tls.Certificate, error) { hostName, err := os.Hostname() if err != nil { - return fail(errors.Wrap(err, "failed to get hostname")) + return fail(fmt.Errorf("failed to get hostname: %w", err)) } addrs, err := net.InterfaceAddrs() if err != nil { - return fail(errors.Wrap(err, "failed to get host IP addresses")) + return fail(fmt.Errorf("failed to get host IP addresses: %w", err)) } var alternateIPs []net.IP @@ -226,13 +227,13 @@ func newTLSCert() (tls.Certificate, error) { // Generate a self signed certificate key (CA is self) certPem, keyPem, err := k8scert.GenerateSelfSignedCertKey(hostName, alternateIPs, alternateDNS) if err != nil { - return fail(errors.Wrap(err, "certificate key could not be created")) + return fail(fmt.Errorf("certificate key could not be created: %w", err)) } // Load the tls certificate tlsCert, err := tls.X509KeyPair(certPem, keyPem) if err != nil { - return fail(errors.Wrap(err, "certificate could not be 
loaded")) + return fail(fmt.Errorf("certificate could not be loaded: %w", err)) } return tlsCert, nil diff --git a/pkg/cri/server/testing/fake_cni_plugin.go b/pkg/cri/server/testing/fake_cni_plugin.go index 15dd496..fcc060f 100644 --- a/pkg/cri/server/testing/fake_cni_plugin.go +++ b/pkg/cri/server/testing/fake_cni_plugin.go @@ -38,11 +38,21 @@ func (f *FakeCNIPlugin) Setup(ctx context.Context, id, path string, opts ...cni. return nil, nil } +// SetupSerially sets up the network of PodSandbox without doing the interfaces in parallel. +func (f *FakeCNIPlugin) SetupSerially(ctx context.Context, id, path string, opts ...cni.NamespaceOpts) (*cni.Result, error) { + return nil, nil +} + // Remove teardown the network of PodSandbox. func (f *FakeCNIPlugin) Remove(ctx context.Context, id, path string, opts ...cni.NamespaceOpts) error { return nil } +// Check the network of PodSandbox. +func (f *FakeCNIPlugin) Check(ctx context.Context, id, path string, opts ...cni.NamespaceOpts) error { + return nil +} + // Status get the status of the plugin. 
func (f *FakeCNIPlugin) Status() error { return f.StatusErr diff --git a/pkg/cri/server/update_runtime_config.go b/pkg/cri/server/update_runtime_config.go index 6c725e2..e3102f9 100644 --- a/pkg/cri/server/update_runtime_config.go +++ b/pkg/cri/server/update_runtime_config.go @@ -17,16 +17,19 @@ package server import ( + "fmt" "net" "os" "path/filepath" "strings" "text/template" + "time" + + "golang.org/x/net/context" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/log" - "github.com/pkg/errors" - "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "github.com/containerd/containerd/pkg/atomicfile" ) // cniConfigTemplate contains the values containerd will overwrite @@ -61,7 +64,7 @@ func (c *criService) UpdateRuntimeConfig(ctx context.Context, r *runtime.UpdateR } routes, err := getRoutes(cidrs) if err != nil { - return nil, errors.Wrap(err, "get routes") + return nil, fmt.Errorf("get routes: %w", err) } confTemplate := c.config.NetworkPluginConfTemplate @@ -69,34 +72,27 @@ func (c *criService) UpdateRuntimeConfig(ctx context.Context, r *runtime.UpdateR log.G(ctx).Info("No cni config template is specified, wait for other system components to drop the config.") return &runtime.UpdateRuntimeConfigResponse{}, nil } - if err := c.netPlugin.Status(); err == nil { + netPlugin := c.netPlugin[defaultNetworkPlugin] + if netPlugin == nil { log.G(ctx).Infof("Network plugin is ready, skip generating cni config from template %q", confTemplate) return &runtime.UpdateRuntimeConfigResponse{}, nil - } else if err := c.netPlugin.Load(c.cniLoadOptions()...); err == nil { + } + + netStart := time.Now() + err = netPlugin.Status() + networkPluginOperations.WithValues(networkStatusOp).Inc() + networkPluginOperationsLatency.WithValues(networkStatusOp).UpdateSince(netStart) + if err == nil { + log.G(ctx).Infof("Network plugin is ready, skip generating cni config from template %q", confTemplate) + return 
&runtime.UpdateRuntimeConfigResponse{}, nil + } + networkPluginOperationsErrors.WithValues(networkStatusOp).Inc() + if err := netPlugin.Load(c.cniLoadOptions()...); err == nil { log.G(ctx).Infof("CNI config is successfully loaded, skip generating cni config from template %q", confTemplate) return &runtime.UpdateRuntimeConfigResponse{}, nil } - log.G(ctx).Infof("Generating cni config from template %q", confTemplate) - // generate cni config file from the template with updated pod cidr. - t, err := template.ParseFiles(confTemplate) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse cni config template %q", confTemplate) - } - if err := os.MkdirAll(c.config.NetworkPluginConfDir, 0755); err != nil { - return nil, errors.Wrapf(err, "failed to create cni config directory: %q", c.config.NetworkPluginConfDir) - } - confFile := filepath.Join(c.config.NetworkPluginConfDir, cniConfigFileName) - f, err := os.OpenFile(confFile, os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, errors.Wrapf(err, "failed to open cni config file %q", confFile) - } - defer f.Close() - if err := t.Execute(f, cniConfigTemplate{ - PodCIDR: cidrs[0], - PodCIDRRanges: cidrs, - Routes: routes, - }); err != nil { - return nil, errors.Wrapf(err, "failed to generate cni config file %q", confFile) + if err := writeCNIConfigFile(ctx, c.config.NetworkPluginConfDir, confTemplate, cidrs[0], cidrs, routes); err != nil { + return nil, err } return &runtime.UpdateRuntimeConfigResponse{}, nil } @@ -126,3 +122,28 @@ func getRoutes(cidrs []string) ([]string, error) { } return routes, nil } + +func writeCNIConfigFile(ctx context.Context, confDir string, confTemplate string, podCIDR string, podCIDRRanges []string, routes []string) error { + log.G(ctx).Infof("Generating cni config from template %q", confTemplate) + // generate cni config file from the template with updated pod cidr. 
+ t, err := template.ParseFiles(confTemplate) + if err != nil { + return fmt.Errorf("failed to parse cni config template %q: %w", confTemplate, err) + } + if err := os.MkdirAll(confDir, 0755); err != nil { + return fmt.Errorf("failed to create cni config directory: %q: %w", confDir, err) + } + confFile := filepath.Join(confDir, cniConfigFileName) + f, err := atomicfile.New(confFile, 0o644) + defer func() { + err = f.Close() + }() + if err := t.Execute(f, cniConfigTemplate{ + PodCIDR: podCIDR, + PodCIDRRanges: podCIDRRanges, + Routes: routes, + }); err != nil { + return fmt.Errorf("failed to generate cni config file %q: %w", confFile, err) + } + return err +} diff --git a/pkg/cri/server/update_runtime_config_test.go b/pkg/cri/server/update_runtime_config_test.go index ad01c5f..18eaa9d 100644 --- a/pkg/cri/server/update_runtime_config_test.go +++ b/pkg/cri/server/update_runtime_config_test.go @@ -17,16 +17,15 @@ package server import ( - "io/ioutil" + "errors" "os" "path/filepath" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" criconfig "github.com/containerd/containerd/pkg/cri/config" servertesting "github.com/containerd/containerd/pkg/cri/server/testing" @@ -37,7 +36,7 @@ func TestUpdateRuntimeConfig(t *testing.T) { testTemplate = ` { "name": "test-pod-network", - "cniVersion": "0.3.1", + "cniVersion": "1.0.0", "plugins": [ { "type": "ptp", @@ -55,7 +54,7 @@ func TestUpdateRuntimeConfig(t *testing.T) { expected = ` { "name": "test-pod-network", - "cniVersion": "0.3.1", + "cniVersion": "1.0.0", "plugins": [ { "type": "ptp", @@ -94,11 +93,9 @@ func TestUpdateRuntimeConfig(t *testing.T) { }, } { t.Run(name, func(t *testing.T) { - testDir, err := ioutil.TempDir(os.TempDir(), "test-runtime-config") - require.NoError(t, err) - defer os.RemoveAll(testDir) + testDir := t.TempDir() 
templateName := filepath.Join(testDir, "template") - err = ioutil.WriteFile(templateName, []byte(testTemplate), 0666) + err := os.WriteFile(templateName, []byte(testTemplate), 0666) require.NoError(t, err) confDir := filepath.Join(testDir, "net.d") confName := filepath.Join(confDir, cniConfigFileName) @@ -122,8 +119,8 @@ func TestUpdateRuntimeConfig(t *testing.T) { req.RuntimeConfig.NetworkConfig.PodCidr = "" } if !test.networkReady { - c.netPlugin.(*servertesting.FakeCNIPlugin).StatusErr = errors.New("random error") - c.netPlugin.(*servertesting.FakeCNIPlugin).LoadErr = errors.New("random error") + c.netPlugin[defaultNetworkPlugin].(*servertesting.FakeCNIPlugin).StatusErr = errors.New("random error") + c.netPlugin[defaultNetworkPlugin].(*servertesting.FakeCNIPlugin).LoadErr = errors.New("random error") } _, err = c.UpdateRuntimeConfig(context.Background(), req) assert.NoError(t, err) @@ -131,7 +128,7 @@ func TestUpdateRuntimeConfig(t *testing.T) { _, err := os.Stat(confName) assert.Error(t, err) } else { - got, err := ioutil.ReadFile(confName) + got, err := os.ReadFile(confName) assert.NoError(t, err) assert.Equal(t, expected, string(got)) } diff --git a/pkg/cri/server/version.go b/pkg/cri/server/version.go index 55eb523..76d47f9 100644 --- a/pkg/cri/server/version.go +++ b/pkg/cri/server/version.go @@ -19,7 +19,8 @@ package server import ( "github.com/containerd/containerd/version" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" + runtime_alpha "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "github.com/containerd/containerd/pkg/cri/constants" ) @@ -40,3 +41,13 @@ func (c *criService) Version(ctx context.Context, r *runtime.VersionRequest) (*r RuntimeApiVersion: constants.CRIVersion, }, nil } + +// Version returns the runtime name, runtime version and runtime API version. 
+func (c *criService) AlphaVersion(ctx context.Context, r *runtime_alpha.VersionRequest) (*runtime_alpha.VersionResponse, error) { + return &runtime_alpha.VersionResponse{ + Version: kubeAPIVersion, + RuntimeName: containerName, + RuntimeVersion: version.Version, + RuntimeApiVersion: constants.CRIVersionAlpha, + }, nil +} diff --git a/pkg/cri/store/container/container.go b/pkg/cri/store/container/container.go index c74965b..3aa3fe9 100644 --- a/pkg/cri/store/container/container.go +++ b/pkg/cri/store/container/container.go @@ -20,12 +20,14 @@ import ( "sync" "github.com/containerd/containerd" - "github.com/containerd/containerd/pkg/cri/store/label" - "github.com/containerd/containerd/pkg/cri/store/truncindex" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - + "github.com/containerd/containerd/errdefs" cio "github.com/containerd/containerd/pkg/cri/io" "github.com/containerd/containerd/pkg/cri/store" + "github.com/containerd/containerd/pkg/cri/store/label" + "github.com/containerd/containerd/pkg/cri/store/stats" + "github.com/containerd/containerd/pkg/cri/store/truncindex" + + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // Container contains all resources associated with the container. All methods to @@ -45,6 +47,8 @@ type Container struct { // IsStopSignaledWithTimeout the default is 0, and it is set to 1 after sending // the signal once to avoid repeated sending of the signal. IsStopSignaledWithTimeout *uint32 + // Stats contains (mutable) stats for the container + Stats *stats.ContainerStats } // Opts sets specific information to newly created Container. 
@@ -124,7 +128,7 @@ func (s *Store) Add(c Container) error { s.lock.Lock() defer s.lock.Unlock() if _, ok := s.containers[c.ID]; ok { - return store.ErrAlreadyExist + return errdefs.ErrAlreadyExists } if err := s.labels.Reserve(c.ProcessLabel); err != nil { return err @@ -144,14 +148,14 @@ func (s *Store) Get(id string) (Container, error) { id, err := s.idIndex.Get(id) if err != nil { if err == truncindex.ErrNotExist { - err = store.ErrNotExist + err = errdefs.ErrNotFound } return Container{}, err } if c, ok := s.containers[id]; ok { return c, nil } - return Container{}, store.ErrNotExist + return Container{}, errdefs.ErrNotFound } // List lists all containers. @@ -165,6 +169,27 @@ func (s *Store) List() []Container { return containers } +func (s *Store) UpdateContainerStats(id string, newContainerStats *stats.ContainerStats) error { + s.lock.Lock() + defer s.lock.Unlock() + id, err := s.idIndex.Get(id) + if err != nil { + if err == truncindex.ErrNotExist { + err = errdefs.ErrNotFound + } + return err + } + + if _, ok := s.containers[id]; !ok { + return errdefs.ErrNotFound + } + + c := s.containers[id] + c.Stats = newContainerStats + s.containers[id] = c + return nil +} + // Delete deletes the container from store with specified id. func (s *Store) Delete(id string) { s.lock.Lock() @@ -175,7 +200,11 @@ func (s *Store) Delete(id string) { // So we need to return if there are error. 
return } - s.labels.Release(s.containers[id].ProcessLabel) - s.idIndex.Delete(id) // nolint: errcheck + c := s.containers[id] + if c.IO != nil { + c.IO.Close() + } + s.labels.Release(c.ProcessLabel) + s.idIndex.Delete(id) delete(s.containers, id) } diff --git a/pkg/cri/store/container/container_test.go b/pkg/cri/store/container/container_test.go index 2a65857..6f7bd69 100644 --- a/pkg/cri/store/container/container_test.go +++ b/pkg/cri/store/container/container_test.go @@ -21,13 +21,14 @@ import ( "testing" "time" + "github.com/containerd/containerd/errdefs" + cio "github.com/containerd/containerd/pkg/cri/io" "github.com/containerd/containerd/pkg/cri/store/label" + "github.com/containerd/containerd/pkg/cri/store/stats" + "github.com/opencontainers/selinux/go-selinux" assertlib "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - - cio "github.com/containerd/containerd/pkg/cri/io" - "github.com/containerd/containerd/pkg/cri/store" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestContainerStore(t *testing.T) { @@ -132,6 +133,25 @@ func TestContainerStore(t *testing.T) { Removing: true, }, } + + stats := map[string]*stats.ContainerStats{ + "1": { + Timestamp: time.Now(), + UsageCoreNanoSeconds: 1, + }, + "2abcd": { + Timestamp: time.Now(), + UsageCoreNanoSeconds: 2, + }, + "4a333": { + Timestamp: time.Now(), + UsageCoreNanoSeconds: 3, + }, + "4abcd": { + Timestamp: time.Now(), + UsageCoreNanoSeconds: 4, + }, + } assert := assertlib.New(t) containers := map[string]Container{} for id := range metadatas { @@ -163,12 +183,26 @@ func TestContainerStore(t *testing.T) { got, err := s.Get(genTruncIndex(id)) assert.NoError(err) assert.Equal(c, got) + assert.Nil(c.Stats) } t.Logf("should be able to list containers") cs := s.List() assert.Len(cs, len(containers)) + t.Logf("should be able to update stats on container") + for id := range containers { + err := s.UpdateContainerStats(id, stats[id]) + assert.NoError(err) + } + + // 
Validate stats were updated + cs = s.List() + assert.Len(cs, len(containers)) + for _, c := range cs { + assert.Equal(stats[c.ID], c.Stats) + } + if selinux.GetEnabled() { t.Logf("should have reserved labels (requires -tag selinux)") assert.Equal(map[string]bool{ @@ -183,7 +217,7 @@ func TestContainerStore(t *testing.T) { truncID := genTruncIndex(testID) t.Logf("add should return already exists error for duplicated container") - assert.Equal(store.ErrAlreadyExist, s.Add(v)) + assert.Equal(errdefs.ErrAlreadyExists, s.Add(v)) t.Logf("should be able to delete container") s.Delete(truncID) @@ -194,7 +228,7 @@ func TestContainerStore(t *testing.T) { t.Logf("get should return not exist error after deletion") c, err := s.Get(truncID) assert.Equal(Container{}, c) - assert.Equal(store.ErrNotExist, err) + assert.Equal(errdefs.ErrNotFound, err) } if selinux.GetEnabled() { diff --git a/pkg/cri/store/container/metadata.go b/pkg/cri/store/container/metadata.go index ff9b5f2..698e495 100644 --- a/pkg/cri/store/container/metadata.go +++ b/pkg/cri/store/container/metadata.go @@ -18,9 +18,9 @@ package container import ( "encoding/json" + "fmt" - "github.com/pkg/errors" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // NOTE(random-liu): @@ -28,10 +28,9 @@ import ( // 2) Metadata is checkpointed as containerd container label. // metadataVersion is current version of container metadata. -const metadataVersion = "v1" // nolint +const metadataVersion = "v1" // versionedMetadata is the internal versioned container metadata. -// nolint type versionedMetadata struct { // Version indicates the version of the versioned container metadata. 
Version string @@ -85,5 +84,5 @@ func (c *Metadata) UnmarshalJSON(data []byte) error { *c = Metadata(versioned.Metadata) return nil } - return errors.Errorf("unsupported version: %q", versioned.Version) + return fmt.Errorf("unsupported version: %q", versioned.Version) } diff --git a/pkg/cri/store/container/metadata_test.go b/pkg/cri/store/container/metadata_test.go index 297bc09..bb52fa2 100644 --- a/pkg/cri/store/container/metadata_test.go +++ b/pkg/cri/store/container/metadata_test.go @@ -21,7 +21,7 @@ import ( "testing" assertlib "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestMetadataMarshalUnmarshal(t *testing.T) { diff --git a/pkg/cri/store/container/status.go b/pkg/cri/store/container/status.go index 940dabf..0f6d38b 100644 --- a/pkg/cri/store/container/status.go +++ b/pkg/cri/store/container/status.go @@ -18,14 +18,14 @@ package container import ( "encoding/json" - "io/ioutil" + "errors" + "fmt" "os" "path/filepath" "sync" "github.com/containerd/continuity" - "github.com/pkg/errors" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // The container state machine in the CRI plugin: @@ -61,10 +61,9 @@ import ( // DELETED // statusVersion is current version of container status. -const statusVersion = "v1" // nolint +const statusVersion = "v1" // versionedStatus is the internal used versioned container status. -// nolint type versionedStatus struct { // Version indicates the version of the versioned container status. Version string @@ -97,6 +96,8 @@ type Status struct { // Unknown indicates that the container status is not fully loaded. // This field doesn't need to be checkpointed. Unknown bool `json:"-"` + // Resources has container runtime resource constraints + Resources *runtime.ContainerResources } // State returns current state of the container based on the container status. 
@@ -166,11 +167,11 @@ type StatusStorage interface { func StoreStatus(root, id string, status Status) (StatusStorage, error) { data, err := status.encode() if err != nil { - return nil, errors.Wrap(err, "failed to encode status") + return nil, fmt.Errorf("failed to encode status: %w", err) } path := filepath.Join(root, "status") if err := continuity.AtomicWriteFile(path, data, 0600); err != nil { - return nil, errors.Wrapf(err, "failed to checkpoint status to %q", path) + return nil, fmt.Errorf("failed to checkpoint status to %q: %w", path, err) } return &statusStorage{ path: path, @@ -182,13 +183,13 @@ func StoreStatus(root, id string, status Status) (StatusStorage, error) { // writing to the file during loading. func LoadStatus(root, id string) (Status, error) { path := filepath.Join(root, "status") - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { - return Status{}, errors.Wrapf(err, "failed to read status from %q", path) + return Status{}, fmt.Errorf("failed to read status from %q: %w", path, err) } var status Status if err := status.decode(data); err != nil { - return Status{}, errors.Wrapf(err, "failed to decode status %q", data) + return Status{}, fmt.Errorf("failed to decode status %q: %w", data, err) } return status, nil } @@ -203,7 +204,58 @@ type statusStorage struct { func (s *statusStorage) Get() Status { s.RLock() defer s.RUnlock() - return s.status + // Deep copy is needed in case some fields in Status are updated after Get() + // is called. + return deepCopyOf(s.status) +} + +func deepCopyOf(s Status) Status { + copy := s + // Resources is the only field that is a pointer, and therefore needs + // a manual deep copy. + // This will need updates when new fields are added to ContainerResources. 
+ if s.Resources == nil { + return copy + } + copy.Resources = &runtime.ContainerResources{} + if s.Resources != nil && s.Resources.Linux != nil { + hugepageLimits := make([]*runtime.HugepageLimit, 0, len(s.Resources.Linux.HugepageLimits)) + for _, l := range s.Resources.Linux.HugepageLimits { + if l != nil { + hugepageLimits = append(hugepageLimits, &runtime.HugepageLimit{ + PageSize: l.PageSize, + Limit: l.Limit, + }) + } + } + copy.Resources = &runtime.ContainerResources{ + Linux: &runtime.LinuxContainerResources{ + CpuPeriod: s.Resources.Linux.CpuPeriod, + CpuQuota: s.Resources.Linux.CpuQuota, + CpuShares: s.Resources.Linux.CpuShares, + CpusetCpus: s.Resources.Linux.CpusetCpus, + CpusetMems: s.Resources.Linux.CpusetMems, + MemoryLimitInBytes: s.Resources.Linux.MemoryLimitInBytes, + MemorySwapLimitInBytes: s.Resources.Linux.MemorySwapLimitInBytes, + OomScoreAdj: s.Resources.Linux.OomScoreAdj, + Unified: s.Resources.Linux.Unified, + HugepageLimits: hugepageLimits, + }, + } + } + + if s.Resources != nil && s.Resources.Windows != nil { + copy.Resources = &runtime.ContainerResources{ + Windows: &runtime.WindowsContainerResources{ + CpuShares: s.Resources.Windows.CpuShares, + CpuCount: s.Resources.Windows.CpuCount, + CpuMaximum: s.Resources.Windows.CpuMaximum, + MemoryLimitInBytes: s.Resources.Windows.MemoryLimitInBytes, + RootfsSizeInBytes: s.Resources.Windows.RootfsSizeInBytes, + }, + } + } + return copy } // UpdateSync updates the container status and the on disk checkpoint. 
@@ -216,10 +268,10 @@ func (s *statusStorage) UpdateSync(u UpdateFunc) error { } data, err := newStatus.encode() if err != nil { - return errors.Wrap(err, "failed to encode status") + return fmt.Errorf("failed to encode status: %w", err) } if err := continuity.AtomicWriteFile(s.path, data, 0600); err != nil { - return errors.Wrapf(err, "failed to checkpoint status to %q", s.path) + return fmt.Errorf("failed to checkpoint status to %q: %w", s.path, err) } s.status = newStatus return nil diff --git a/pkg/cri/store/container/status_test.go b/pkg/cri/store/container/status_test.go index 702cc26..b13086b 100644 --- a/pkg/cri/store/container/status_test.go +++ b/pkg/cri/store/container/status_test.go @@ -19,7 +19,6 @@ package container import ( "encoding/json" "errors" - "io/ioutil" "os" "path/filepath" "testing" @@ -27,7 +26,7 @@ import ( assertlib "github.com/stretchr/testify/assert" requirelib "github.com/stretchr/testify/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestContainerState(t *testing.T) { @@ -115,9 +114,7 @@ func TestStatus(t *testing.T) { assert := assertlib.New(t) require := requirelib.New(t) - tempDir, err := ioutil.TempDir(os.TempDir(), "status-test") - require.NoError(err) - defer os.RemoveAll(tempDir) + tempDir := t.TempDir() statusFile := filepath.Join(tempDir, "status") t.Logf("simple store and get") @@ -133,8 +130,7 @@ func TestStatus(t *testing.T) { t.Logf("failed update should not take effect") err = s.Update(func(o Status) (Status, error) { - o = updateStatus - return o, updateErr + return updateStatus, updateErr }) assert.Equal(updateErr, err) assert.Equal(testStatus, s.Get()) @@ -144,8 +140,7 @@ func TestStatus(t *testing.T) { t.Logf("successful update should take effect but not checkpoint") err = s.Update(func(o Status) (Status, error) { - o = updateStatus - return o, nil + return updateStatus, nil }) assert.NoError(err) assert.Equal(updateStatus, s.Get()) @@ -154,14 
+149,12 @@ func TestStatus(t *testing.T) { assert.Equal(testStatus, loaded) // Recover status. assert.NoError(s.Update(func(o Status) (Status, error) { - o = testStatus - return o, nil + return testStatus, nil })) t.Logf("failed update sync should not take effect") err = s.UpdateSync(func(o Status) (Status, error) { - o = updateStatus - return o, updateErr + return updateStatus, updateErr }) assert.Equal(updateErr, err) assert.Equal(testStatus, s.Get()) @@ -171,8 +164,7 @@ func TestStatus(t *testing.T) { t.Logf("successful update sync should take effect and checkpoint") err = s.UpdateSync(func(o Status) (Status, error) { - o = updateStatus - return o, nil + return updateStatus, nil }) assert.NoError(err) assert.Equal(updateStatus, s.Get()) diff --git a/pkg/cri/store/errors_test.go b/pkg/cri/store/errors_test.go index 4171e0b..11b38bf 100644 --- a/pkg/cri/store/errors_test.go +++ b/pkg/cri/store/errors_test.go @@ -19,14 +19,14 @@ package store import ( "testing" + "github.com/containerd/containerd/errdefs" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - - "github.com/containerd/containerd/errdefs" ) func TestStoreErrAlreadyExistGRPCStatus(t *testing.T) { - err := errdefs.ToGRPC(ErrAlreadyExist) + err := errdefs.ToGRPC(errdefs.ErrAlreadyExists) s, ok := status.FromError(err) if !ok { t.Fatalf("failed to convert err: %v to status: %d", err, codes.AlreadyExists) @@ -37,7 +37,7 @@ func TestStoreErrAlreadyExistGRPCStatus(t *testing.T) { } func TestStoreErrNotExistGRPCStatus(t *testing.T) { - err := errdefs.ToGRPC(ErrNotExist) + err := errdefs.ToGRPC(errdefs.ErrNotFound) s, ok := status.FromError(err) if !ok { t.Fatalf("failed to convert err: %v to status: %d", err, codes.NotFound) diff --git a/pkg/cri/store/image/fake_image.go b/pkg/cri/store/image/fake_image.go index e1d6b7c..4b46360 100644 --- a/pkg/cri/store/image/fake_image.go +++ b/pkg/cri/store/image/fake_image.go @@ -16,7 +16,7 @@ package image -import "github.com/pkg/errors" +import "fmt" // 
NewFakeStore returns an image store with predefined images. // Update is not allowed for this fake store. @@ -27,7 +27,7 @@ func NewFakeStore(images []Image) (*Store, error) { s.refCache[ref] = i.ID } if err := s.store.add(i); err != nil { - return nil, errors.Wrapf(err, "add image %+v", i) + return nil, fmt.Errorf("add image %+v: %w", i, err) } } return s, nil diff --git a/pkg/cri/store/image/image.go b/pkg/cri/store/image/image.go index 27cd8bb..45a3ad5 100644 --- a/pkg/cri/store/image/image.go +++ b/pkg/cri/store/image/image.go @@ -19,19 +19,19 @@ package image import ( "context" "encoding/json" + "fmt" "sync" "github.com/containerd/containerd" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/pkg/cri/labels" + "github.com/containerd/containerd/pkg/cri/util" + imagedigest "github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest/digestset" imageidentity "github.com/opencontainers/image-spec/identity" imagespec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - - storeutil "github.com/containerd/containerd/pkg/cri/store" - "github.com/containerd/containerd/pkg/cri/util" ) // Image contains all resources associated with the image. All fields @@ -47,6 +47,8 @@ type Image struct { Size int64 // ImageSpec is the oci image structure which describes basic information about the image. ImageSpec imagespec.Image + // Pinned image to prevent it from garbage collection + Pinned bool } // Store stores all images. 
@@ -78,13 +80,13 @@ func (s *Store) Update(ctx context.Context, ref string) error { defer s.lock.Unlock() i, err := s.client.GetImage(ctx, ref) if err != nil && !errdefs.IsNotFound(err) { - return errors.Wrap(err, "get image from containerd") + return fmt.Errorf("get image from containerd: %w", err) } var img *Image if err == nil { img, err = getImage(ctx, i) if err != nil { - return errors.Wrap(err, "get image info from containerd") + return fmt.Errorf("get image info from containerd: %w", err) } } return s.update(ref, img) @@ -120,37 +122,41 @@ func getImage(ctx context.Context, i containerd.Image) (*Image, error) { // Get image information. diffIDs, err := i.RootFS(ctx) if err != nil { - return nil, errors.Wrap(err, "get image diffIDs") + return nil, fmt.Errorf("get image diffIDs: %w", err) } chainID := imageidentity.ChainID(diffIDs) size, err := i.Size(ctx) if err != nil { - return nil, errors.Wrap(err, "get image compressed resource size") + return nil, fmt.Errorf("get image compressed resource size: %w", err) } desc, err := i.Config(ctx) if err != nil { - return nil, errors.Wrap(err, "get image config descriptor") + return nil, fmt.Errorf("get image config descriptor: %w", err) } id := desc.Digest.String() rb, err := content.ReadBlob(ctx, i.ContentStore(), desc) if err != nil { - return nil, errors.Wrap(err, "read image config from content store") + return nil, fmt.Errorf("read image config from content store: %w", err) } var ociimage imagespec.Image if err := json.Unmarshal(rb, &ociimage); err != nil { - return nil, errors.Wrapf(err, "unmarshal image config %s", rb) + return nil, fmt.Errorf("unmarshal image config %s: %w", rb, err) } + pinned := i.Labels()[labels.PinnedImageLabelKey] == labels.PinnedImageLabelValue + return &Image{ ID: id, References: []string{i.Name()}, ChainID: chainID.String(), Size: size, ImageSpec: ociimage, + Pinned: pinned, }, nil + } // Resolve resolves a image reference to image id. 
@@ -159,7 +165,7 @@ func (s *Store) Resolve(ref string) (string, error) { defer s.lock.RUnlock() id, ok := s.refCache[ref] if !ok { - return "", storeutil.ErrNotExist + return "", errdefs.ErrNotFound } return id, nil } @@ -222,14 +228,14 @@ func (s *store) get(id string) (Image, error) { digest, err := s.digestSet.Lookup(id) if err != nil { if err == digestset.ErrDigestNotFound { - err = storeutil.ErrNotExist + err = errdefs.ErrNotFound } return Image{}, err } if i, ok := s.images[digest.String()]; ok { return i, nil } - return Image{}, storeutil.ErrNotExist + return Image{}, errdefs.ErrNotFound } func (s *store) delete(id, ref string) { @@ -251,6 +257,6 @@ func (s *store) delete(id, ref string) { return } // Remove the image if it is not referenced any more. - s.digestSet.Remove(digest) // nolint: errcheck + s.digestSet.Remove(digest) delete(s.images, digest.String()) } diff --git a/pkg/cri/store/image/image_test.go b/pkg/cri/store/image/image_test.go index 08bd266..5fda8a7 100644 --- a/pkg/cri/store/image/image_test.go +++ b/pkg/cri/store/image/image_test.go @@ -21,10 +21,10 @@ import ( "strings" "testing" + "github.com/containerd/containerd/errdefs" + "github.com/opencontainers/go-digest/digestset" assertlib "github.com/stretchr/testify/assert" - - storeutil "github.com/containerd/containerd/pkg/cri/store" ) func TestInternalStore(t *testing.T) { @@ -128,7 +128,7 @@ func TestInternalStore(t *testing.T) { t.Logf("should be able to delete image") s.delete(truncID, newRef) got, err = s.get(truncID) - assert.Equal(storeutil.ErrNotExist, err) + assert.Equal(errdefs.ErrNotFound, err) assert.Equal(Image{}, got) imageNum-- @@ -241,7 +241,7 @@ func TestImageStore(t *testing.T) { if test.image == nil { // Shouldn't be able to index by removed ref. 
id, err := s.Resolve(test.ref) - assert.Equal(storeutil.ErrNotExist, err) + assert.Equal(errdefs.ErrNotFound, err) assert.Empty(id) } } diff --git a/pkg/cri/store/image/sort_test.go b/pkg/cri/store/image/sort_test.go index 65f0d91..00731ec 100644 --- a/pkg/cri/store/image/sort_test.go +++ b/pkg/cri/store/image/sort_test.go @@ -18,7 +18,6 @@ package image import ( "io" - "io/ioutil" "math/rand" "testing" @@ -27,7 +26,7 @@ import ( func TestReferenceSorting(t *testing.T) { digested := func(seed int64) string { - b, err := ioutil.ReadAll(io.LimitReader(rand.New(rand.NewSource(seed)), 64)) + b, err := io.ReadAll(io.LimitReader(rand.New(rand.NewSource(seed)), 64)) if err != nil { panic(err) } diff --git a/pkg/cri/store/sandbox/metadata.go b/pkg/cri/store/sandbox/metadata.go index f8c5d4d..20fe2f1 100644 --- a/pkg/cri/store/sandbox/metadata.go +++ b/pkg/cri/store/sandbox/metadata.go @@ -18,10 +18,10 @@ package sandbox import ( "encoding/json" + "fmt" cni "github.com/containerd/go-cni" - "github.com/pkg/errors" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // NOTE(random-liu): @@ -29,10 +29,9 @@ import ( // 2) Metadata is checkpointed as containerd container label. // metadataVersion is current version of sandbox metadata. -const metadataVersion = "v1" // nolint +const metadataVersion = "v1" // versionedMetadata is the internal versioned sandbox metadata. -// nolint type versionedMetadata struct { // Version indicates the version of the versioned sandbox metadata. 
Version string @@ -85,5 +84,5 @@ func (c *Metadata) UnmarshalJSON(data []byte) error { *c = Metadata(versioned.Metadata) return nil } - return errors.Errorf("unsupported version: %q", versioned.Version) + return fmt.Errorf("unsupported version: %q", versioned.Version) } diff --git a/pkg/cri/store/sandbox/metadata_test.go b/pkg/cri/store/sandbox/metadata_test.go index d0a51d9..22117a9 100644 --- a/pkg/cri/store/sandbox/metadata_test.go +++ b/pkg/cri/store/sandbox/metadata_test.go @@ -21,7 +21,7 @@ import ( "testing" assertlib "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestMetadataMarshalUnmarshal(t *testing.T) { diff --git a/pkg/cri/store/sandbox/sandbox.go b/pkg/cri/store/sandbox/sandbox.go index 4addbe0..d9a55d3 100644 --- a/pkg/cri/store/sandbox/sandbox.go +++ b/pkg/cri/store/sandbox/sandbox.go @@ -20,10 +20,11 @@ import ( "sync" "github.com/containerd/containerd" - "github.com/containerd/containerd/pkg/cri/store/label" - "github.com/containerd/containerd/pkg/cri/store/truncindex" - + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/pkg/cri/store" + "github.com/containerd/containerd/pkg/cri/store/label" + "github.com/containerd/containerd/pkg/cri/store/stats" + "github.com/containerd/containerd/pkg/cri/store/truncindex" "github.com/containerd/containerd/pkg/netns" ) @@ -42,6 +43,8 @@ type Sandbox struct { NetNS *netns.NetNS // StopCh is used to propagate the stop information of the sandbox. *store.StopCh + // Stats contains (mutable) stats for the (pause) sandbox container + Stats *stats.ContainerStats } // NewSandbox creates an internally used sandbox type. 
This functions reminds @@ -80,7 +83,7 @@ func (s *Store) Add(sb Sandbox) error { s.lock.Lock() defer s.lock.Unlock() if _, ok := s.sandboxes[sb.ID]; ok { - return store.ErrAlreadyExist + return errdefs.ErrAlreadyExists } if err := s.labels.Reserve(sb.ProcessLabel); err != nil { return err @@ -100,14 +103,14 @@ func (s *Store) Get(id string) (Sandbox, error) { id, err := s.idIndex.Get(id) if err != nil { if err == truncindex.ErrNotExist { - err = store.ErrNotExist + err = errdefs.ErrNotFound } return Sandbox{}, err } if sb, ok := s.sandboxes[id]; ok { return sb, nil } - return Sandbox{}, store.ErrNotExist + return Sandbox{}, errdefs.ErrNotFound } // List lists all sandboxes. @@ -121,6 +124,27 @@ func (s *Store) List() []Sandbox { return sandboxes } +func (s *Store) UpdateContainerStats(id string, newContainerStats *stats.ContainerStats) error { + s.lock.Lock() + defer s.lock.Unlock() + id, err := s.idIndex.Get(id) + if err != nil { + if err == truncindex.ErrNotExist { + err = errdefs.ErrNotFound + } + return err + } + + if _, ok := s.sandboxes[id]; !ok { + return errdefs.ErrNotFound + } + + c := s.sandboxes[id] + c.Stats = newContainerStats + s.sandboxes[id] = c + return nil +} + // Delete deletes the sandbox with specified id. 
func (s *Store) Delete(id string) { s.lock.Lock() @@ -132,6 +156,6 @@ func (s *Store) Delete(id string) { return } s.labels.Release(s.sandboxes[id].ProcessLabel) - s.idIndex.Delete(id) // nolint: errcheck + s.idIndex.Delete(id) delete(s.sandboxes, id) } diff --git a/pkg/cri/store/sandbox/sandbox_test.go b/pkg/cri/store/sandbox/sandbox_test.go index b26de24..68ff056 100644 --- a/pkg/cri/store/sandbox/sandbox_test.go +++ b/pkg/cri/store/sandbox/sandbox_test.go @@ -18,12 +18,14 @@ package sandbox import ( "testing" + "time" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/pkg/cri/store/label" - assertlib "github.com/stretchr/testify/assert" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "github.com/containerd/containerd/pkg/cri/store/stats" - "github.com/containerd/containerd/pkg/cri/store" + assertlib "github.com/stretchr/testify/assert" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestSandboxStore(t *testing.T) { @@ -109,6 +111,24 @@ func TestSandboxStore(t *testing.T) { }, Status{State: StateUnknown}, ) + stats := map[string]*stats.ContainerStats{ + "1": { + Timestamp: time.Now(), + UsageCoreNanoSeconds: 1, + }, + "2abcd": { + Timestamp: time.Now(), + UsageCoreNanoSeconds: 2, + }, + "4a333": { + Timestamp: time.Now(), + UsageCoreNanoSeconds: 3, + }, + "4abcd": { + Timestamp: time.Now(), + UsageCoreNanoSeconds: 4, + }, + } assert := assertlib.New(t) s := NewStore(label.NewStore()) @@ -136,11 +156,24 @@ func TestSandboxStore(t *testing.T) { sbs := s.List() assert.Len(sbs, sbNum) + t.Logf("should be able to update stats on container") + for id := range sandboxes { + err := s.UpdateContainerStats(id, stats[id]) + assert.NoError(err) + } + + // Validate stats were updated + sbs = s.List() + assert.Len(sbs, sbNum) + for _, sb := range sbs { + assert.Equal(stats[sb.ID], sb.Stats) + } + for testID, v := range sandboxes { truncID := genTruncIndex(testID) t.Logf("add should return already exists error for duplicated sandbox") - 
assert.Equal(store.ErrAlreadyExist, s.Add(v)) + assert.Equal(errdefs.ErrAlreadyExists, s.Add(v)) t.Logf("should be able to delete sandbox") s.Delete(truncID) @@ -151,6 +184,6 @@ func TestSandboxStore(t *testing.T) { t.Logf("get should return not exist error after deletion") sb, err := s.Get(truncID) assert.Equal(Sandbox{}, sb) - assert.Equal(store.ErrNotExist, err) + assert.Equal(errdefs.ErrNotFound, err) } } diff --git a/pkg/cri/store/sandbox/status.go b/pkg/cri/store/sandbox/status.go index e9198eb..dff96cc 100644 --- a/pkg/cri/store/sandbox/status.go +++ b/pkg/cri/store/sandbox/status.go @@ -21,7 +21,7 @@ import ( "sync" "time" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // The sandbox state machine in the CRI plugin: @@ -29,24 +29,24 @@ import ( // | | // | Create(Run) | Load // | | -// Start | | -// (failed) | | -// +------------------+ +-----------+ -// | | | | -// | | | | -// | | | | -// | | Start(Run) | | -// | | | | -// | PortForward +----v----+ | | -// | +------+ | | | -// | | | READY <---------+ | -// | +------> | | | -// | +----+----+ | | -// | | | | -// | | Stop/Exit | | -// | | | | -// | +----v----+ | | -// | | <---------+ +----v----+ +// | | +// | | Start +// | |(failed and not cleaned) +// Start |--------------|--------------+ +//(failed but cleaned)| | | +// +------------------+ |-----------+ | +// | | Start(Run) | | | +// | | | | | +// | PortForward +----v----+ | | | +// | +------+ | | | | +// | | | READY <---------+ | | +// | +------> | | | | +// | +----+----+ | | | +// | | | | | +// | | Stop/Exit | | | +// | | | | | +// | +----v----+ | | | +// | | <---------+ +----v--v-+ // | | NOTREADY| | | // | | <----------------+ UNKNOWN | // | +----+----+ Stop | | diff --git a/pkg/cri/store/sandbox/status_test.go b/pkg/cri/store/sandbox/status_test.go index ad27db0..fc74629 100644 --- a/pkg/cri/store/sandbox/status_test.go +++ b/pkg/cri/store/sandbox/status_test.go @@ -45,16 +45,14 @@ func 
TestStatus(t *testing.T) { t.Logf("failed update should not take effect") err := s.Update(func(o Status) (Status, error) { - o = updateStatus - return o, updateErr + return updateStatus, updateErr }) assert.Equal(updateErr, err) assert.Equal(testStatus, s.Get()) t.Logf("successful update should take effect but not checkpoint") err = s.Update(func(o Status) (Status, error) { - o = updateStatus - return o, nil + return updateStatus, nil }) assert.NoError(err) assert.Equal(updateStatus, s.Get()) diff --git a/pkg/cri/store/snapshot/snapshot.go b/pkg/cri/store/snapshot/snapshot.go index 1e36298..ca046f1 100644 --- a/pkg/cri/store/snapshot/snapshot.go +++ b/pkg/cri/store/snapshot/snapshot.go @@ -19,9 +19,8 @@ package snapshot import ( "sync" + "github.com/containerd/containerd/errdefs" snapshot "github.com/containerd/containerd/snapshots" - - "github.com/containerd/containerd/pkg/cri/store" ) // Snapshot contains the information about the snapshot. @@ -65,7 +64,7 @@ func (s *Store) Get(key string) (Snapshot, error) { if sn, ok := s.snapshots[key]; ok { return sn, nil } - return Snapshot{}, store.ErrNotExist + return Snapshot{}, errdefs.ErrNotFound } // List lists all snapshots. 
diff --git a/pkg/cri/store/snapshot/snapshot_test.go b/pkg/cri/store/snapshot/snapshot_test.go index de263fd..5c62976 100644 --- a/pkg/cri/store/snapshot/snapshot_test.go +++ b/pkg/cri/store/snapshot/snapshot_test.go @@ -20,10 +20,10 @@ import ( "testing" "time" + "github.com/containerd/containerd/errdefs" snapshot "github.com/containerd/containerd/snapshots" - assertlib "github.com/stretchr/testify/assert" - "github.com/containerd/containerd/pkg/cri/store" + assertlib "github.com/stretchr/testify/assert" ) func TestSnapshotStore(t *testing.T) { @@ -80,5 +80,5 @@ func TestSnapshotStore(t *testing.T) { t.Logf("get should return empty struct and ErrNotExist after deletion") sn, err := s.Get(testKey) assert.Equal(Snapshot{}, sn) - assert.Equal(store.ErrNotExist, err) + assert.Equal(errdefs.ErrNotFound, err) } diff --git a/pkg/cri/store/stats/stats.go b/pkg/cri/store/stats/stats.go new file mode 100644 index 0000000..b2dd581 --- /dev/null +++ b/pkg/cri/store/stats/stats.go @@ -0,0 +1,27 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package stats + +import "time" + +// ContainerStats contains the information about container stats. +type ContainerStats struct { + // Timestamp of when stats were collected + Timestamp time.Time + // Cumulative CPU usage (sum across all cores) since object creation. 
+ UsageCoreNanoSeconds uint64 +} diff --git a/pkg/cri/streaming/portforward/httpstream.go b/pkg/cri/streaming/portforward/httpstream.go index f961cdb..e157038 100644 --- a/pkg/cri/streaming/portforward/httpstream.go +++ b/pkg/cri/streaming/portforward/httpstream.go @@ -38,6 +38,7 @@ import ( "net/http" "strconv" "sync" + "syscall" "time" api "k8s.io/api/core/v1" @@ -155,11 +156,11 @@ func (h *httpStreamHandler) getStreamPair(requestID string) (*httpStreamPair, bo func (h *httpStreamHandler) monitorStreamPair(p *httpStreamPair, timeout <-chan time.Time) { select { case <-timeout: - err := fmt.Errorf("(conn=%v, request=%s) timed out waiting for streams", h.conn, p.requestID) + err := fmt.Errorf("(conn=%p, request=%s) timed out waiting for streams", h.conn, p.requestID) utilruntime.HandleError(err) p.printError(err.Error()) case <-p.complete: - klog.V(5).Infof("(conn=%v, request=%s) successfully received error and data streams", h.conn, p.requestID) + klog.V(5).Infof("(conn=%p, request=%s) successfully received error and data streams", h.conn, p.requestID) } h.removeStreamPair(p.requestID) } @@ -242,8 +243,16 @@ Loop: // portForward invokes the httpStreamHandler's forwarder.PortForward // function for the given stream pair. 
func (h *httpStreamHandler) portForward(p *httpStreamPair) { - defer p.dataStream.Close() - defer p.errorStream.Close() + resetStreams := false + defer func() { + if resetStreams { + p.dataStream.Reset() + p.errorStream.Reset() + return + } + p.dataStream.Close() + p.errorStream.Close() + }() portString := p.dataStream.Headers().Get(api.PortHeader) port, _ := strconv.ParseInt(portString, 10, 32) @@ -252,11 +261,34 @@ func (h *httpStreamHandler) portForward(p *httpStreamPair) { err := h.forwarder.PortForward(h.pod, h.uid, int32(port), p.dataStream) klog.V(5).Infof("(conn=%p, request=%s) done invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) - if err != nil { - msg := fmt.Errorf("error forwarding port %d to pod %s, uid %v: %v", port, h.pod, h.uid, err) - utilruntime.HandleError(msg) - fmt.Fprint(p.errorStream, msg.Error()) + // happy path, we have successfully completed forwarding task + if err == nil { + return } + + if errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) { + // In the process of forwarding, we encountered error types that can be handled: + // + // These two errors can occur in the following scenarios: + // ECONNRESET: the target process reset connection between CRI and itself. + // see: https://github.com/kubernetes/kubernetes/issues/111825 for detail + // + // EPIPE: the target process did not read the received data, causing the + // buffer in the kernel to be full, resulting in the occurrence of Zero Window, + // then closing the connection (FIN, RESET) + // see: https://github.com/kubernetes/kubernetes/issues/74551 for detail + // + // In both cases, we should RESET the httpStream. 
+ klog.ErrorS(err, "forwarding port", "conn", h.conn, "request", p.requestID, "port", portString) + resetStreams = true + return + } + + // We don't know how to deal with other types of errors, + // try to forward them to errStream, let our user know what happened + msg := fmt.Errorf("error forwarding port %d to pod %s, uid %v: %v", port, h.pod, h.uid, err) + utilruntime.HandleError(msg) + fmt.Fprint(p.errorStream, msg.Error()) } // httpStreamPair represents the error and data streams for a port diff --git a/pkg/cri/streaming/request_cache.go b/pkg/cri/streaming/request_cache.go index 36d6921..630b3ce 100644 --- a/pkg/cri/streaming/request_cache.go +++ b/pkg/cri/streaming/request_cache.go @@ -41,7 +41,7 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" ) var ( diff --git a/pkg/cri/streaming/server.go b/pkg/cri/streaming/server.go index 4f54eeb..244d195 100644 --- a/pkg/cri/streaming/server.go +++ b/pkg/cri/streaming/server.go @@ -45,12 +45,12 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - restful "github.com/emicklei/go-restful" + restful "github.com/emicklei/go-restful/v3" "k8s.io/apimachinery/pkg/types" remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" "k8s.io/client-go/tools/remotecommand" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/containerd/containerd/pkg/cri/streaming/portforward" remotecommandserver "github.com/containerd/containerd/pkg/cri/streaming/remotecommand" @@ -165,6 +165,8 @@ func NewServer(config Config, runtime Runtime) (Server, error) { Addr: s.config.Addr, Handler: s.handler, TLSConfig: s.config.TLSConfig, + // TODO(fuweid): allow user to configure streaming server + ReadHeaderTimeout: 30 * time.Minute, // Fix linter G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server } return s, nil diff --git a/pkg/cri/util/deep_copy.go 
b/pkg/cri/util/deep_copy.go index d0e0bf3..cf027ea 100644 --- a/pkg/cri/util/deep_copy.go +++ b/pkg/cri/util/deep_copy.go @@ -18,8 +18,8 @@ package util import ( "encoding/json" - - "github.com/pkg/errors" + "errors" + "fmt" ) // DeepCopy makes a deep copy from src into dst. @@ -32,11 +32,11 @@ func DeepCopy(dst interface{}, src interface{}) error { } bytes, err := json.Marshal(src) if err != nil { - return errors.Wrap(err, "unable to marshal src") + return fmt.Errorf("unable to marshal src: %w", err) } err = json.Unmarshal(bytes, dst) if err != nil { - return errors.Wrap(err, "unable to unmarshal into dst") + return fmt.Errorf("unable to unmarshal into dst: %w", err) } return nil } diff --git a/pkg/cri/util/id.go b/pkg/cri/util/id.go index 90f762b..194bab2 100644 --- a/pkg/cri/util/id.go +++ b/pkg/cri/util/id.go @@ -17,8 +17,8 @@ package util import ( + "crypto/rand" "encoding/hex" - "math/rand" ) // GenerateID generates a random unique id. diff --git a/pkg/dialer/dialer.go b/pkg/dialer/dialer.go index aa604ba..74c303b 100644 --- a/pkg/dialer/dialer.go +++ b/pkg/dialer/dialer.go @@ -18,10 +18,9 @@ package dialer import ( "context" + "fmt" "net" "time" - - "github.com/pkg/errors" ) type dialResult struct { @@ -74,6 +73,6 @@ func timeoutDialer(address string, timeout time.Duration) (net.Conn, error) { dr.c.Close() } }() - return nil, errors.Errorf("dial %s: timeout", address) + return nil, fmt.Errorf("dial %s: timeout", address) } } diff --git a/pkg/dialer/dialer_unix.go b/pkg/dialer/dialer_unix.go index e7d1958..b4304ff 100644 --- a/pkg/dialer/dialer_unix.go +++ b/pkg/dialer/dialer_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/pkg/failpoint/fail.go b/pkg/failpoint/fail.go new file mode 100644 index 0000000..d616ec9 --- /dev/null +++ b/pkg/failpoint/fail.go @@ -0,0 +1,310 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package failpoint provides the code point in the path, which can be controlled +// by user's variable. +// +// Inspired by FreeBSD fail(9): https://freebsd.org/cgi/man.cgi?query=fail. +package failpoint + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "sync" + "time" +) + +// EvalFn is the func type about delegated evaluation. +type EvalFn func() error + +// Type is the type of failpoint to specifies which action to take. +type Type int + +const ( + // TypeInvalid is invalid type + TypeInvalid Type = iota + // TypeOff takes no action + TypeOff + // TypeError triggers failpoint error with specified argument + TypeError + // TypePanic triggers panic with specified argument + TypePanic + // TypeDelay sleeps with the specified number of milliseconds + TypeDelay +) + +// String returns the name of type. +func (t Type) String() string { + switch t { + case TypeOff: + return "off" + case TypeError: + return "error" + case TypePanic: + return "panic" + case TypeDelay: + return "delay" + default: + return "invalid" + } +} + +// Failpoint is used to add code points where error or panic may be injected by +// user. The user controlled variable will be parsed for how the error injected +// code should fire. There is the way to set the rule for failpoint. 
+//
+// <count>*<type>[(arg)][-><more terms>]
+//
+// The argument specifies which action to take; it can be one of:
+//
+// off: Takes no action (does not trigger failpoint and no argument)
+// error: Triggers failpoint error with specified argument(string)
+// panic: Triggers panic with specified argument(string)
+// delay: Sleep the specified number of milliseconds
+//
+// The <count>* modifiers prior to <type> control when <type> is executed. For
+// example, "5*error(oops)" means "return error oops 5 times total". The
+// operator -> can be used to express cascading terms. If you specify
+// <term1>-><term2>, it means that if <term1> does not execute, <term2> will
+// be evaluated. If you want the error injected code should fire in second
+// call, you can specify "1*off->1*error(oops)".
+//
+// Inspired by FreeBSD fail(9): https://freebsd.org/cgi/man.cgi?query=fail.
+type Failpoint struct {
+	sync.Mutex
+
+	fnName  string
+	entries []*failpointEntry
+}
+
+// NewFailpoint returns failpoint control.
+func NewFailpoint(fnName string, terms string) (*Failpoint, error) {
+	entries, err := parseTerms([]byte(terms))
+	if err != nil {
+		return nil, err
+	}
+
+	return &Failpoint{
+		fnName:  fnName,
+		entries: entries,
+	}, nil
+}
+
+// Evaluate evaluates a failpoint.
+func (fp *Failpoint) Evaluate() error {
+	fn := fp.DelegatedEval()
+	return fn()
+}
+
+// DelegatedEval evaluates a failpoint but delegates to caller to fire that.
+func (fp *Failpoint) DelegatedEval() EvalFn {
+	var target *failpointEntry
+
+	func() {
+		fp.Lock()
+		defer fp.Unlock()
+
+		for _, entry := range fp.entries {
+			if entry.count == 0 {
+				continue
+			}
+
+			entry.count--
+			target = entry
+			break
+		}
+	}()
+
+	if target == nil {
+		return nopEvalFn
+	}
+	return target.evaluate
+}
+
+// Marshal returns the current state of control in string format.
+func (fp *Failpoint) Marshal() string { + fp.Lock() + defer fp.Unlock() + + res := make([]string, 0, len(fp.entries)) + for _, entry := range fp.entries { + res = append(res, entry.marshal()) + } + return strings.Join(res, "->") +} + +type failpointEntry struct { + typ Type + arg interface{} + count int64 +} + +func newFailpointEntry() *failpointEntry { + return &failpointEntry{ + typ: TypeInvalid, + count: 0, + } +} + +func (fpe *failpointEntry) marshal() string { + base := fmt.Sprintf("%d*%s", fpe.count, fpe.typ) + switch fpe.typ { + case TypeOff: + return base + case TypeError, TypePanic: + return fmt.Sprintf("%s(%s)", base, fpe.arg.(string)) + case TypeDelay: + return fmt.Sprintf("%s(%d)", base, fpe.arg.(time.Duration)/time.Millisecond) + default: + return base + } +} + +func (fpe *failpointEntry) evaluate() error { + switch fpe.typ { + case TypeOff: + return nil + case TypeError: + return fmt.Errorf("%v", fpe.arg) + case TypePanic: + panic(fpe.arg) + case TypeDelay: + time.Sleep(fpe.arg.(time.Duration)) + return nil + default: + panic("invalid failpoint type") + } +} + +func parseTerms(term []byte) ([]*failpointEntry, error) { + var entry *failpointEntry + var err error + + // count*type[(arg)] + term, entry, err = parseTerm(term) + if err != nil { + return nil, err + } + + res := []*failpointEntry{entry} + + // cascading terms + for len(term) > 0 { + if !bytes.HasPrefix(term, []byte("->")) { + return nil, fmt.Errorf("invalid cascading terms: %s", string(term)) + } + + term = term[2:] + term, entry, err = parseTerm(term) + if err != nil { + return nil, fmt.Errorf("failed to parse cascading term: %w", err) + } + + res = append(res, entry) + } + return res, nil +} + +func parseTerm(term []byte) ([]byte, *failpointEntry, error) { + var err error + var entry = newFailpointEntry() + + // count* + term, err = parseInt64(term, '*', &entry.count) + if err != nil { + return nil, nil, err + } + + // type[(arg)] + term, err = parseType(term, entry) + return term, entry, 
err +} + +func parseType(term []byte, entry *failpointEntry) ([]byte, error) { + var nameToTyp = map[string]Type{ + "off": TypeOff, + "error(": TypeError, + "panic(": TypePanic, + "delay(": TypeDelay, + } + + var found bool + for name, typ := range nameToTyp { + if bytes.HasPrefix(term, []byte(name)) { + found = true + term = term[len(name):] + entry.typ = typ + break + } + } + + if !found { + return nil, fmt.Errorf("invalid type format: %s", string(term)) + } + + switch entry.typ { + case TypePanic, TypeError: + endIdx := bytes.IndexByte(term, ')') + if endIdx <= 0 { + return nil, fmt.Errorf("invalid argument for %s type", entry.typ) + } + entry.arg = string(term[:endIdx]) + return term[endIdx+1:], nil + case TypeOff: + // do nothing + return term, nil + case TypeDelay: + var msVal int64 + var err error + + term, err = parseInt64(term, ')', &msVal) + if err != nil { + return nil, err + } + entry.arg = time.Millisecond * time.Duration(msVal) + return term, nil + default: + panic("unreachable") + } +} + +func parseInt64(term []byte, terminate byte, val *int64) ([]byte, error) { + i := 0 + + for ; i < len(term); i++ { + if b := term[i]; b < '0' || b > '9' { + break + } + } + + if i == 0 || i == len(term) || term[i] != terminate { + return nil, fmt.Errorf("failed to parse int64 because of invalid terminate byte: %s", string(term)) + } + + v, err := strconv.ParseInt(string(term[:i]), 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse int64 from %s: %v", string(term[:i]), err) + } + + *val = v + return term[i+1:], nil +} + +func nopEvalFn() error { + return nil +} diff --git a/pkg/failpoint/fail_test.go b/pkg/failpoint/fail_test.go new file mode 100644 index 0000000..1b79ff6 --- /dev/null +++ b/pkg/failpoint/fail_test.go @@ -0,0 +1,134 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package failpoint + +import ( + "reflect" + "testing" + "time" +) + +func TestParseTerms(t *testing.T) { + cases := []struct { + terms string + hasError bool + }{ + // off + {"5", true}, + {"*off()", true}, + {"5*off()", true}, + {"5*off(nothing)", true}, + {"5*off(", true}, + {"5*off", false}, + + // error + {"10000error(oops)", true}, + {"10*error(oops)", false}, + {"1234*error(oops))", true}, + {"12342*error()", true}, + + // panic + {"1panic(oops)", true}, + {"1000000*panic(oops)", false}, + {"12345*panic(oops))", true}, + {"12*panic()", true}, + + // delay + {"1*delay(oops)", true}, + {"1000000*delay(-1)", true}, + {"1000000*delay(1)", false}, + + // cascading terms + {"1*delay(1)-", true}, + {"10*delay(2)->", true}, + {"11*delay(3)->10*off(", true}, + {"12*delay(4)->10*of", true}, + {"13*delay(5)->10*off->1000*panic(oops)", false}, + } + + for i, c := range cases { + fp, err := NewFailpoint(t.Name(), c.terms) + + if (err != nil && !c.hasError) || + (err == nil && c.hasError) { + + t.Fatalf("[%v - %s] expected hasError=%v, but got %v", i, c.terms, c.hasError, err) + } + + if err != nil { + continue + } + + if got := fp.Marshal(); !reflect.DeepEqual(got, c.terms) { + t.Fatalf("[%v] expected %v, but got %v", i, c.terms, got) + } + } +} + +func TestEvaluate(t *testing.T) { + terms := "1*error(oops-)->1*off->1*delay(1000)->1*panic(panic)" + + fp, err := NewFailpoint(t.Name(), terms) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + injectedFn := func() error { + if err := fp.Evaluate(); err != nil { + return err + } + return nil + } + + // 
should return oops- error + if err := injectedFn(); err == nil || err.Error() != "oops-" { + t.Fatalf("expected error %v, but got %v", "oops-", err) + } + + // should return nil + if err := injectedFn(); err != nil { + t.Fatalf("expected nil, but got %v", err) + } + + // should sleep 1s and return nil + now := time.Now() + err = injectedFn() + du := time.Since(now) + if err != nil { + t.Fatalf("expected nil, but got %v", err) + } + if du < 1*time.Second { + t.Fatalf("expected sleep 1s, but got %v", du) + } + + // should panic + defer func() { + if err := recover(); err == nil || err.(string) != "panic" { + t.Fatalf("should panic(panic), but got %v", err) + } + + expected := "0*error(oops-)->0*off->0*delay(1000)->0*panic(panic)" + if got := fp.Marshal(); got != expected { + t.Fatalf("expected %v, but got %v", expected, got) + } + + if err := injectedFn(); err != nil { + t.Fatalf("expected nil, but got %v", err) + } + }() + injectedFn() +} diff --git a/pkg/ioutil/write_closer.go b/pkg/ioutil/write_closer.go index c816c51..97e1316 100644 --- a/pkg/ioutil/write_closer.go +++ b/pkg/ioutil/write_closer.go @@ -73,10 +73,10 @@ func (n *nopWriteCloser) Close() error { // serialWriteCloser wraps a write closer and makes sure all writes // are done in serial. // Parallel write won't intersect with each other. Use case: -// 1) Pipe: Write content longer than PIPE_BUF. -// See http://man7.org/linux/man-pages/man7/pipe.7.html -// 2) <3.14 Linux Kernel: write is not atomic -// See http://man7.org/linux/man-pages/man2/write.2.html +// 1. Pipe: Write content longer than PIPE_BUF. +// See http://man7.org/linux/man-pages/man7/pipe.7.html +// 2. 
<3.14 Linux Kernel: write is not atomic +// See http://man7.org/linux/man-pages/man2/write.2.html type serialWriteCloser struct { mu sync.Mutex wc io.WriteCloser diff --git a/pkg/ioutil/write_closer_test.go b/pkg/ioutil/write_closer_test.go index 25272a5..00b150d 100644 --- a/pkg/ioutil/write_closer_test.go +++ b/pkg/ioutil/write_closer_test.go @@ -17,7 +17,6 @@ package ioutil import ( - "io/ioutil" "os" "sort" "strconv" @@ -69,7 +68,7 @@ func TestSerialWriteCloser(t *testing.T) { testData[i] = []byte(repeatNumber(i, dataLen) + "\n") } - f, err := ioutil.TempFile("", "serial-write-closer") + f, err := os.CreateTemp("", "serial-write-closer") require.NoError(t, err) defer os.RemoveAll(f.Name()) defer f.Close() @@ -91,7 +90,7 @@ func TestSerialWriteCloser(t *testing.T) { wc.Close() // Check test result - content, err := ioutil.ReadFile(f.Name()) + content, err := os.ReadFile(f.Name()) require.NoError(t, err) resultData := strings.Split(strings.TrimSpace(string(content)), "\n") require.Len(t, resultData, goroutine) diff --git a/pkg/kmutex/kmutex.go b/pkg/kmutex/kmutex.go new file mode 100644 index 0000000..74846c0 --- /dev/null +++ b/pkg/kmutex/kmutex.go @@ -0,0 +1,105 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package kmutex provides synchronization primitives to lock/unlock resource by unique key. 
+package kmutex + +import ( + "context" + "fmt" + "sync" + + "golang.org/x/sync/semaphore" +) + +// KeyedLocker is the interface for acquiring locks based on string. +type KeyedLocker interface { + Lock(ctx context.Context, key string) error + Unlock(key string) +} + +func New() KeyedLocker { + return newKeyMutex() +} + +func newKeyMutex() *keyMutex { + return &keyMutex{ + locks: make(map[string]*klock), + } +} + +type keyMutex struct { + mu sync.Mutex + + locks map[string]*klock +} + +type klock struct { + *semaphore.Weighted + ref int +} + +func (km *keyMutex) Lock(ctx context.Context, key string) error { + km.mu.Lock() + + l, ok := km.locks[key] + if !ok { + km.locks[key] = &klock{ + Weighted: semaphore.NewWeighted(1), + } + l = km.locks[key] + } + l.ref++ + km.mu.Unlock() + + if err := l.Acquire(ctx, 1); err != nil { + km.mu.Lock() + defer km.mu.Unlock() + + l.ref-- + + if l.ref < 0 { + panic(fmt.Errorf("kmutex: release of unlocked key %v", key)) + } + + if l.ref == 0 { + delete(km.locks, key) + } + return err + } + return nil +} + +func (km *keyMutex) Unlock(key string) { + km.mu.Lock() + defer km.mu.Unlock() + + l, ok := km.locks[key] + if !ok { + panic(fmt.Errorf("kmutex: unlock of unlocked key %v", key)) + } + l.Release(1) + + l.ref-- + + if l.ref < 0 { + panic(fmt.Errorf("kmutex: released of unlocked key %v", key)) + } + + if l.ref == 0 { + delete(km.locks, key) + } +} diff --git a/pkg/kmutex/kmutex_test.go b/pkg/kmutex/kmutex_test.go new file mode 100644 index 0000000..6bfff47 --- /dev/null +++ b/pkg/kmutex/kmutex_test.go @@ -0,0 +1,170 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package kmutex + +import ( + "context" + "runtime" + "strconv" + "sync" + "testing" + "time" + + "github.com/containerd/containerd/pkg/randutil" + "github.com/stretchr/testify/assert" +) + +func TestBasic(t *testing.T) { + t.Parallel() + + km := newKeyMutex() + ctx := context.Background() + + km.Lock(ctx, "c1") + km.Lock(ctx, "c2") + + assert.Equal(t, len(km.locks), 2) + assert.Equal(t, km.locks["c1"].ref, 1) + assert.Equal(t, km.locks["c2"].ref, 1) + + checkWaitFn := func(key string, num int) { + retries := 100 + waitLock := false + + for i := 0; i < retries; i++ { + // prevent from data-race + km.mu.Lock() + ref := km.locks[key].ref + km.mu.Unlock() + + if ref == num { + waitLock = true + break + } + time.Sleep(time.Duration(randutil.Int63n(100)) * time.Millisecond) + } + assert.Equal(t, waitLock, true) + } + + // should acquire successfully after release + { + waitCh := make(chan struct{}) + go func() { + defer close(waitCh) + + km.Lock(ctx, "c1") + }() + checkWaitFn("c1", 2) + + km.Unlock("c1") + + <-waitCh + assert.Equal(t, km.locks["c1"].ref, 1) + } + + // failed to acquire if context cancel + { + var errCh = make(chan error, 1) + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + errCh <- km.Lock(ctx, "c1") + }() + + checkWaitFn("c1", 2) + + cancel() + assert.Equal(t, <-errCh, context.Canceled) + assert.Equal(t, km.locks["c1"].ref, 1) + } +} + +func TestReleasePanic(t *testing.T) { + t.Parallel() + + km := newKeyMutex() + + defer func() { + if recover() == nil { + t.Fatal("release of unlocked key did not panic") + } + }() + + 
km.Unlock(t.Name()) +} + +func TestMultileAcquireOnKeys(t *testing.T) { + t.Parallel() + + km := newKeyMutex() + nloops := 10000 + nproc := runtime.GOMAXPROCS(0) + ctx := context.Background() + + var wg sync.WaitGroup + for i := 0; i < nproc; i++ { + wg.Add(1) + + go func(key string) { + defer wg.Done() + + for i := 0; i < nloops; i++ { + km.Lock(ctx, key) + + time.Sleep(time.Duration(randutil.Int63n(100)) * time.Nanosecond) + + km.Unlock(key) + } + }("key-" + strconv.Itoa(i)) + } + wg.Wait() +} + +func TestMultiAcquireOnSameKey(t *testing.T) { + t.Parallel() + + km := newKeyMutex() + key := "c1" + ctx := context.Background() + + assert.Nil(t, km.Lock(ctx, key)) + + nproc := runtime.GOMAXPROCS(0) + nloops := 10000 + + var wg sync.WaitGroup + for i := 0; i < nproc; i++ { + wg.Add(1) + + go func() { + defer wg.Done() + + for i := 0; i < nloops; i++ { + km.Lock(ctx, key) + + time.Sleep(time.Duration(randutil.Int63n(100)) * time.Nanosecond) + + km.Unlock(key) + } + }() + } + km.Unlock(key) + wg.Wait() + + // c1 key has been released so the it should not have any klock. + assert.Equal(t, len(km.locks), 0) +} diff --git a/vendor/github.com/containerd/btrfs/ioctl.go b/pkg/kmutex/noop.go similarity index 74% rename from vendor/github.com/containerd/btrfs/ioctl.go rename to pkg/kmutex/noop.go index bac1dbd..66c46f1 100644 --- a/vendor/github.com/containerd/btrfs/ioctl.go +++ b/pkg/kmutex/noop.go @@ -14,14 +14,20 @@ limitations under the License. 
*/ -package btrfs +package kmutex -import "syscall" +import "context" -func ioctl(fd, request, args uintptr) error { - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, fd, request, args) - if errno != 0 { - return errno - } +func NewNoop() KeyedLocker { + return &noopMutex{} +} + +type noopMutex struct { +} + +func (*noopMutex) Lock(_ context.Context, _ string) error { return nil } + +func (*noopMutex) Unlock(_ string) { +} diff --git a/pkg/netns/netns_linux.go b/pkg/netns/netns_linux.go index d58f8ab..03f68a5 100644 --- a/pkg/netns/netns_linux.go +++ b/pkg/netns/netns_linux.go @@ -42,7 +42,6 @@ import ( "github.com/containerd/containerd/mount" cnins "github.com/containernetworking/plugins/pkg/ns" "github.com/moby/sys/symlink" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -53,8 +52,10 @@ import ( // path to the network namespace. func newNS(baseDir string) (nsPath string, err error) { b := make([]byte, 16) - if _, err := rand.Reader.Read(b); err != nil { - return "", errors.Wrap(err, "failed to generate random netns name") + + _, err = rand.Read(b) + if err != nil { + return "", fmt.Errorf("failed to generate random netns name: %w", err) } // Create the directory for mounting network namespaces @@ -64,10 +65,10 @@ func newNS(baseDir string) (nsPath string, err error) { return "", err } - // create an empty file at the mount point + // create an empty file at the mount point and fail if it already exists nsName := fmt.Sprintf("cni-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) nsPath = path.Join(baseDir, nsName) - mountPointFd, err := os.Create(nsPath) + mountPointFd, err := os.OpenFile(nsPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) if err != nil { return "", err } @@ -76,7 +77,7 @@ func newNS(baseDir string) (nsPath string, err error) { defer func() { // Ensure the mount point is cleaned up on errors if err != nil { - os.RemoveAll(nsPath) // nolint: errcheck + os.RemoveAll(nsPath) } }() @@ -106,20 +107,20 @@ func newNS(baseDir string) (nsPath 
string, err error) { } // Put this thread back to the orig ns, since it might get reused (pre go1.10) - defer origNS.Set() // nolint: errcheck + defer origNS.Set() // bind mount the netns from the current thread (from /proc) onto the // mount point. This causes the namespace to persist, even when there // are no threads in the ns. err = unix.Mount(getCurrentThreadNetNSPath(), nsPath, "none", unix.MS_BIND, "") if err != nil { - err = errors.Wrapf(err, "failed to bind mount ns at %s", nsPath) + err = fmt.Errorf("failed to bind mount ns at %s: %w", nsPath, err) } })() wg.Wait() if err != nil { - return "", errors.Wrap(err, "failed to create namespace") + return "", fmt.Errorf("failed to create namespace: %w", err) } return nsPath, nil @@ -131,17 +132,17 @@ func unmountNS(path string) error { if os.IsNotExist(err) { return nil } - return errors.Wrap(err, "failed to stat netns") + return fmt.Errorf("failed to stat netns: %w", err) } path, err := symlink.FollowSymlinkInScope(path, "/") if err != nil { - return errors.Wrap(err, "failed to follow symlink") + return fmt.Errorf("failed to follow symlink: %w", err) } if err := mount.Unmount(path, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "failed to umount netns") + return fmt.Errorf("failed to umount netns: %w", err) } if err := os.RemoveAll(path); err != nil { - return errors.Wrap(err, "failed to remove netns") + return fmt.Errorf("failed to remove netns: %w", err) } return nil } @@ -163,7 +164,7 @@ type NetNS struct { func NewNetNS(baseDir string) (*NetNS, error) { path, err := newNS(baseDir) if err != nil { - return nil, errors.Wrap(err, "failed to setup netns") + return nil, fmt.Errorf("failed to setup netns: %w", err) } return &NetNS{path: path}, nil } @@ -190,14 +191,14 @@ func (n *NetNS) Closed() (bool, error) { if _, ok := err.(cnins.NSPathNotNSErr); ok { // The network namespace is not mounted, remove it. 
if err := os.RemoveAll(n.path); err != nil { - return false, errors.Wrap(err, "remove netns") + return false, fmt.Errorf("remove netns: %w", err) } return true, nil } - return false, errors.Wrap(err, "get netns fd") + return false, fmt.Errorf("get netns fd: %w", err) } if err := ns.Close(); err != nil { - return false, errors.Wrap(err, "close netns fd") + return false, fmt.Errorf("close netns fd: %w", err) } return false, nil } @@ -211,8 +212,8 @@ func (n *NetNS) GetPath() string { func (n *NetNS) Do(f func(cnins.NetNS) error) error { ns, err := cnins.GetNS(n.path) if err != nil { - return errors.Wrap(err, "get netns fd") + return fmt.Errorf("get netns fd: %w", err) } - defer ns.Close() // nolint: errcheck + defer ns.Close() return ns.Do(f) } diff --git a/pkg/netns/netns_other.go b/pkg/netns/netns_other.go index e2000bf..ec8124c 100644 --- a/pkg/netns/netns_other.go +++ b/pkg/netns/netns_other.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* @@ -19,7 +20,7 @@ package netns import ( - "github.com/pkg/errors" + "errors" ) var errNotImplementedOnUnix = errors.New("not implemented on unix") diff --git a/pkg/netns/netns_windows.go b/pkg/netns/netns_windows.go index 1386f53..de02094 100644 --- a/pkg/netns/netns_windows.go +++ b/pkg/netns/netns_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
diff --git a/pkg/oom/oom.go b/pkg/oom/oom.go index 6d5bf27..bbc46f8 100644 --- a/pkg/oom/oom.go +++ b/pkg/oom/oom.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/pkg/oom/v1/v1.go b/pkg/oom/v1/v1.go index be8de74..1e8e670 100644 --- a/pkg/oom/v1/v1.go +++ b/pkg/oom/v1/v1.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,6 +21,7 @@ package v1 import ( "context" + "fmt" "sync" "github.com/containerd/cgroups" @@ -27,7 +29,6 @@ import ( "github.com/containerd/containerd/pkg/oom" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/v2/shim" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -92,7 +93,7 @@ func (e *epoller) Run(ctx context.Context) { func (e *epoller) Add(id string, cgx interface{}) error { cg, ok := cgx.(cgroups.Cgroup) if !ok { - return errors.Errorf("expected cgroups.Cgroup, got: %T", cgx) + return fmt.Errorf("expected cgroups.Cgroup, got: %T", cgx) } e.mu.Lock() defer e.mu.Unlock() diff --git a/pkg/oom/v2/v2.go b/pkg/oom/v2/v2.go index a956b1c..dbcb443 100644 --- a/pkg/oom/v2/v2.go +++ b/pkg/oom/v2/v2.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,13 +21,13 @@ package v2 import ( "context" + "fmt" cgroupsv2 "github.com/containerd/cgroups/v2" eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/pkg/oom" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/v2/shim" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -70,15 +71,15 @@ func (w *watcher) Run(ctx context.Context) { continue } lastOOM := lastOOMMap[i.id] - if i.ev.OOM > lastOOM { + if i.ev.OOMKill > lastOOM { if err := w.publisher.Publish(ctx, runtime.TaskOOMEventTopic, &eventstypes.TaskOOM{ ContainerID: i.id, }); err != nil { logrus.WithError(err).Error("publish OOM event") } } - if i.ev.OOM > 0 { - lastOOMMap[i.id] = i.ev.OOM + if i.ev.OOMKill > 0 { + lastOOMMap[i.id] = i.ev.OOMKill } } } @@ -88,7 
+89,7 @@ func (w *watcher) Run(ctx context.Context) { func (w *watcher) Add(id string, cgx interface{}) error { cg, ok := cgx.(*cgroupsv2.Manager) if !ok { - return errors.Errorf("expected *cgroupsv2.Manager, got: %T", cgx) + return fmt.Errorf("expected *cgroupsv2.Manager, got: %T", cgx) } // FIXME: cgroupsv2.Manager does not support closing eventCh routine currently. // The routine shuts down when an error happens, mostly when the cgroup is deleted. @@ -101,10 +102,13 @@ func (w *watcher) Add(id string, cgx interface{}) error { i.ev = ev w.itemCh <- i case err := <-errCh: - i.err = err - w.itemCh <- i - // we no longer get any event/err when we got an err - logrus.WithError(err).Warn("error from *cgroupsv2.Manager.EventChan") + // channel is closed when cgroup gets deleted + if err != nil { + i.err = err + w.itemCh <- i + // we no longer get any event/err when we got an err + logrus.WithError(err).Warn("error from *cgroupsv2.Manager.EventChan") + } return } } diff --git a/pkg/os/mount_other.go b/pkg/os/mount_other.go index 3a778d0..eb85004 100644 --- a/pkg/os/mount_other.go +++ b/pkg/os/mount_other.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* diff --git a/pkg/os/mount_unix.go b/pkg/os/mount_unix.go index e81def3..27ec8a9 100644 --- a/pkg/os/mount_unix.go +++ b/pkg/os/mount_unix.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* diff --git a/pkg/os/os.go b/pkg/os/os.go index b84d423..3a9d815 100644 --- a/pkg/os/os.go +++ b/pkg/os/os.go @@ -18,7 +18,6 @@ package os import ( "io" - "io/ioutil" "os" "github.com/moby/sys/symlink" @@ -78,9 +77,9 @@ func (RealOS) CopyFile(src, dest string, perm os.FileMode) error { return err } -// WriteFile will call ioutil.WriteFile to write data into a file. +// WriteFile will call os.WriteFile to write data into a file. 
func (RealOS) WriteFile(filename string, data []byte, perm os.FileMode) error { - return ioutil.WriteFile(filename, data, perm) + return os.WriteFile(filename, data, perm) } // Hostname will call os.Hostname to get the hostname of the host. diff --git a/pkg/os/os_unix.go b/pkg/os/os_unix.go index 6b3b857..429a901 100644 --- a/pkg/os/os_unix.go +++ b/pkg/os/os_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/pkg/os/os_windows.go b/pkg/os/os_windows.go index 58bb49c..6047e2f 100644 --- a/pkg/os/os_windows.go +++ b/pkg/os/os_windows.go @@ -29,9 +29,10 @@ import ( // It works for both file and directory paths. // // We are not able to use builtin Go functionality for opening a directory path: -// - os.Open on a directory returns a os.File where Fd() is a search handle from FindFirstFile. -// - syscall.Open does not provide a way to specify FILE_FLAG_BACKUP_SEMANTICS, which is needed to -// open a directory. +// - os.Open on a directory returns a os.File where Fd() is a search handle from FindFirstFile. +// - syscall.Open does not provide a way to specify FILE_FLAG_BACKUP_SEMANTICS, which is needed to +// open a directory. +// // We could use os.Open if the path is a file, but it's easier to just use the same code for both. // Therefore, we call windows.CreateFile directly. func openPath(path string) (windows.Handle, error) { @@ -58,7 +59,8 @@ func openPath(path string) (windows.Handle, error) { } // GetFinalPathNameByHandle flags. -//nolint:golint +// +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
const ( cFILE_NAME_OPENED = 0x8 diff --git a/pkg/os/os_windows_test.go b/pkg/os/os_windows_test.go index 4ac5a8e..b54531e 100644 --- a/pkg/os/os_windows_test.go +++ b/pkg/os/os_windows_test.go @@ -19,7 +19,6 @@ package os import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -162,7 +161,7 @@ func setupVHDVolume(t *testing.T, vhdPath string) string { } func writeFile(t *testing.T, path string, content []byte) { - if err := ioutil.WriteFile(path, content, 0644); err != nil { + if err := os.WriteFile(path, content, 0644); err != nil { t.Fatal(err) } } diff --git a/pkg/os/testing/fake_os_unix.go b/pkg/os/testing/fake_os_unix.go index 9a16394..2cc3c16 100644 --- a/pkg/os/testing/fake_os_unix.go +++ b/pkg/os/testing/fake_os_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/pkg/process/deleted_state.go b/pkg/process/deleted_state.go index eb7baf7..4e62b30 100644 --- a/pkg/process/deleted_state.go +++ b/pkg/process/deleted_state.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,46 +21,47 @@ package process import ( "context" + "errors" + "fmt" "github.com/containerd/console" "github.com/containerd/containerd/errdefs" google_protobuf "github.com/gogo/protobuf/types" - "github.com/pkg/errors" ) type deletedState struct { } func (s *deletedState) Pause(ctx context.Context) error { - return errors.Errorf("cannot pause a deleted process") + return errors.New("cannot pause a deleted process") } func (s *deletedState) Resume(ctx context.Context) error { - return errors.Errorf("cannot resume a deleted process") + return errors.New("cannot resume a deleted process") } func (s *deletedState) Update(context context.Context, r *google_protobuf.Any) error { - return errors.Errorf("cannot update a deleted process") + return errors.New("cannot update a deleted process") } func (s *deletedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error { - return errors.Errorf("cannot checkpoint a deleted process") + 
return errors.New("cannot checkpoint a deleted process") } func (s *deletedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a deleted process") + return errors.New("cannot resize a deleted process") } func (s *deletedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a deleted process") + return errors.New("cannot start a deleted process") } func (s *deletedState) Delete(ctx context.Context) error { - return errors.Wrap(errdefs.ErrNotFound, "cannot delete a deleted process") + return fmt.Errorf("cannot delete a deleted process: %w", errdefs.ErrNotFound) } func (s *deletedState) Kill(ctx context.Context, sig uint32, all bool) error { - return errors.Wrap(errdefs.ErrNotFound, "cannot kill a deleted process") + return fmt.Errorf("cannot kill a deleted process: %w", errdefs.ErrNotFound) } func (s *deletedState) SetExited(status int) { @@ -67,7 +69,7 @@ func (s *deletedState) SetExited(status int) { } func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { - return nil, errors.Errorf("cannot exec in a deleted state") + return nil, errors.New("cannot exec in a deleted state") } func (s *deletedState) Status(ctx context.Context) (string, error) { diff --git a/pkg/process/exec.go b/pkg/process/exec.go index 7790a49..dcd7592 100644 --- a/pkg/process/exec.go +++ b/pkg/process/exec.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -36,7 +37,6 @@ import ( "github.com/containerd/fifo" runc "github.com/containerd/go-runc" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) type execProcess struct { @@ -145,12 +145,12 @@ func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error { pid := e.pid.get() switch { case pid == 0: - return errors.Wrap(errdefs.ErrFailedPrecondition, "process not created") + return fmt.Errorf("process not created: %w", errdefs.ErrFailedPrecondition) case !e.exited.IsZero(): - return 
errors.Wrapf(errdefs.ErrNotFound, "process already finished") + return fmt.Errorf("process already finished: %w", errdefs.ErrNotFound) default: if err := unix.Kill(pid, syscall.Signal(sig)); err != nil { - return errors.Wrapf(checkKillError(err), "exec kill error") + return fmt.Errorf("exec kill error: %w", checkKillError(err)) } } return nil @@ -186,12 +186,12 @@ func (e *execProcess) start(ctx context.Context) (err error) { ) if e.stdio.Terminal { if socket, err = runc.NewTempConsoleSocket(); err != nil { - return errors.Wrap(err, "failed to create runc console socket") + return fmt.Errorf("failed to create runc console socket: %w", err) } defer socket.Close() } else { if pio, err = createIO(ctx, e.id, e.parent.IoUID, e.parent.IoGID, e.stdio); err != nil { - return errors.Wrap(err, "failed to create init process I/O") + return fmt.Errorf("failed to create init process I/O: %w", err) } e.io = pio } @@ -219,19 +219,19 @@ func (e *execProcess) start(ctx context.Context) (err error) { if socket != nil { console, err := socket.ReceiveMaster() if err != nil { - return errors.Wrap(err, "failed to retrieve console master") + return fmt.Errorf("failed to retrieve console master: %w", err) } if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.id, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg); err != nil { - return errors.Wrap(err, "failed to start console copy") + return fmt.Errorf("failed to start console copy: %w", err) } } else { if err := pio.Copy(ctx, &e.wg); err != nil { - return errors.Wrap(err, "failed to start io pipe copy") + return fmt.Errorf("failed to start io pipe copy: %w", err) } } pid, err := pidFile.Read() if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime exec pid") + return fmt.Errorf("failed to retrieve OCI runtime exec pid: %w", err) } e.pid.pid = pid return nil @@ -240,7 +240,7 @@ func (e *execProcess) start(ctx context.Context) (err error) { func (e *execProcess) openStdin(path string) error { sc, err := 
fifo.OpenFifo(context.Background(), path, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) if err != nil { - return errors.Wrapf(err, "failed to open stdin fifo %s", path) + return fmt.Errorf("failed to open stdin fifo %s: %w", path, err) } e.stdin = sc e.closers = append(e.closers, sc) diff --git a/pkg/process/exec_state.go b/pkg/process/exec_state.go index c97b400..4c3dd8f 100644 --- a/pkg/process/exec_state.go +++ b/pkg/process/exec_state.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,9 +21,10 @@ package process import ( "context" + "errors" + "fmt" "github.com/containerd/console" - "github.com/pkg/errors" ) type execState interface { @@ -47,7 +49,7 @@ func (s *execCreatedState) transition(name string) error { case "deleted": s.p.execState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } @@ -96,7 +98,7 @@ func (s *execRunningState) transition(name string) error { case "stopped": s.p.execState = &execStoppedState{p: s.p} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } @@ -106,11 +108,11 @@ func (s *execRunningState) Resize(ws console.WinSize) error { } func (s *execRunningState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a running process") + return errors.New("cannot start a running process") } func (s *execRunningState) Delete(ctx context.Context) error { - return errors.Errorf("cannot delete a running process") + return errors.New("cannot delete a running process") } func (s *execRunningState) Kill(ctx context.Context, sig uint32, all bool) error { @@ -138,17 +140,17 @@ func (s *execStoppedState) transition(name string) error { case "deleted": s.p.execState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to 
%q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } func (s *execStoppedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a stopped container") + return errors.New("cannot resize a stopped container") } func (s *execStoppedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a stopped process") + return errors.New("cannot start a stopped process") } func (s *execStoppedState) Delete(ctx context.Context) error { diff --git a/pkg/process/init.go b/pkg/process/init.go index a946238..26aebdc 100644 --- a/pkg/process/init.go +++ b/pkg/process/init.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -37,7 +38,6 @@ import ( runc "github.com/containerd/go-runc" google_protobuf "github.com/gogo/protobuf/types" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -119,12 +119,12 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error { if r.Terminal { if socket, err = runc.NewTempConsoleSocket(); err != nil { - return errors.Wrap(err, "failed to create OCI runtime console socket") + return fmt.Errorf("failed to create OCI runtime console socket: %w", err) } defer socket.Close() } else { if pio, err = createIO(ctx, p.id, p.IoUID, p.IoGID, p.stdio); err != nil { - return errors.Wrap(err, "failed to create init process I/O") + return fmt.Errorf("failed to create init process I/O: %w", err) } p.io = pio } @@ -155,21 +155,21 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error { if socket != nil { console, err := socket.ReceiveMaster() if err != nil { - return errors.Wrap(err, "failed to retrieve console master") + return fmt.Errorf("failed to retrieve console master: %w", err) } console, err = p.Platform.CopyConsole(ctx, console, p.id, r.Stdin, r.Stdout, r.Stderr, &p.wg) if err != nil { - return errors.Wrap(err, "failed to start console copy") + return 
fmt.Errorf("failed to start console copy: %w", err) } p.console = console } else { if err := pio.Copy(ctx, &p.wg); err != nil { - return errors.Wrap(err, "failed to start io pipe copy") + return fmt.Errorf("failed to start io pipe copy: %w", err) } } pid, err := pidFile.Read() if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime container pid") + return fmt.Errorf("failed to retrieve OCI runtime container pid: %w", err) } p.pid = pid return nil @@ -178,7 +178,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error { func (p *Init) openStdin(path string) error { sc, err := fifo.OpenFifo(context.Background(), path, unix.O_WRONLY|unix.O_NONBLOCK, 0) if err != nil { - return errors.Wrapf(err, "failed to open stdin fifo %s", path) + return fmt.Errorf("failed to open stdin fifo %s: %w", path, err) } p.stdin = sc p.closers = append(p.closers, sc) @@ -312,7 +312,7 @@ func (p *Init) delete(ctx context.Context) error { if err2 := mount.UnmountAll(p.Rootfs, 0); err2 != nil { log.G(ctx).WithError(err2).Warn("failed to cleanup rootfs mount") if err == nil { - err = errors.Wrap(err2, "failed rootfs umount") + err = fmt.Errorf("failed rootfs umount: %w", err2) } } return err @@ -481,11 +481,11 @@ func (p *Init) runtimeError(rErr error, msg string) error { rMsg, err := getLastRuntimeError(p.runtime) switch { case err != nil: - return errors.Wrapf(rErr, "%s: %s (%s)", msg, "unable to retrieve OCI runtime error", err.Error()) + return fmt.Errorf("%s: %s (%s): %w", msg, "unable to retrieve OCI runtime error", err.Error(), rErr) case rMsg == "": - return errors.Wrap(rErr, msg) + return fmt.Errorf("%s: %w", msg, rErr) default: - return errors.Errorf("%s: %s", msg, rMsg) + return fmt.Errorf("%s: %s", msg, rMsg) } } diff --git a/pkg/process/init_state.go b/pkg/process/init_state.go index 5273a5d..76c9149 100644 --- a/pkg/process/init_state.go +++ b/pkg/process/init_state.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,10 +21,11 @@ 
package process import ( "context" + "errors" + "fmt" runc "github.com/containerd/go-runc" google_protobuf "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -53,17 +55,17 @@ func (s *createdState) transition(name string) error { case "deleted": s.p.initState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } func (s *createdState) Pause(ctx context.Context) error { - return errors.Errorf("cannot pause task in created state") + return errors.New("cannot pause task in created state") } func (s *createdState) Resume(ctx context.Context) error { - return errors.Errorf("cannot resume task in created state") + return errors.New("cannot resume task in created state") } func (s *createdState) Update(ctx context.Context, r *google_protobuf.Any) error { @@ -71,7 +73,7 @@ func (s *createdState) Update(ctx context.Context, r *google_protobuf.Any) error } func (s *createdState) Checkpoint(ctx context.Context, r *CheckpointConfig) error { - return errors.Errorf("cannot checkpoint a task in created state") + return errors.New("cannot checkpoint a task in created state") } func (s *createdState) Start(ctx context.Context) error { @@ -122,17 +124,17 @@ func (s *createdCheckpointState) transition(name string) error { case "deleted": s.p.initState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } func (s *createdCheckpointState) Pause(ctx context.Context) error { - return errors.Errorf("cannot pause task in created state") + return errors.New("cannot pause task in created state") } func (s *createdCheckpointState) Resume(ctx context.Context) error { - return errors.Errorf("cannot resume task in created state") + return errors.New("cannot resume task 
in created state") } func (s *createdCheckpointState) Update(ctx context.Context, r *google_protobuf.Any) error { @@ -140,7 +142,7 @@ func (s *createdCheckpointState) Update(ctx context.Context, r *google_protobuf. } func (s *createdCheckpointState) Checkpoint(ctx context.Context, r *CheckpointConfig) error { - return errors.Errorf("cannot checkpoint a task in created state") + return errors.New("cannot checkpoint a task in created state") } func (s *createdCheckpointState) Start(ctx context.Context) error { @@ -153,7 +155,7 @@ func (s *createdCheckpointState) Start(ctx context.Context) error { ) if sio.Terminal { if socket, err = runc.NewTempConsoleSocket(); err != nil { - return errors.Wrap(err, "failed to create OCI runtime console socket") + return fmt.Errorf("failed to create OCI runtime console socket: %w", err) } defer socket.Close() s.opts.ConsoleSocket = socket @@ -164,27 +166,27 @@ func (s *createdCheckpointState) Start(ctx context.Context) error { } if sio.Stdin != "" { if err := p.openStdin(sio.Stdin); err != nil { - return errors.Wrapf(err, "failed to open stdin fifo %s", sio.Stdin) + return fmt.Errorf("failed to open stdin fifo %s: %w", sio.Stdin, err) } } if socket != nil { console, err := socket.ReceiveMaster() if err != nil { - return errors.Wrap(err, "failed to retrieve console master") + return fmt.Errorf("failed to retrieve console master: %w", err) } console, err = p.Platform.CopyConsole(ctx, console, p.id, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg) if err != nil { - return errors.Wrap(err, "failed to start console copy") + return fmt.Errorf("failed to start console copy: %w", err) } p.console = console } else { if err := p.io.Copy(ctx, &p.wg); err != nil { - return errors.Wrap(err, "failed to start io pipe copy") + return fmt.Errorf("failed to start io pipe copy: %w", err) } } pid, err := runc.ReadPidFile(s.opts.PidFile) if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime container pid") + return fmt.Errorf("failed to 
retrieve OCI runtime container pid: %w", err) } p.pid = pid return s.transition("running") @@ -210,7 +212,7 @@ func (s *createdCheckpointState) SetExited(status int) { } func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { - return nil, errors.Errorf("cannot exec in a created state") + return nil, errors.New("cannot exec in a created state") } func (s *createdCheckpointState) Status(ctx context.Context) (string, error) { @@ -228,7 +230,7 @@ func (s *runningState) transition(name string) error { case "paused": s.p.initState = &pausedState{p: s.p} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } @@ -249,7 +251,7 @@ func (s *runningState) Pause(ctx context.Context) error { } func (s *runningState) Resume(ctx context.Context) error { - return errors.Errorf("cannot resume a running process") + return errors.New("cannot resume a running process") } func (s *runningState) Update(ctx context.Context, r *google_protobuf.Any) error { @@ -261,11 +263,11 @@ func (s *runningState) Checkpoint(ctx context.Context, r *CheckpointConfig) erro } func (s *runningState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a running process") + return errors.New("cannot start a running process") } func (s *runningState) Delete(ctx context.Context) error { - return errors.Errorf("cannot delete a running process") + return errors.New("cannot delete a running process") } func (s *runningState) Kill(ctx context.Context, sig uint32, all bool) error { @@ -299,13 +301,13 @@ func (s *pausedState) transition(name string) error { case "stopped": s.p.initState = &stoppedState{p: s.p} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } func (s *pausedState) Pause(ctx 
context.Context) error { - return errors.Errorf("cannot pause a paused container") + return errors.New("cannot pause a paused container") } func (s *pausedState) Resume(ctx context.Context) error { @@ -325,11 +327,11 @@ func (s *pausedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error } func (s *pausedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a paused process") + return errors.New("cannot start a paused process") } func (s *pausedState) Delete(ctx context.Context) error { - return errors.Errorf("cannot delete a paused process") + return errors.New("cannot delete a paused process") } func (s *pausedState) Kill(ctx context.Context, sig uint32, all bool) error { @@ -349,7 +351,7 @@ func (s *pausedState) SetExited(status int) { } func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { - return nil, errors.Errorf("cannot exec in a paused state") + return nil, errors.New("cannot exec in a paused state") } func (s *pausedState) Status(ctx context.Context) (string, error) { @@ -365,29 +367,29 @@ func (s *stoppedState) transition(name string) error { case "deleted": s.p.initState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } func (s *stoppedState) Pause(ctx context.Context) error { - return errors.Errorf("cannot pause a stopped container") + return errors.New("cannot pause a stopped container") } func (s *stoppedState) Resume(ctx context.Context) error { - return errors.Errorf("cannot resume a stopped container") + return errors.New("cannot resume a stopped container") } func (s *stoppedState) Update(ctx context.Context, r *google_protobuf.Any) error { - return errors.Errorf("cannot update a stopped container") + return errors.New("cannot update a stopped container") } func (s *stoppedState) Checkpoint(ctx context.Context, r 
*CheckpointConfig) error { - return errors.Errorf("cannot checkpoint a stopped container") + return errors.New("cannot checkpoint a stopped container") } func (s *stoppedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a stopped process") + return errors.New("cannot start a stopped process") } func (s *stoppedState) Delete(ctx context.Context) error { @@ -406,7 +408,7 @@ func (s *stoppedState) SetExited(status int) { } func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { - return nil, errors.Errorf("cannot exec in a stopped state") + return nil, errors.New("cannot exec in a stopped state") } func (s *stoppedState) Status(ctx context.Context) (string, error) { diff --git a/pkg/process/io.go b/pkg/process/io.go index d1c5b96..ed9b1da 100644 --- a/pkg/process/io.go +++ b/pkg/process/io.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -24,7 +25,6 @@ import ( "io" "net/url" "os" - "os/exec" "path/filepath" "sync" "sync/atomic" @@ -37,7 +37,7 @@ import ( "github.com/containerd/fifo" runc "github.com/containerd/go-runc" "github.com/hashicorp/go-multierror" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" ) const binaryIOProcTermTimeout = 12 * time.Second // Give logger process solid 10 seconds for cleanup @@ -76,7 +76,7 @@ func (p *processIO) Copy(ctx context.Context, wg *sync.WaitGroup) error { } var cwg sync.WaitGroup if err := copyPipes(ctx, p.IO(), p.stdio.Stdin, p.stdio.Stdout, p.stdio.Stderr, wg, &cwg); err != nil { - return errors.Wrap(err, "unable to copy pipes") + return fmt.Errorf("unable to copy pipes: %w", err) } cwg.Wait() return nil @@ -96,7 +96,7 @@ func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio stdio.Stdi } u, err := url.Parse(stdio.Stdout) if err != nil { - return nil, errors.Wrap(err, "unable to parse stdout uri") + return nil, fmt.Errorf("unable to parse stdout uri: %w", err) } if u.Scheme == "" { u.Scheme = "fifo" @@ -124,7 +124,7 
@@ func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio stdio.Stdi pio.copy = true pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio)) default: - return nil, errors.Errorf("unknown STDIO scheme %s", u.Scheme) + return nil, fmt.Errorf("unknown STDIO scheme %s", u.Scheme) } if err != nil { return nil, err @@ -188,10 +188,10 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w ) if ok { if fw, err = fifo.OpenFifo(ctx, i.name, syscall.O_WRONLY, 0); err != nil { - return errors.Wrapf(err, "containerd-shim: opening w/o fifo %q failed", i.name) + return fmt.Errorf("containerd-shim: opening w/o fifo %q failed: %w", i.name, err) } if fr, err = fifo.OpenFifo(ctx, i.name, syscall.O_RDONLY, 0); err != nil { - return errors.Wrapf(err, "containerd-shim: opening r/o fifo %q failed", i.name) + return fmt.Errorf("containerd-shim: opening r/o fifo %q failed: %w", i.name, err) } } else { if sameFile != nil { @@ -200,7 +200,7 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w continue } if fw, err = os.OpenFile(i.name, syscall.O_WRONLY|syscall.O_APPEND, 0); err != nil { - return errors.Wrapf(err, "containerd-shim: opening file %q failed", i.name) + return fmt.Errorf("containerd-shim: opening file %q failed: %w", i.name, err) } if stdout == stderr { sameFile = &countingWriteCloser{ @@ -265,13 +265,13 @@ func NewBinaryIO(ctx context.Context, id string, uri *url.URL) (_ runc.IO, err e out, err := newPipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stdout pipes") + return nil, fmt.Errorf("failed to create stdout pipes: %w", err) } closers = append(closers, out.Close) serr, err := newPipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stderr pipes") + return nil, fmt.Errorf("failed to create stderr pipes: %w", err) } closers = append(closers, serr.Close) @@ -286,19 +286,19 @@ func NewBinaryIO(ctx context.Context, id string, uri *url.URL) (_ runc.IO, 
err e // don't need to register this with the reaper or wait when // running inside a shim if err := cmd.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start binary process") + return nil, fmt.Errorf("failed to start binary process: %w", err) } closers = append(closers, func() error { return cmd.Process.Kill() }) // close our side of the pipe after start if err := w.Close(); err != nil { - return nil, errors.Wrap(err, "failed to close write pipe after start") + return nil, fmt.Errorf("failed to close write pipe after start: %w", err) } // wait for the logging binary to be ready b := make([]byte, 1) if _, err := r.Read(b); err != nil && err != io.EOF { - return nil, errors.Wrap(err, "failed to read from logging binary") + return nil, fmt.Errorf("failed to read from logging binary: %w", err) } return &binaryIO{ @@ -356,12 +356,12 @@ func (b *binaryIO) cancel() error { // Send SIGTERM first, so logger process has a chance to flush and exit properly if err := b.cmd.Process.Signal(syscall.SIGTERM); err != nil { - result := multierror.Append(errors.Wrap(err, "failed to send SIGTERM")) + result := multierror.Append(fmt.Errorf("failed to send SIGTERM: %w", err)) log.L.WithError(err).Warn("failed to send SIGTERM signal, killing logging shim") if err := b.cmd.Process.Kill(); err != nil { - result = multierror.Append(result, errors.Wrap(err, "failed to kill process after faulty SIGTERM")) + result = multierror.Append(result, fmt.Errorf("failed to kill process after faulty SIGTERM: %w", err)) } return result.ErrorOrNil() @@ -380,7 +380,7 @@ func (b *binaryIO) cancel() error { err := b.cmd.Process.Kill() if err != nil { - return errors.Wrap(err, "failed to kill shim logger process") + return fmt.Errorf("failed to kill shim logger process: %w", err) } return nil @@ -428,11 +428,11 @@ func (p *pipe) Close() error { var result *multierror.Error if err := p.w.Close(); err != nil { - result = multierror.Append(result, errors.Wrap(err, "failed to close write pipe")) 
+ result = multierror.Append(result, fmt.Errorf("failed to close write pipe: %w", err)) } if err := p.r.Close(); err != nil { - result = multierror.Append(result, errors.Wrap(err, "failed to close read pipe")) + result = multierror.Append(result, fmt.Errorf("failed to close read pipe: %w", err)) } return multierror.Prefix(result.ErrorOrNil(), "pipe:") diff --git a/pkg/process/io_test.go b/pkg/process/io_test.go index 21b4144..7c7574e 100644 --- a/pkg/process/io_test.go +++ b/pkg/process/io_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,8 +21,8 @@ package process import ( "context" - "io/ioutil" "net/url" + "os" "testing" "github.com/containerd/containerd/namespaces" @@ -67,6 +68,6 @@ func TestNewBinaryIOCleanup(t *testing.T) { func descriptorCount(t *testing.T) int { t.Helper() - files, _ := ioutil.ReadDir("/proc/self/fd") + files, _ := os.ReadDir("/proc/self/fd") return len(files) } diff --git a/pkg/process/io_util.go b/pkg/process/io_util.go index 72bbf92..e814c11 100644 --- a/pkg/process/io_util.go +++ b/pkg/process/io_util.go @@ -19,7 +19,8 @@ package process import ( "net/url" "os" - "os/exec" + + exec "golang.org/x/sys/execabs" ) // NewBinaryCmd returns a Cmd to be used to start a logging binary. 
diff --git a/pkg/process/utils.go b/pkg/process/utils.go index 6536ac5..afada02 100644 --- a/pkg/process/utils.go +++ b/pkg/process/utils.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -32,7 +33,6 @@ import ( "github.com/containerd/containerd/errdefs" runc "github.com/containerd/go-runc" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -136,11 +136,11 @@ func checkKillError(err error) error { strings.Contains(err.Error(), "container not running") || strings.Contains(strings.ToLower(err.Error()), "no such process") || err == unix.ESRCH { - return errors.Wrapf(errdefs.ErrNotFound, "process already finished") + return fmt.Errorf("process already finished: %w", errdefs.ErrNotFound) } else if strings.Contains(err.Error(), "does not exist") { - return errors.Wrapf(errdefs.ErrNotFound, "no such container") + return fmt.Errorf("no such container: %w", errdefs.ErrNotFound) } - return errors.Wrapf(err, "unknown error after kill") + return fmt.Errorf("unknown error after kill: %w", err) } func newPidFile(bundle string) *pidFile { @@ -198,5 +198,5 @@ func stateName(v interface{}) string { case *stoppedState: return "stopped" } - panic(errors.Errorf("invalid state %v", v)) + panic(fmt.Errorf("invalid state %v", v)) } diff --git a/pkg/progress/escape.go b/pkg/progress/escape.go index d9ce5b0..394686f 100644 --- a/pkg/progress/escape.go +++ b/pkg/progress/escape.go @@ -19,6 +19,6 @@ package progress const ( escape = "\x1b" reset = escape + "[0m" - red = escape + "[31m" // nolint: deadcode, varcheck, unused + red = escape + "[31m" //nolint:nolintlint,unused,varcheck green = escape + "[32m" ) diff --git a/pkg/randutil/randutil.go b/pkg/randutil/randutil.go new file mode 100644 index 0000000..f4b657d --- /dev/null +++ b/pkg/randutil/randutil.go @@ -0,0 +1,48 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package randutil provides utilities for [crypto/rand]. +package randutil + +import ( + "crypto/rand" + "math" + "math/big" +) + +// Int63n is similar to [math/rand.Int63n] but uses [crypto/rand.Reader] under the hood. +func Int63n(n int64) int64 { + b, err := rand.Int(rand.Reader, big.NewInt(n)) + if err != nil { + panic(err) + } + return b.Int64() +} + +// Int63 is similar to [math/rand.Int63] but uses [crypto/rand.Reader] under the hood. +func Int63() int64 { + return Int63n(math.MaxInt64) +} + +// Intn is similar to [math/rand.Intn] but uses [crypto/rand.Reader] under the hood. +func Intn(n int) int { + return int(Int63n(int64(n))) +} + +// Int is similar to [math/rand.Int] but uses [crypto/rand.Reader] under the hood. +func Int() int { + return int(Int63()) +} diff --git a/pkg/registrar/registrar.go b/pkg/registrar/registrar.go index b83e7ce..d365a57 100644 --- a/pkg/registrar/registrar.go +++ b/pkg/registrar/registrar.go @@ -17,9 +17,8 @@ package registrar import ( + "fmt" "sync" - - "github.com/pkg/errors" ) // Registrar stores one-to-one name<->key mappings. 
@@ -50,19 +49,19 @@ func (r *Registrar) Reserve(name, key string) error { defer r.lock.Unlock() if name == "" || key == "" { - return errors.Errorf("invalid name %q or key %q", name, key) + return fmt.Errorf("invalid name %q or key %q", name, key) } if k, exists := r.nameToKey[name]; exists { if k != key { - return errors.Errorf("name %q is reserved for %q", name, k) + return fmt.Errorf("name %q is reserved for %q", name, k) } return nil } if n, exists := r.keyToName[key]; exists { if n != name { - return errors.Errorf("key %q is reserved for %q", key, n) + return fmt.Errorf("key %q is reserved for %q", key, n) } return nil } diff --git a/vendor/github.com/containerd/btrfs/doc.go b/pkg/runtimeoptions/v1/doc.go similarity index 84% rename from vendor/github.com/containerd/btrfs/doc.go rename to pkg/runtimeoptions/v1/doc.go index 6aaf2d0..9617e74 100644 --- a/vendor/github.com/containerd/btrfs/doc.go +++ b/pkg/runtimeoptions/v1/doc.go @@ -14,5 +14,4 @@ limitations under the License. */ -// Package btrfs provides bindings for working with btrfs partitions from Go. -package btrfs +package runtimeoptions_v1 //nolint:revive // Ignore var-naming: don't use an underscore in package name (revive) diff --git a/pkg/schedcore/prctl_linux.go b/pkg/schedcore/prctl_linux.go new file mode 100644 index 0000000..11540db --- /dev/null +++ b/pkg/schedcore/prctl_linux.go @@ -0,0 +1,49 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package schedcore + +import ( + "golang.org/x/sys/unix" +) + +// PidType is the type of provided pid value and how it should be treated +type PidType int + +const ( + // Pid affects the current pid + Pid PidType = pidtypePid + // ThreadGroup affects all threads in the group + ThreadGroup PidType = pidtypeTgid + // ProcessGroup affects all processes in the group + ProcessGroup PidType = pidtypePgid +) + +const ( + pidtypePid = 0 + pidtypeTgid = 1 + pidtypePgid = 2 +) + +// Create a new sched core domain +func Create(t PidType) error { + return unix.Prctl(unix.PR_SCHED_CORE, unix.PR_SCHED_CORE_CREATE, 0, uintptr(t), 0) +} + +// ShareFrom shares the sched core domain from the provided pid +func ShareFrom(pid uint64, t PidType) error { + return unix.Prctl(unix.PR_SCHED_CORE, unix.PR_SCHED_CORE_SHARE_FROM, uintptr(pid), uintptr(t), 0) +} diff --git a/pkg/seccomp/seccomp_unsupported.go b/pkg/seccomp/seccomp_unsupported.go index 87b1334..4458c1c 100644 --- a/pkg/seccomp/seccomp_unsupported.go +++ b/pkg/seccomp/seccomp_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/pkg/seed/seed.go b/pkg/seed/seed.go index 4a5829a..b8442a8 100644 --- a/pkg/seed/seed.go +++ b/pkg/seed/seed.go @@ -14,6 +14,9 @@ limitations under the License. */ +// Package seed provides an initializer for the global [math/rand] seed. +// +// Deprecated: Do not rely on the global seed. package seed import ( @@ -23,6 +26,8 @@ import ( // WithTimeAndRand seeds the global math rand generator with nanoseconds // XOR'ed with a crypto component if available for uniqueness. +// +// Deprecated: Do not rely on the global seed. 
 func WithTimeAndRand() {
 	var (
 		b [4]byte
diff --git a/pkg/seed/seed_other.go b/pkg/seed/seed_other.go
index 30ba9e9..ddefd74 100644
--- a/pkg/seed/seed_other.go
+++ b/pkg/seed/seed_other.go
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 /*
diff --git a/pkg/shutdown/shutdown.go b/pkg/shutdown/shutdown.go
new file mode 100644
index 0000000..bc1af75
--- /dev/null
+++ b/pkg/shutdown/shutdown.go
@@ -0,0 +1,109 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package shutdown
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// ErrShutdown is the error condition when a context has been fully shutdown
+var ErrShutdown = errors.New("shutdown")
+
+// Service is used to facilitate shutdown through callback
+// registration and shutdown initiation
+type Service interface {
+	// Shutdown initiates shutdown
+	Shutdown()
+	// RegisterCallback registers functions to be called on shutdown and before
+	// the shutdown channel is closed. A callback error will propagate to the
+	// context error
+	RegisterCallback(func(context.Context) error)
+}
+
+// WithShutdown returns a context which is similar to a cancel context, but
+// with callbacks which can propagate to the context error. Unlike a cancel
+// context, the shutdown context cannot be canceled from the parent context.
+// However, future child contexts will be canceled upon shutdown.
+func WithShutdown(ctx context.Context) (context.Context, Service) { + ss := &shutdownService{ + Context: ctx, + doneC: make(chan struct{}), + timeout: 30 * time.Second, + } + return ss, ss +} + +type shutdownService struct { + context.Context + + mu sync.Mutex + isShutdown bool + callbacks []func(context.Context) error + doneC chan struct{} + err error + timeout time.Duration +} + +func (s *shutdownService) Shutdown() { + s.mu.Lock() + defer s.mu.Unlock() + if s.isShutdown { + return + } + s.isShutdown = true + + go func(callbacks []func(context.Context) error) { + ctx, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + grp, ctx := errgroup.WithContext(ctx) + for i := range callbacks { + fn := callbacks[i] + grp.Go(func() error { return fn(ctx) }) + } + err := grp.Wait() + if err == nil { + err = ErrShutdown + } + s.mu.Lock() + s.err = err + close(s.doneC) + s.mu.Unlock() + }(s.callbacks) +} + +func (s *shutdownService) Done() <-chan struct{} { + return s.doneC +} + +func (s *shutdownService) Err() error { + s.mu.Lock() + defer s.mu.Unlock() + return s.err +} +func (s *shutdownService) RegisterCallback(fn func(context.Context) error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.callbacks == nil { + s.callbacks = []func(context.Context) error{} + } + s.callbacks = append(s.callbacks, fn) +} diff --git a/pkg/snapshotters/annotations.go b/pkg/snapshotters/annotations.go new file mode 100644 index 0000000..c7ad97c --- /dev/null +++ b/pkg/snapshotters/annotations.go @@ -0,0 +1,97 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package snapshotters + +import ( + "context" + + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// NOTE: The following labels contain "cri" prefix but they are not specific to CRI and +// can be used by non-CRI clients as well for enabling remote snapshotters. We need to +// retain that string for keeping compatibility with snapshotter implementations. +const ( + // TargetRefLabel is a label which contains image reference and will be passed + // to snapshotters. + TargetRefLabel = "containerd.io/snapshot/cri.image-ref" + // TargetManifestDigestLabel is a label which contains manifest digest and will be passed + // to snapshotters. + TargetManifestDigestLabel = "containerd.io/snapshot/cri.manifest-digest" + // TargetLayerDigestLabel is a label which contains layer digest and will be passed + // to snapshotters. + TargetLayerDigestLabel = "containerd.io/snapshot/cri.layer-digest" + // TargetImageLayersLabel is a label which contains layer digests contained in + // the target image and will be passed to snapshotters for preparing layers in + // parallel. Skipping some layers is allowed and only affects performance. + TargetImageLayersLabel = "containerd.io/snapshot/cri.image-layers" +) + +// AppendInfoHandlerWrapper makes a handler which appends some basic information +// of images like digests for manifest and their child layers as annotations during unpack. 
+// These annotations will be passed to snapshotters as labels. These labels will be +// used mainly by remote snapshotters for querying image contents from the remote location. +func AppendInfoHandlerWrapper(ref string) func(f images.Handler) images.Handler { + return func(f images.Handler) images.Handler { + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f.Handle(ctx, desc) + if err != nil { + return nil, err + } + switch desc.MediaType { + case ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest: + for i := range children { + c := &children[i] + if images.IsLayerType(c.MediaType) { + if c.Annotations == nil { + c.Annotations = make(map[string]string) + } + c.Annotations[TargetRefLabel] = ref + c.Annotations[TargetLayerDigestLabel] = c.Digest.String() + c.Annotations[TargetImageLayersLabel] = getLayers(ctx, TargetImageLayersLabel, children[i:], labels.Validate) + c.Annotations[TargetManifestDigestLabel] = desc.Digest.String() + } + } + } + return children, nil + }) + } +} + +// getLayers returns comma-separated digests based on the passed list of +// descriptors. The returned list contains as many digests as possible as well +// as meets the label validation. +func getLayers(ctx context.Context, key string, descs []ocispec.Descriptor, validate func(k, v string) error) (layers string) { + for _, l := range descs { + if images.IsLayerType(l.MediaType) { + item := l.Digest.String() + if layers != "" { + item = "," + item + } + // This avoids the label hits the size limitation. 
+ if err := validate(key, layers+item); err != nil { + log.G(ctx).WithError(err).WithField("label", key).WithField("digest", l.Digest.String()).Debug("omitting digest in the layers list") + break + } + layers += item + } + } + return +} diff --git a/pkg/snapshotters/annotations_test.go b/pkg/snapshotters/annotations_test.go new file mode 100644 index 0000000..f972f8c --- /dev/null +++ b/pkg/snapshotters/annotations_test.go @@ -0,0 +1,74 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package snapshotters + +import ( + "context" + "fmt" + "strings" + "testing" + + digest "github.com/opencontainers/go-digest" + imagespec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/stretchr/testify/assert" +) + +func TestImageLayersLabel(t *testing.T) { + sampleKey := "sampleKey" + sampleDigest, err := digest.Parse("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + assert.NoError(t, err) + sampleMaxSize := 300 + sampleValidate := func(k, v string) error { + if (len(k) + len(v)) > sampleMaxSize { + return fmt.Errorf("invalid: %q: %q", k, v) + } + return nil + } + + tests := []struct { + name string + layersNum int + wantNum int + }{ + { + name: "valid number of layers", + layersNum: 2, + wantNum: 2, + }, + { + name: "many layers", + layersNum: 5, // hits sampleMaxSize (300 chars). + wantNum: 4, // layers should be omitted for avoiding invalid label. 
+ }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + sampleLayers := make([]imagespec.Descriptor, 0, tt.layersNum) + for i := 0; i < tt.layersNum; i++ { + sampleLayers = append(sampleLayers, imagespec.Descriptor{ + MediaType: imagespec.MediaTypeImageLayerGzip, + Digest: sampleDigest, + }) + } + gotS := getLayers(context.Background(), sampleKey, sampleLayers, sampleValidate) + got := len(strings.Split(gotS, ",")) + assert.Equal(t, tt.wantNum, got) + }) + } +} diff --git a/pkg/testutil/helpers.go b/pkg/testutil/helpers.go index 66b26bc..9a9f857 100644 --- a/pkg/testutil/helpers.go +++ b/pkg/testutil/helpers.go @@ -19,7 +19,6 @@ package testutil import ( "flag" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -53,7 +52,7 @@ func DumpDir(t *testing.T, root string) { } t.Log(fi.Mode(), fmt.Sprintf("%10s", ""), path, "->", target) } else if fi.Mode().IsRegular() { - p, err := ioutil.ReadFile(path) + p, err := os.ReadFile(path) if err != nil { t.Logf("error reading file: %v", err) return nil diff --git a/pkg/testutil/helpers_unix.go b/pkg/testutil/helpers_unix.go index eee004a..b4385bf 100644 --- a/pkg/testutil/helpers_unix.go +++ b/pkg/testutil/helpers_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/pkg/testutil/mount_other.go b/pkg/testutil/mount_other.go index 57424c8..412d72b 100644 --- a/pkg/testutil/mount_other.go +++ b/pkg/testutil/mount_other.go @@ -1,3 +1,4 @@ +//go:build !linux && !windows // +build !linux,!windows /* diff --git a/pkg/timeout/timeout.go b/pkg/timeout/timeout.go index 2b9af85..4ecd50e 100644 --- a/pkg/timeout/timeout.go +++ b/pkg/timeout/timeout.go @@ -23,7 +23,7 @@ import ( ) var ( - mu sync.Mutex + mu sync.RWMutex timeouts = make(map[string]time.Duration) // DefaultTimeout of the timeout package @@ -39,9 +39,9 @@ func Set(key string, t time.Duration) { // Get returns the timeout for the provided key func Get(key string) time.Duration { - mu.Lock() + mu.RLock() t, ok := 
timeouts[key] - mu.Unlock() + mu.RUnlock() if !ok { t = DefaultTimeout } @@ -57,8 +57,8 @@ func WithContext(ctx context.Context, key string) (context.Context, func()) { // All returns all keys and their timeouts func All() map[string]time.Duration { out := make(map[string]time.Duration) - mu.Lock() - defer mu.Unlock() + mu.RLock() + defer mu.RUnlock() for k, v := range timeouts { out[k] = v } diff --git a/pkg/ttrpcutil/client.go b/pkg/ttrpcutil/client.go index 8b4d925..f05ab7a 100644 --- a/pkg/ttrpcutil/client.go +++ b/pkg/ttrpcutil/client.go @@ -17,13 +17,14 @@ package ttrpcutil import ( + "errors" + "fmt" "sync" "time" v1 "github.com/containerd/containerd/api/services/ttrpc/events/v1" "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/ttrpc" - "github.com/pkg/errors" ) const ttrpcDialTimeout = 5 * time.Second @@ -43,7 +44,7 @@ func NewClient(address string, opts ...ttrpc.ClientOpts) (*Client, error) { connector := func() (*ttrpc.Client, error) { conn, err := dialer.Dialer(address, ttrpcDialTimeout) if err != nil { - return nil, errors.Wrap(err, "failed to connect") + return nil, fmt.Errorf("failed to connect: %w", err) } client := ttrpc.NewClient(conn, opts...) 
diff --git a/pkg/userns/userns_unsupported.go b/pkg/userns/userns_unsupported.go index aab756f..4f8d7dd 100644 --- a/pkg/userns/userns_unsupported.go +++ b/pkg/userns/userns_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/platforms/compare.go b/platforms/compare.go index c7657e1..3913ef6 100644 --- a/platforms/compare.go +++ b/platforms/compare.go @@ -38,12 +38,22 @@ func platformVector(platform specs.Platform) []specs.Platform { switch platform.Architecture { case "amd64": + if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 { + for amd64Version--; amd64Version >= 1; amd64Version-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(amd64Version), + }) + } + } vector = append(vector, specs.Platform{ Architecture: "386", OS: platform.OS, OSVersion: platform.OSVersion, OSFeatures: platform.OSFeatures, - Variant: platform.Variant, }) case "arm": if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { diff --git a/platforms/compare_test.go b/platforms/compare_test.go index b734c2a..cbe90de 100644 --- a/platforms/compare_test.go +++ b/platforms/compare_test.go @@ -33,6 +33,26 @@ func TestOnly(t *testing.T) { "linux/386", }, false: { + "linux/amd64/v2", + "linux/arm/v7", + "linux/arm64", + "windows/amd64", + "windows/arm", + }, + }, + }, + { + platform: "linux/amd64/v2", + matches: map[bool][]string{ + true: { + "linux/amd64", + "linux/amd64/v1", + "linux/amd64/v2", + "linux/386", + }, + false: { + "linux/amd64/v3", + "linux/amd64/v4", "linux/arm/v7", "linux/arm64", "windows/amd64", diff --git a/platforms/cpuinfo.go b/platforms/cpuinfo.go index 4a7177e..8c600fc 100644 --- a/platforms/cpuinfo.go +++ b/platforms/cpuinfo.go @@ -17,15 +17,10 @@ package platforms import ( - "bufio" 
- "os" "runtime" - "strings" "sync" - "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/pkg/errors" ) // Present the ARM instruction set architecture, eg: v7, v8 @@ -37,95 +32,12 @@ var cpuVariantOnce sync.Once func cpuVariant() string { cpuVariantOnce.Do(func() { if isArmArch(runtime.GOARCH) { - cpuVariantValue = getCPUVariant() + var err error + cpuVariantValue, err = getCPUVariant() + if err != nil { + log.L.Errorf("Error getCPUVariant for OS %s: %v", runtime.GOOS, err) + } } }) return cpuVariantValue } - -// For Linux, the kernel has already detected the ABI, ISA and Features. -// So we don't need to access the ARM registers to detect platform information -// by ourselves. We can just parse these information from /proc/cpuinfo -func getCPUInfo(pattern string) (info string, err error) { - if !isLinuxOS(runtime.GOOS) { - return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) - } - - cpuinfo, err := os.Open("/proc/cpuinfo") - if err != nil { - return "", err - } - defer cpuinfo.Close() - - // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse - // the first core is enough. 
- scanner := bufio.NewScanner(cpuinfo) - for scanner.Scan() { - newline := scanner.Text() - list := strings.Split(newline, ":") - - if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { - return strings.TrimSpace(list[1]), nil - } - } - - // Check whether the scanner encountered errors - err = scanner.Err() - if err != nil { - return "", err - } - - return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) -} - -func getCPUVariant() string { - if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { - // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use - // runtime.GOARCH to determine the variants - var variant string - switch runtime.GOARCH { - case "arm64": - variant = "v8" - case "arm": - variant = "v7" - default: - variant = "unknown" - } - - return variant - } - - variant, err := getCPUInfo("Cpu architecture") - if err != nil { - log.L.WithError(err).Error("failure getting variant") - return "" - } - - // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") - // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 - if runtime.GOARCH == "arm" && variant == "7" { - model, err := getCPUInfo("model name") - if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { - variant = "6" - } - } - - switch strings.ToLower(variant) { - case "8", "aarch64": - variant = "v8" - case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": - variant = "v7" - case "6", "6tej": - variant = "v6" - case "5", "5t", "5te", "5tej": - variant = "v5" - case "4", "4t": - variant = "v4" - case "3": - variant = "v3" - default: - variant = "unknown" - } - - return variant -} diff --git a/platforms/cpuinfo_linux.go b/platforms/cpuinfo_linux.go new file mode 100644 index 0000000..722d86c --- /dev/null +++ b/platforms/cpuinfo_linux.go @@ -0,0 +1,161 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "bufio" + "bytes" + "fmt" + "os" + "runtime" + "strings" + + "github.com/containerd/containerd/errdefs" + "golang.org/x/sys/unix" +) + +// getMachineArch retrieves the machine architecture through system call +func getMachineArch() (string, error) { + var uname unix.Utsname + err := unix.Uname(&uname) + if err != nil { + return "", err + } + + arch := string(uname.Machine[:bytes.IndexByte(uname.Machine[:], 0)]) + + return arch, nil +} + +// For Linux, the kernel has already detected the ABI, ISA and Features. +// So we don't need to access the ARM registers to detect platform information +// by ourselves. We can just parse these information from /proc/cpuinfo +func getCPUInfo(pattern string) (info string, err error) { + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "", err + } + defer cpuinfo.Close() + + // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse + // the first core is enough. 
+ scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]), nil + } + } + + // Check whether the scanner encountered errors + err = scanner.Err() + if err != nil { + return "", err + } + + return "", fmt.Errorf("getCPUInfo for pattern %s: %w", pattern, errdefs.ErrNotFound) +} + +// getCPUVariantFromArch get CPU variant from arch through a system call +func getCPUVariantFromArch(arch string) (string, error) { + + var variant string + + arch = strings.ToLower(arch) + + if arch == "aarch64" { + variant = "8" + } else if arch[0:4] == "armv" && len(arch) >= 5 { + //Valid arch format is in form of armvXx + switch arch[3:5] { + case "v8": + variant = "8" + case "v7": + variant = "7" + case "v6": + variant = "6" + case "v5": + variant = "5" + case "v4": + variant = "4" + case "v3": + variant = "3" + default: + variant = "unknown" + } + } else { + return "", fmt.Errorf("getCPUVariantFromArch invalid arch: %s, %w", arch, errdefs.ErrInvalidArgument) + } + return variant, nil +} + +// getCPUVariant returns cpu variant for ARM +// We first try reading "Cpu architecture" field from /proc/cpuinfo +// If we can't find it, then fall back using a system call +// This is to cover running ARM in emulated environment on x86 host as this field in /proc/cpuinfo +// was not present. 
+func getCPUVariant() (string, error) { + + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + if errdefs.IsNotFound(err) { + //Let's try getting CPU variant from machine architecture + arch, err := getMachineArch() + if err != nil { + return "", fmt.Errorf("failure getting machine architecture: %v", err) + } + + variant, err = getCPUVariantFromArch(arch) + if err != nil { + return "", fmt.Errorf("failure getting CPU variant from machine architecture: %v", err) + } + } else { + return "", fmt.Errorf("failure getting CPU variant: %v", err) + } + } + + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model, err := getCPUInfo("model name") + if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "unknown" + } + + return variant, nil +} diff --git a/platforms/cpuinfo_linux_test.go b/platforms/cpuinfo_linux_test.go new file mode 100644 index 0000000..c0b8b0f --- /dev/null +++ b/platforms/cpuinfo_linux_test.go @@ -0,0 +1,141 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "errors" + "runtime" + "testing" + + "github.com/containerd/containerd/errdefs" +) + +func TestCPUVariant(t *testing.T) { + if !isArmArch(runtime.GOARCH) { + t.Skip("only relevant on linux/arm") + } + + variants := []string{"v8", "v7", "v6", "v5", "v4", "v3"} + + p, err := getCPUVariant() + if err != nil { + t.Fatalf("Error getting CPU variant: %v", err) + return + } + + for _, variant := range variants { + if p == variant { + t.Logf("got valid variant as expected: %#v = %#v", p, variant) + return + } + } + + t.Fatalf("could not get valid variant as expected: %v", variants) +} + +func TestGetCPUVariantFromArch(t *testing.T) { + + for _, testcase := range []struct { + name string + input string + output string + expectedErr error + }{ + { + name: "Test aarch64", + input: "aarch64", + output: "8", + expectedErr: nil, + }, + { + name: "Test Armv8 with capital", + input: "Armv8", + output: "8", + expectedErr: nil, + }, + { + name: "Test armv7", + input: "armv7", + output: "7", + expectedErr: nil, + }, + { + name: "Test armv6", + input: "armv6", + output: "6", + expectedErr: nil, + }, + { + name: "Test armv5", + input: "armv5", + output: "5", + expectedErr: nil, + }, + { + name: "Test armv4", + input: "armv4", + output: "4", + expectedErr: nil, + }, + { + name: "Test armv3", + input: "armv3", + output: "3", + expectedErr: nil, + }, + { + name: "Test unknown input", + input: "armv9", + output: "unknown", + expectedErr: nil, + }, + { + name: "Test invalid input which doesn't start with armv", + input: "armxxxx", + output: "", + expectedErr: errdefs.ErrInvalidArgument, + }, + { + name: "Test invalid input whose length is less than 5", + input: "armv", + output: "", + expectedErr: errdefs.ErrInvalidArgument, + }, + } { + t.Run(testcase.name, func(t *testing.T) { + t.Logf("input: %v", testcase.input) + + variant, err := 
getCPUVariantFromArch(testcase.input) + + if err == nil { + if testcase.expectedErr != nil { + t.Fatalf("Expect to get error: %v, however no error got", testcase.expectedErr) + } else { + if variant != testcase.output { + t.Fatalf("Expect to get variant: %v, however %v returned", testcase.output, variant) + } + } + + } else { + if !errors.Is(err, testcase.expectedErr) { + t.Fatalf("Expect to get error: %v, however error %v returned", testcase.expectedErr, err) + } + } + }) + + } +} diff --git a/platforms/cpuinfo_other.go b/platforms/cpuinfo_other.go new file mode 100644 index 0000000..51fb62e --- /dev/null +++ b/platforms/cpuinfo_other.go @@ -0,0 +1,60 @@ +//go:build !linux +// +build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package platforms + +import ( + "fmt" + "runtime" + + "github.com/containerd/containerd/errdefs" +) + +func getCPUVariant() (string, error) { + + var variant string + + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + } else if runtime.GOOS == "freebsd" { + // FreeBSD supports ARMv6 and ARMv7 as well as ARMv4 and ARMv5 (though deprecated) + // detecting those variants is currently unimplemented + switch runtime.GOARCH { + case "arm64": + variant = "v8" + default: + variant = "unknown" + } + + } else { + return "", fmt.Errorf("getCPUVariant for OS %s: %v", runtime.GOOS, errdefs.ErrNotImplemented) + + } + + return variant, nil +} diff --git a/platforms/database.go b/platforms/database.go index 6ede940..2e26fd3 100644 --- a/platforms/database.go +++ b/platforms/database.go @@ -21,13 +21,6 @@ import ( "strings" ) -// isLinuxOS returns true if the operating system is Linux. -// -// The OS value should be normalized before calling this function. -func isLinuxOS(os string) bool { - return os == "linux" -} - // These function are generated from https://golang.org/src/go/build/syslist.go. // // We use switch statements because they are slightly faster than map lookups @@ -38,7 +31,7 @@ func isLinuxOS(os string) bool { // The OS value should be normalized before calling this function. 
func isKnownOS(os string) bool { switch os { - case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": return true } return false @@ -60,7 +53,7 @@ func isArmArch(arch string) bool { // The arch value should be normalized before being passed to this function. func isKnownArch(arch string) bool { switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": return true } return false @@ -86,9 +79,11 @@ func normalizeArch(arch, variant string) (string, string) { case "i386": arch = "386" variant = "" - case "x86_64", "x86-64": + case "x86_64", "x86-64", "amd64": arch = "amd64" - variant = "" + if variant == "v1" { + variant = "" + } case "aarch64", "arm64": arch = "arm64" switch variant { diff --git a/platforms/defaults.go b/platforms/defaults.go index cb77fbc..cfa3ff3 100644 --- a/platforms/defaults.go +++ b/platforms/defaults.go @@ -16,27 +16,11 @@ package platforms -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - // DefaultString returns the default string specifier for the platform. func DefaultString() string { return Format(DefaultSpec()) } -// DefaultSpec returns the current platform's default platform specification. 
-func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - // DefaultStrict returns strict form of Default. func DefaultStrict() MatchComparer { return OnlyStrict(DefaultSpec()) diff --git a/platforms/defaults_darwin.go b/platforms/defaults_darwin.go new file mode 100644 index 0000000..e249fe4 --- /dev/null +++ b/platforms/defaults_darwin.go @@ -0,0 +1,45 @@ +//go:build darwin +// +build darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. 
+func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + // darwin runtime also supports Linux binary via runu/LKL + OS: "linux", + Architecture: runtime.GOARCH, + }) +} diff --git a/platforms/defaults_unix.go b/platforms/defaults_unix.go index e8a7d5f..49690f1 100644 --- a/platforms/defaults_unix.go +++ b/platforms/defaults_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright The containerd Authors. @@ -18,6 +19,22 @@ package platforms +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + // Default returns the default matcher for the platform. func Default() MatchComparer { return Only(DefaultSpec()) diff --git a/platforms/defaults_test.go b/platforms/defaults_unix_test.go similarity index 96% rename from platforms/defaults_test.go rename to platforms/defaults_unix_test.go index 62df535..a718c5c 100644 --- a/platforms/defaults_test.go +++ b/platforms/defaults_unix_test.go @@ -1,3 +1,6 @@ +//go:build !windows +// +build !windows + /* Copyright The containerd Authors. diff --git a/platforms/defaults_windows.go b/platforms/defaults_windows.go index 0c380e3..fa31aaf 100644 --- a/platforms/defaults_windows.go +++ b/platforms/defaults_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -24,11 +22,24 @@ import ( "strconv" "strings" + "github.com/Microsoft/hcsshim/osversion" imagespec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/sys/windows" ) +// DefaultSpec returns the current platform's default platform specification. 
+func DefaultSpec() specs.Platform { + major, minor, build := windows.RtlGetNtVersionNumbers() + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + type matchComparer struct { defaults Matcher osVersionPrefix string @@ -36,14 +47,39 @@ type matchComparer struct { // Match matches platform with the same windows major, minor // and build version. -func (m matchComparer) Match(p imagespec.Platform) bool { - if m.defaults.Match(p) { - // TODO(windows): Figure out whether OSVersion is deprecated. - return strings.HasPrefix(p.OSVersion, m.osVersionPrefix) +func (m matchComparer) Match(p specs.Platform) bool { + match := m.defaults.Match(p) + + if match && p.OS == "windows" { + // HPC containers do not have OS version filled + if p.OSVersion == "" { + return true + } + + hostOsVersion := getOSVersion(m.osVersionPrefix) + ctrOsVersion := getOSVersion(p.OSVersion) + return osversion.CheckHostAndContainerCompat(hostOsVersion, ctrOsVersion) } return false } +func getOSVersion(osVersionPrefix string) osversion.OSVersion { + parts := strings.Split(osVersionPrefix, ".") + if len(parts) < 3 { + return osversion.OSVersion{} + } + + majorVersion, _ := strconv.Atoi(parts[0]) + minorVersion, _ := strconv.Atoi(parts[1]) + buildNumber, _ := strconv.Atoi(parts[2]) + + return osversion.OSVersion{ + MajorVersion: uint8(majorVersion), + MinorVersion: uint8(minorVersion), + Build: uint16(buildNumber), + } +} + // Less sorts matched platforms in front of other platforms. // For matched platforms, it puts platforms with larger revision // number in front. diff --git a/platforms/defaults_windows_test.go b/platforms/defaults_windows_test.go index abf7718..60b8e74 100644 --- a/platforms/defaults_windows_test.go +++ b/platforms/defaults_windows_test.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -19,30 +17,59 @@ package platforms import ( + "fmt" + "reflect" + "runtime" "sort" "testing" imagespec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" + "golang.org/x/sys/windows" ) +func TestDefault(t *testing.T) { + major, minor, build := windows.RtlGetNtVersionNumbers() + expected := imagespec.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), + Variant: cpuVariant(), + } + p := DefaultSpec() + if !reflect.DeepEqual(p, expected) { + t.Fatalf("default platform not as expected: %#v != %#v", p, expected) + } + + s := DefaultString() + if s != Format(p) { + t.Fatalf("default specifier should match formatted default spec: %v != %v", s, p) + } +} + func TestMatchComparerMatch(t *testing.T) { + major, minor, build := windows.RtlGetNtVersionNumbers() + buildStr := fmt.Sprintf("%d.%d.%d", major, minor, build) m := matchComparer{ defaults: Only(imagespec.Platform{ Architecture: "amd64", OS: "windows", }), - osVersionPrefix: "10.0.17763", + osVersionPrefix: buildStr, } for _, test := range []struct { platform imagespec.Platform match bool }{ + { + platform: DefaultSpec(), + match: true, + }, { platform: imagespec.Platform{ Architecture: "amd64", OS: "windows", - OSVersion: "10.0.17763.1", + OSVersion: buildStr + ".1", }, match: true, }, @@ -50,7 +77,7 @@ func TestMatchComparerMatch(t *testing.T) { platform: imagespec.Platform{ Architecture: "amd64", OS: "windows", - OSVersion: "10.0.17763.2", + OSVersion: buildStr + ".2", }, match: true, }, @@ -58,7 +85,8 @@ func TestMatchComparerMatch(t *testing.T) { platform: imagespec.Platform{ Architecture: "amd64", OS: "windows", - OSVersion: "10.0.17762.1", + // Use an nonexistent Windows build so we don't get a match. 
Ws2019's build is 17763/ + OSVersion: "10.0.17762.1", }, match: false, }, @@ -66,7 +94,8 @@ func TestMatchComparerMatch(t *testing.T) { platform: imagespec.Platform{ Architecture: "amd64", OS: "windows", - OSVersion: "10.0.17764.1", + // Use an nonexistent Windows build so we don't get a match. Ws2019's build is 17763/ + OSVersion: "10.0.17764.1", }, match: false, }, @@ -75,7 +104,7 @@ func TestMatchComparerMatch(t *testing.T) { Architecture: "amd64", OS: "windows", }, - match: false, + match: true, }, } { assert.Equal(t, test.match, m.Match(test.platform)) @@ -130,11 +159,11 @@ func TestMatchComparerLess(t *testing.T) { { Architecture: "amd64", OS: "windows", - OSVersion: "10.0.17764.1", }, { Architecture: "amd64", OS: "windows", + OSVersion: "10.0.17764.1", }, { Architecture: "amd64", diff --git a/platforms/platforms.go b/platforms/platforms.go index 088bdea..2343099 100644 --- a/platforms/platforms.go +++ b/platforms/platforms.go @@ -27,40 +27,40 @@ // The vast majority of use cases should simply use the match function with // user input. The first step is to parse a specifier into a matcher: // -// m, err := Parse("linux") -// if err != nil { ... } +// m, err := Parse("linux") +// if err != nil { ... } // // Once you have a matcher, use it to match against the platform declared by a // component, typically from an image or runtime. Since extracting an images // platform is a little more involved, we'll use an example against the // platform default: // -// if ok := m.Match(Default()); !ok { /* doesn't match */ } +// if ok := m.Match(Default()); !ok { /* doesn't match */ } // // This can be composed in loops for resolving runtimes or used as a filter for // fetch and select images. // // More details of the specifier syntax and platform spec follow. // -// Declaring Platform Support +// # Declaring Platform Support // // Components that have strict platform requirements should use the OCI // platform specification to declare their support. 
Typically, this will be // images and runtimes that should make these declaring which platform they // support specifically. This looks roughly as follows: // -// type Platform struct { -// Architecture string -// OS string -// Variant string -// } +// type Platform struct { +// Architecture string +// OS string +// Variant string +// } // // Most images and runtimes should at least set Architecture and OS, according // to their GOARCH and GOOS values, respectively (follow the OCI image // specification when in doubt). ARM should set variant under certain // discussions, which are outlined below. // -// Platform Specifiers +// # Platform Specifiers // // While the OCI platform specifications provide a tool for components to // specify structured information, user input typically doesn't need the full @@ -77,7 +77,7 @@ // where the architecture may be known but a runtime may support images from // different operating systems. // -// Normalization +// # Normalization // // Because not all users are familiar with the way the Go runtime represents // platforms, several normalizations have been provided to make this package @@ -85,17 +85,17 @@ // // The following are performed for architectures: // -// Value Normalized -// aarch64 arm64 -// armhf arm -// armel arm/v6 -// i386 386 -// x86_64 amd64 -// x86-64 amd64 +// Value Normalized +// aarch64 arm64 +// armhf arm +// armel arm/v6 +// i386 386 +// x86_64 amd64 +// x86-64 amd64 // // We also normalize the operating system `macos` to `darwin`. // -// ARM Support +// # ARM Support // // To qualify ARM architecture, the Variant field is used to qualify the arm // version. 
The most common arm version, v7, is represented without the variant @@ -107,6 +107,8 @@ package platforms import ( + "fmt" + "path" "regexp" "runtime" "strconv" @@ -114,7 +116,6 @@ import ( "github.com/containerd/containerd/errdefs" specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) var ( @@ -166,14 +167,14 @@ func (m *matcher) String() string { func Parse(specifier string) (specs.Platform, error) { if strings.Contains(specifier, "*") { // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) + return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument) } parts := strings.Split(specifier, "/") for _, part := range parts { if !specifierRe.MatchString(part) { - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) + return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument) } } @@ -205,7 +206,7 @@ func Parse(specifier string) (specs.Platform, error) { return p, nil } - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) + return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument) case 2: // In this case, we treat as a regular os/arch pair. We don't care // about whether or not we know of the platform. 
@@ -227,7 +228,7 @@ func Parse(specifier string) (specs.Platform, error) { return p, nil } - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) + return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument) } // MustParse is like Parses but panics if the specifier cannot be parsed. @@ -246,20 +247,7 @@ func Format(platform specs.Platform) string { return "unknown" } - return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant) -} - -func joinNotEmpty(s ...string) string { - var ss []string - for _, s := range s { - if s == "" { - continue - } - - ss = append(ss, s) - } - - return strings.Join(ss, "/") + return path.Join(platform.OS, platform.Architecture, platform.Variant) } // Normalize validates and translate the platform to the canonical value. @@ -269,10 +257,5 @@ func joinNotEmpty(s ...string) string { func Normalize(platform specs.Platform) specs.Platform { platform.OS = normalizeOS(platform.OS) platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) - - // these fields are deprecated, remove them - platform.OSFeatures = nil - platform.OSVersion = "" - return platform } diff --git a/platforms/platforms_test.go b/platforms/platforms_test.go index 66abc06..c070dda 100644 --- a/platforms/platforms_test.go +++ b/platforms/platforms_test.go @@ -17,6 +17,7 @@ package platforms import ( + "path" "reflect" "runtime" "testing" @@ -204,7 +205,7 @@ func TestParseSelector(t *testing.T) { OS: defaultOS, Architecture: "arm", }, - formatted: joinNotEmpty(defaultOS, "arm"), + formatted: path.Join(defaultOS, "arm"), }, { input: "armel", @@ -213,7 +214,7 @@ func TestParseSelector(t *testing.T) { Architecture: "arm", Variant: "v6", }, - formatted: joinNotEmpty(defaultOS, "arm/v6"), + formatted: path.Join(defaultOS, "arm/v6"), }, { input: "armhf", @@ -221,7 +222,7 @@ func TestParseSelector(t *testing.T) { 
OS: defaultOS, Architecture: "arm", }, - formatted: joinNotEmpty(defaultOS, "arm"), + formatted: path.Join(defaultOS, "arm"), }, { input: "Aarch64", @@ -229,7 +230,7 @@ func TestParseSelector(t *testing.T) { OS: defaultOS, Architecture: "arm64", }, - formatted: joinNotEmpty(defaultOS, "arm64"), + formatted: path.Join(defaultOS, "arm64"), }, { input: "x86_64", @@ -237,7 +238,7 @@ func TestParseSelector(t *testing.T) { OS: defaultOS, Architecture: "amd64", }, - formatted: joinNotEmpty(defaultOS, "amd64"), + formatted: path.Join(defaultOS, "amd64"), }, { input: "Linux/x86_64", @@ -253,7 +254,7 @@ func TestParseSelector(t *testing.T) { OS: defaultOS, Architecture: "386", }, - formatted: joinNotEmpty(defaultOS, "386"), + formatted: path.Join(defaultOS, "386"), }, { input: "linux", @@ -262,7 +263,7 @@ func TestParseSelector(t *testing.T) { Architecture: defaultArch, Variant: defaultVariant, }, - formatted: joinNotEmpty("linux", defaultArch, defaultVariant), + formatted: path.Join("linux", defaultArch, defaultVariant), }, { input: "s390x", @@ -270,7 +271,7 @@ func TestParseSelector(t *testing.T) { OS: defaultOS, Architecture: "s390x", }, - formatted: joinNotEmpty(defaultOS, "s390x"), + formatted: path.Join(defaultOS, "s390x"), }, { input: "linux/s390x", @@ -287,7 +288,7 @@ func TestParseSelector(t *testing.T) { Architecture: defaultArch, Variant: defaultVariant, }, - formatted: joinNotEmpty("darwin", defaultArch, defaultVariant), + formatted: path.Join("darwin", defaultArch, defaultVariant), }, } { t.Run(testcase.input, func(t *testing.T) { diff --git a/platforms/cpuinfo_test.go b/platforms/platforms_windows_test.go similarity index 58% rename from platforms/cpuinfo_test.go rename to platforms/platforms_windows_test.go index fca6b69..d5f3883 100644 --- a/platforms/cpuinfo_test.go +++ b/platforms/platforms_windows_test.go @@ -17,24 +17,25 @@ package platforms import ( - "runtime" "testing" + + specs "github.com/opencontainers/image-spec/specs-go/v1" + 
"github.com/stretchr/testify/require" ) -func TestCPUVariant(t *testing.T) { - if !isArmArch(runtime.GOARCH) || !isLinuxOS(runtime.GOOS) { - t.Skip("only relevant on linux/arm") - } - - variants := []string{"v8", "v7", "v6", "v5", "v4", "v3"} - - p := getCPUVariant() - for _, variant := range variants { - if p == variant { - t.Logf("got valid variant as expected: %#v = %#v\n", p, variant) - return - } - } - - t.Fatalf("could not get valid variant as expected: %v\n", variants) +func TestNormalize(t *testing.T) { + require.Equal(t, DefaultSpec(), Normalize(DefaultSpec())) +} + +func TestFallbackOnOSVersion(t *testing.T) { + p := specs.Platform{ + OS: "windows", + Architecture: "amd64", + OSVersion: "99.99.99.99", + } + + other := specs.Platform{OS: p.OS, Architecture: p.Architecture} + + m := NewMatcher(p) + require.True(t, m.Match(other)) } diff --git a/plugin/context.go b/plugin/context.go index 75b7366..cf91678 100644 --- a/plugin/context.go +++ b/plugin/context.go @@ -18,23 +18,26 @@ package plugin import ( "context" + "fmt" "path/filepath" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events/exchange" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) -// InitContext is used for plugin inititalization +// InitContext is used for plugin initialization type InitContext struct { - Context context.Context - Root string - State string - Config interface{} - Address string - TTRPCAddress string - Events *exchange.Exchange + Context context.Context + Root string + State string + Config interface{} + Address string + TTRPCAddress string + RegisterReadiness func() func() + + // deprecated: will be removed in 2.0, use plugin.EventType + Events *exchange.Exchange Meta *Meta // plugins can fill in metadata at init. 
@@ -115,7 +118,7 @@ func (ps *Set) Add(p *Plugin) error { } else if _, idok := byID[p.Registration.ID]; !idok { byID[p.Registration.ID] = p } else { - return errors.Wrapf(errdefs.ErrAlreadyExists, "plugin %v already initialized", p.Registration.URI()) + return fmt.Errorf("plugin %v already initialized: %w", p.Registration.URI(), errdefs.ErrAlreadyExists) } ps.ordered = append(ps.ordered, p) @@ -127,19 +130,42 @@ func (ps *Set) Get(t Type) (interface{}, error) { for _, v := range ps.byTypeAndID[t] { return v.Instance() } - return nil, errors.Wrapf(errdefs.ErrNotFound, "no plugins registered for %s", t) + return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound) +} + +// GetAll returns all initialized plugins +func (ps *Set) GetAll() []*Plugin { + return ps.ordered +} + +// Plugins returns plugin set +func (i *InitContext) Plugins() *Set { + return i.plugins } // GetAll plugins in the set func (i *InitContext) GetAll() []*Plugin { - return i.plugins.ordered + return i.plugins.GetAll() +} + +// GetByID returns the plugin of the given type and ID +func (i *InitContext) GetByID(t Type, id string) (interface{}, error) { + ps, err := i.GetByType(t) + if err != nil { + return nil, err + } + p, ok := ps[id] + if !ok { + return nil, fmt.Errorf("no %s plugins with id %s: %w", t, id, errdefs.ErrNotFound) + } + return p.Instance() } // GetByType returns all plugins with the specific type. 
func (i *InitContext) GetByType(t Type) (map[string]*Plugin, error) { p, ok := i.plugins.byTypeAndID[t] if !ok { - return nil, errors.Wrapf(errdefs.ErrNotFound, "no plugins registered for %s", t) + return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound) } return p, nil diff --git a/plugin/plugin.go b/plugin/plugin.go index 2674ede..eb38c12 100644 --- a/plugin/plugin.go +++ b/plugin/plugin.go @@ -17,12 +17,9 @@ package plugin import ( + "errors" "fmt" "sync" - - "github.com/containerd/ttrpc" - "github.com/pkg/errors" - "google.golang.org/grpc" ) var ( @@ -63,6 +60,8 @@ const ( ServicePlugin Type = "io.containerd.service.v1" // GRPCPlugin implements a grpc service GRPCPlugin Type = "io.containerd.grpc.v1" + // TTRPCPlugin implements a ttrpc shim service + TTRPCPlugin Type = "io.containerd.ttrpc.v1" // SnapshotPlugin implements a snapshotter SnapshotPlugin Type = "io.containerd.snapshotter.v1" // TaskMonitorPlugin implements a task monitor @@ -75,6 +74,10 @@ const ( ContentPlugin Type = "io.containerd.content.v1" // GCPlugin implements garbage collection policy GCPlugin Type = "io.containerd.gc.v1" + // EventPlugin implements event handling + EventPlugin Type = "io.containerd.event.v1" + // TracingProcessorPlugin implements a open telemetry span processor + TracingProcessorPlugin Type = "io.containerd.tracing.processor.v1" ) const ( @@ -122,21 +125,6 @@ func (r *Registration) URI() string { return fmt.Sprintf("%s.%s", r.Type, r.ID) } -// Service allows GRPC services to be registered with the underlying server -type Service interface { - Register(*grpc.Server) error -} - -// TTRPCService allows TTRPC services to be registered with the underlying server -type TTRPCService interface { - RegisterTTRPC(*ttrpc.Server) error -} - -// TCPService allows GRPC services to be registered with the underlying tcp server -type TCPService interface { - RegisterTCP(*grpc.Server) error -} - var register = struct { sync.RWMutex r []*Registration @@ -171,15 
+159,11 @@ func Register(r *Registration) { panic(err) } - var last bool for _, requires := range r.Requires { - if requires == "*" { - last = true + if requires == "*" && len(r.Requires) != 1 { + panic(ErrInvalidRequires) } } - if last && len(r.Requires) != 1 { - panic(ErrInvalidRequires) - } register.r = append(register.r, r) } @@ -187,7 +171,7 @@ func Register(r *Registration) { func checkUnique(r *Registration) error { for _, registered := range register.r { if r.URI() == registered.URI() { - return errors.Wrap(ErrIDRegistered, r.URI()) + return fmt.Errorf("%s: %w", r.URI(), ErrIDRegistered) } } return nil diff --git a/plugin/plugin_go18.go b/plugin/plugin_go18.go index 927fe61..0df0669 100644 --- a/plugin/plugin_go18.go +++ b/plugin/plugin_go18.go @@ -1,3 +1,4 @@ +//go:build go1.8 && !windows && amd64 && !static_build && !gccgo // +build go1.8,!windows,amd64,!static_build,!gccgo /* diff --git a/plugin/plugin_other.go b/plugin/plugin_other.go index 0c5e141..a2883bb 100644 --- a/plugin/plugin_other.go +++ b/plugin/plugin_other.go @@ -1,3 +1,4 @@ +//go:build !go1.8 || windows || !amd64 || static_build || gccgo // +build !go1.8 windows !amd64 static_build gccgo /* diff --git a/plugin/plugin_test.go b/plugin/plugin_test.go new file mode 100644 index 0000000..b79e054 --- /dev/null +++ b/plugin/plugin_test.go @@ -0,0 +1,385 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package plugin + +import ( + "testing" + + "github.com/containerd/containerd/services" +) + +func registerClear() { + register.Lock() + defer register.Unlock() + register.r = nil +} + +func mockPluginFilter(*Registration) bool { + return false +} + +var tasksServiceRequires = []Type{ + RuntimePlugin, + RuntimePluginV2, + MetadataPlugin, + TaskMonitorPlugin, +} + +// TestContainerdPlugin tests the logic of Graph, use the containerd's plugin +func TestContainerdPlugin(t *testing.T) { + registerClear() + Register(&Registration{ + Type: TaskMonitorPlugin, + ID: "cgroups", + }) + Register(&Registration{ + Type: ServicePlugin, + ID: services.TasksService, + Requires: tasksServiceRequires, + }) + Register(&Registration{ + Type: ServicePlugin, + ID: services.IntrospectionService, + }) + Register(&Registration{ + Type: ServicePlugin, + ID: services.NamespacesService, + Requires: []Type{ + MetadataPlugin, + }, + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "namespaces", + Requires: []Type{ + ServicePlugin, + }, + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "content", + Requires: []Type{ + ServicePlugin, + }, + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "containers", + Requires: []Type{ + ServicePlugin, + }, + }) + Register(&Registration{ + Type: ServicePlugin, + ID: services.ContainersService, + Requires: []Type{ + MetadataPlugin, + }, + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "events", + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "leases", + Requires: []Type{ + ServicePlugin, + }, + }) + Register(&Registration{ + Type: ServicePlugin, + ID: services.LeasesService, + Requires: []Type{ + MetadataPlugin, + }, + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "diff", + Requires: []Type{ + ServicePlugin, + }, + }) + Register(&Registration{ + Type: ServicePlugin, + ID: services.DiffService, + Requires: []Type{ + DiffPlugin, + }, + }) + Register(&Registration{ + Type: ServicePlugin, + ID: 
services.SnapshotsService, + Requires: []Type{ + MetadataPlugin, + }, + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "snapshots", + Requires: []Type{ + ServicePlugin, + }, + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "version", + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "images", + Requires: []Type{ + ServicePlugin, + }, + }) + Register(&Registration{ + Type: GCPlugin, + ID: "scheduler", + Requires: []Type{ + MetadataPlugin, + }, + }) + Register(&Registration{ + Type: RuntimePluginV2, + ID: "task", + Requires: []Type{ + MetadataPlugin, + }, + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "tasks", + Requires: []Type{ + ServicePlugin, + }, + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "introspection", + Requires: []Type{"*"}, + }) + Register(&Registration{ + Type: ServicePlugin, + ID: services.ContentService, + Requires: []Type{ + MetadataPlugin, + }, + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "healthcheck", + }) + Register(&Registration{ + Type: InternalPlugin, + ID: "opt", + }) + Register(&Registration{ + Type: GRPCPlugin, + ID: "cri", + Requires: []Type{ + ServicePlugin, + }, + }) + Register(&Registration{ + Type: RuntimePlugin, + ID: "linux", + Requires: []Type{ + MetadataPlugin, + }, + }) + Register(&Registration{ + Type: InternalPlugin, + Requires: []Type{ + ServicePlugin, + }, + ID: "restart", + }) + Register(&Registration{ + Type: DiffPlugin, + ID: "walking", + Requires: []Type{ + MetadataPlugin, + }, + }) + Register(&Registration{ + Type: SnapshotPlugin, + ID: "native", + }) + Register(&Registration{ + Type: SnapshotPlugin, + ID: "overlayfs", + }) + Register(&Registration{ + Type: ContentPlugin, + ID: "content", + }) + Register(&Registration{ + Type: MetadataPlugin, + ID: "bolt", + Requires: []Type{ + ContentPlugin, + SnapshotPlugin, + }, + }) + + ordered := Graph(mockPluginFilter) + expectedURI := []string{ + "io.containerd.monitor.v1.cgroups", + "io.containerd.content.v1.content", + 
"io.containerd.snapshotter.v1.native", + "io.containerd.snapshotter.v1.overlayfs", + "io.containerd.metadata.v1.bolt", + "io.containerd.runtime.v1.linux", + "io.containerd.runtime.v2.task", + "io.containerd.service.v1.tasks-service", + "io.containerd.service.v1.introspection-service", + "io.containerd.service.v1.namespaces-service", + "io.containerd.service.v1.containers-service", + "io.containerd.service.v1.leases-service", + "io.containerd.differ.v1.walking", + "io.containerd.service.v1.diff-service", + "io.containerd.service.v1.snapshots-service", + "io.containerd.service.v1.content-service", + "io.containerd.grpc.v1.namespaces", + "io.containerd.grpc.v1.content", + "io.containerd.grpc.v1.containers", + "io.containerd.grpc.v1.events", + "io.containerd.grpc.v1.leases", + "io.containerd.grpc.v1.diff", + "io.containerd.grpc.v1.snapshots", + "io.containerd.grpc.v1.version", + "io.containerd.grpc.v1.images", + "io.containerd.gc.v1.scheduler", + "io.containerd.grpc.v1.tasks", + "io.containerd.grpc.v1.healthcheck", + "io.containerd.internal.v1.opt", + "io.containerd.grpc.v1.cri", + "io.containerd.internal.v1.restart", + "io.containerd.grpc.v1.introspection", + } + cmpOrdered(t, ordered, expectedURI) +} + +func cmpOrdered(t *testing.T, ordered []*Registration, expectedURI []string) { + if len(ordered) != len(expectedURI) { + t.Fatalf("ordered compare failed, %d != %d", len(ordered), len(expectedURI)) + } + for i := range ordered { + if ordered[i].URI() != expectedURI[i] { + t.Fatalf("graph failed, expected: %s, but return: %s", expectedURI[i], ordered[i].URI()) + } + } +} + +// TestPluginGraph tests the logic of Graph +func TestPluginGraph(t *testing.T) { + for _, testcase := range []struct { + input []*Registration + expectedURI []string + }{ + // test requires * + { + input: []*Registration{ + { + Type: "grpc", + ID: "introspection", + Requires: []Type{ + "*", + }, + }, + { + Type: "service", + ID: "container", + }, + }, + expectedURI: []string{ + "service.container", 
+ "grpc.introspection", + }, + }, + // test requires + { + input: []*Registration{ + { + Type: "service", + ID: "container", + Requires: []Type{ + "metadata", + }, + }, + { + Type: "metadata", + ID: "bolt", + }, + }, + expectedURI: []string{ + "metadata.bolt", + "service.container", + }, + }, + { + input: []*Registration{ + { + Type: "metadata", + ID: "bolt", + Requires: []Type{ + "content", + "snapshotter", + }, + }, + { + Type: "snapshotter", + ID: "overlayfs", + }, + { + Type: "content", + ID: "content", + }, + }, + expectedURI: []string{ + "content.content", + "snapshotter.overlayfs", + "metadata.bolt", + }, + }, + // test disable + { + input: []*Registration{ + { + Type: "content", + ID: "content", + }, + { + Type: "disable", + ID: "disable", + Disable: true, + }, + }, + expectedURI: []string{ + "content.content", + }, + }, + } { + registerClear() + for _, in := range testcase.input { + Register(in) + } + ordered := Graph(mockPluginFilter) + cmpOrdered(t, ordered, testcase.expectedURI) + } +} diff --git a/process.go b/process.go index 5b30256..297c77a 100644 --- a/process.go +++ b/process.go @@ -18,6 +18,7 @@ package containerd import ( "context" + "fmt" "strings" "syscall" "time" @@ -25,7 +26,6 @@ import ( "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" ) // Process represents a system process @@ -71,8 +71,10 @@ type ExitStatus struct { // Result returns the exit code and time of the exit status. // An error may be returned here to which indicates there was an error -// at some point while waiting for the exit status. It does not signify -// an error with the process itself. +// +// at some point while waiting for the exit status. It does not signify +// an error with the process itself. +// // If an error is returned, the process may still be running. 
func (s ExitStatus) Result() (uint32, time.Time, error) { return s.code, s.exitedAt, s.err @@ -210,7 +212,7 @@ func (p *process) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitS } switch status.Status { case Running, Paused, Pausing: - return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "process must be stopped before deletion") + return nil, fmt.Errorf("current process state: %s, process must be stopped before deletion: %w", status.Status, errdefs.ErrFailedPrecondition) } r, err := p.task.client.TaskService().DeleteProcess(ctx, &tasks.DeleteProcessRequest{ ContainerID: p.task.id, diff --git a/protobuf/plugin/fieldpath/fieldpath.go b/protobuf/plugin/fieldpath/fieldpath.go index 2c724a3..b5f3ff1 100644 --- a/protobuf/plugin/fieldpath/fieldpath.go +++ b/protobuf/plugin/fieldpath/fieldpath.go @@ -19,7 +19,6 @@ package fieldpath import ( "strings" - "github.com/containerd/containerd/protobuf/plugin" "github.com/gogo/protobuf/gogoproto" "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" "github.com/gogo/protobuf/protoc-gen-gogo/generator" @@ -51,7 +50,7 @@ func (p *fieldpathGenerator) Generate(file *generator.FileDescriptor) { if m.DescriptorProto.GetOptions().GetMapEntry() { continue } - if plugin.FieldpathEnabled(file.FileDescriptorProto, m.DescriptorProto) { + if Enabled(file.FileDescriptorProto, m.DescriptorProto) { p.generateMessage(m) } } diff --git a/protobuf/plugin/helpers.go b/protobuf/plugin/fieldpath/helpers.go similarity index 66% rename from protobuf/plugin/helpers.go rename to protobuf/plugin/fieldpath/helpers.go index 912fda2..5bd1b78 100644 --- a/protobuf/plugin/helpers.go +++ b/protobuf/plugin/fieldpath/helpers.go @@ -14,14 +14,15 @@ limitations under the License. 
*/ -package plugin +package fieldpath import ( + "github.com/containerd/containerd/protobuf/plugin" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" ) -// FieldpathEnabled returns true if E_Fieldpath is enabled -func FieldpathEnabled(file *descriptor.FileDescriptorProto, message *descriptor.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Fieldpath, proto.GetBoolExtension(file.Options, E_FieldpathAll, false)) +// Enabled returns true if E_Fieldpath is enabled +func Enabled(file *descriptor.FileDescriptorProto, message *descriptor.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, plugin.E_Fieldpath, proto.GetBoolExtension(file.Options, plugin.E_FieldpathAll, false)) } diff --git a/pull.go b/pull.go index 3636551..92f7719 100644 --- a/pull.go +++ b/pull.go @@ -18,6 +18,8 @@ package containerd import ( "context" + "errors" + "fmt" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" @@ -26,7 +28,6 @@ import ( "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker/schema1" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" ) @@ -49,7 +50,7 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima } else { p, err := platforms.Parse(pullCtx.Platforms[0]) if err != nil { - return nil, errors.Wrapf(err, "invalid platform %s", pullCtx.Platforms[0]) + return nil, fmt.Errorf("invalid platform %s: %w", pullCtx.Platforms[0], err) } pullCtx.PlatformMatcher = platforms.Only(p) @@ -70,13 +71,13 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima // unpacker only supports schema 2 image, for schema 1 this is noop. 
u, err := c.newUnpacker(ctx, pullCtx) if err != nil { - return nil, errors.Wrap(err, "create unpacker") + return nil, fmt.Errorf("create unpacker: %w", err) } unpackWrapper, unpackEg = u.handlerWrapper(ctx, pullCtx, &unpacks) defer func() { if err := unpackEg.Wait(); err != nil { if retErr == nil { - retErr = errors.Wrap(err, "unpack") + retErr = fmt.Errorf("unpack: %w", err) } } }() @@ -117,7 +118,7 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima // Try to unpack is none is done previously. // This is at least required for schema 1 image. if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil { - return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter) + return nil, fmt.Errorf("failed to unpack image on snapshotter %s: %w", pullCtx.Snapshotter, err) } } } @@ -129,12 +130,12 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim store := c.ContentStore() name, desc, err := rCtx.Resolver.Resolve(ctx, ref) if err != nil { - return images.Image{}, errors.Wrapf(err, "failed to resolve reference %q", ref) + return images.Image{}, fmt.Errorf("failed to resolve reference %q: %w", ref, err) } fetcher, err := rCtx.Resolver.Fetcher(ctx, name) if err != nil { - return images.Image{}, errors.Wrapf(err, "failed to get fetcher for %q", name) + return images.Image{}, fmt.Errorf("failed to get fetcher for %q: %w", name, err) } var ( diff --git a/reference/docker/reference.go b/reference/docker/reference.go index 0998639..1ef223d 100644 --- a/reference/docker/reference.go +++ b/reference/docker/reference.go @@ -19,13 +19,13 @@ // // Grammar // -// reference := name [ ":" tag ] [ "@" digest ] +// reference := name [ ":" tag ] [ "@" digest ] // name := [domain '/'] path-component ['/' path-component]* // domain := domain-component ['.' 
domain-component]* [':' port-number] // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ +// alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ @@ -338,11 +338,13 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) { // TrimNamed removes any tag or digest from the named reference. func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, + repo := repository{} + if r, ok := ref.(namedRepository); ok { + repo.domain, repo.path = r.Domain(), r.Path() + } else { + repo.domain, repo.path = splitDomain(ref.Name()) } + return repo } func getBestReferenceType(ref reference) Reference { @@ -681,7 +683,7 @@ func splitDockerDomain(name string) (domain, remainder string) { } // familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain +// to the Docker UI. Familiar names have the default domain // "docker.io" and "library/" repository prefix removed. // For example, "docker.io/library/redis" will have the familiar // name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". diff --git a/releases/README.md b/releases/README.md index 3496893..0820fbf 100644 --- a/releases/README.md +++ b/releases/README.md @@ -9,7 +9,7 @@ When moving from rc to final, the rc file may just be renamed and updated. See [release-tool](https://github.com/containerd/release-tool) - 2. Update the version file at `https://github.com/containerd/containerd/blob/master/version/version.go` + 2. Update the version file at `https://github.com/containerd/containerd/blob/main/version/version.go` 3. Update RELEASES.md to refer to the new release and dates. 
diff --git a/releases/cri-containerd.DEPRECATED.txt b/releases/cri-containerd.DEPRECATED.txt new file mode 100644 index 0000000..0141034 --- /dev/null +++ b/releases/cri-containerd.DEPRECATED.txt @@ -0,0 +1,12 @@ +The "cri-containerd-(cni-)-VERSION-OS-ARCH.tar.gz" release bundle has been deprecated since containerd 1.6, +does not work on some Linux distributions, and will be removed in containerd 2.0. + +Instead of this, install the following components separately, either from the binary or from the source: +* containerd: https://github.com/containerd/containerd/releases +* runc: https://github.com/opencontainers/runc/releases +* CNI plugins: https://github.com/containernetworking/plugins/releases + +The CRI plugin has been included in containerd since containerd 1.1. + +See also the "Getting started" document: +https://github.com/containerd/containerd/blob/main/docs/getting-started.md diff --git a/releases/v1.0.0.toml b/releases/v1.0.0.toml index 05867d6..bd7b196 100644 --- a/releases/v1.0.0.toml +++ b/releases/v1.0.0.toml @@ -10,7 +10,7 @@ previous = "v1.0.0-rc.0" pre_release = false preface = """\ -`containerd` provides a daemon for managing running containers. +`containerd` provides a daemon for managing running containers. The new containerd is designed for use designed for use in container-based systems, like Docker and Kubernetes. It provides minimal abstractions over the @@ -24,8 +24,8 @@ A short list of differences from the 0.2 version of containerd follow: - Snapshot-based container storage system - Content-addressed image storage system - Arbitrary metadata storage for association with containerd objects -- Extensive, namespaced [GRPC API](https://github.com/containerd/containerd/tree/master/api/services) -- Rich API Client ([getting started](https://github.com/containerd/containerd/blob/master/docs/getting-started.md) and [godoc](https://godoc.org/github.com/containerd/containerd)). 
+- Extensive, namespaced [GRPC API](https://github.com/containerd/containerd/tree/main/api/services) +- Rich API Client ([getting started](https://github.com/containerd/containerd/blob/main/docs/getting-started.md) and [godoc](https://godoc.org/github.com/containerd/containerd)). You can read about the full details of the project in the [README](https://github.com/containerd/containerd/blob/v1.0.0/README.md). @@ -41,7 +41,7 @@ candidate full of stability fixes, hardening, and API improvements. If you run into a new problems, please file an [issue](https://github.com/containerd/containerd/issues). -The [_support horizon_](https://github.com/containerd/containerd/blob/master/RELEASES.md#support-horizon) +The [_support horizon_](https://github.com/containerd/containerd/blob/main/RELEASES.md#support-horizon) for containerd has been updated to include the 1.0 release. With this release, the 0.2 release line is now moved to the "End of Life" state. No new releases on that branch will be made. The 1.0 release train will be considered an active diff --git a/releases/v1.1.0.toml b/releases/v1.1.0.toml index 5d377b5..9817dba 100644 --- a/releases/v1.1.0.toml +++ b/releases/v1.1.0.toml @@ -130,7 +130,7 @@ To set up containerd with Kubernetes... ## Support -The [_support horizon_](https://github.com/containerd/containerd/blob/master/RELEASES.md#support-horizon) +The [_support horizon_](https://github.com/containerd/containerd/blob/main/RELEASES.md#support-horizon) for containerd has been updated to include the 1.1 release. With the addition of the CRI plugin, we are expanding the support horizon for 1.1 to include the entire lifespan of Kubernetes 1.10. The containerd 1.1 release train will be diff --git a/releases/v1.2.0.toml b/releases/v1.2.0.toml index 3e02b69..8d84477 100644 --- a/releases/v1.2.0.toml +++ b/releases/v1.2.0.toml @@ -27,7 +27,7 @@ API. 
Various runtimes can be selected on a per container basis using the `WithRuntime` opt or to test via ctr `ctr run --runtime io.containerd.runc.v1`. -[Documentation](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md) +[Documentation](https://github.com/containerd/containerd/blob/main/runtime/v2/README.md) ## Updated CRI Plugin @@ -64,7 +64,7 @@ Users can: A new proxy plugin configuration has been added to allow external snapshotters be connected to containerd using gRPC. -[Documentation](https://github.com/containerd/containerd/blob/master/PLUGINS.md) +[Documentation](https://github.com/containerd/containerd/blob/main/PLUGINS.md) ## Managed /opt directory @@ -80,7 +80,7 @@ containerd system. It can also be used to install `runc` and other related tools > ctr install docker.io/crosbymichael/runc:latest ``` -[Documentation](https://github.com/containerd/containerd/blob/master/docs/managed-opt.md) +[Documentation](https://github.com/containerd/containerd/blob/main/docs/managed-opt.md) ## Garbage Collection diff --git a/releases/v1.5.1.toml b/releases/v1.5.1.toml deleted file mode 100644 index 8a573a5..0000000 --- a/releases/v1.5.1.toml +++ /dev/null @@ -1,23 +0,0 @@ -# commit to be tagged for new release -commit = "HEAD" - -project_name = "containerd" -github_repo = "containerd/containerd" -match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" - -# previous release -previous = "v1.5.0" - -pre_release = false - -preface = """\ -The first patch release for containerd 1.5 includes an updated version -of runc and minor fix in the CRI service - -### Notable Updates - -* **Update runc to rc94** [#5473](https://github.com/containerd/containerd/pull/5473) -* **Fix registry mirror authorization logic in CRI plugin** [#5446](https://github.com/containerd/containerd/pull/5446) -* **Fix regression in cri-cni-release to include cri tools** [#5462](https://github.com/containerd/containerd/pull/5462) - -See the changelog for complete list of changes""" diff 
--git a/releases/v1.5.3.toml b/releases/v1.5.3.toml deleted file mode 100644 index b1e515b..0000000 --- a/releases/v1.5.3.toml +++ /dev/null @@ -1,26 +0,0 @@ -# commit to be tagged for new release -commit = "HEAD" - -project_name = "containerd" -github_repo = "containerd/containerd" -match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" - -# previous release -previous = "v1.5.2" - -pre_release = false - -preface = """\ -The third patch release for containerd 1.5 updates runc to 1.0.0 and contains -various other fixes. - -### Notable Updates - -* **Update runc binary to 1.0.0** [5552](https://github.com/containerd/containerd/pull/5552) -* **Send pod UID to CNI plugins as K8S_POD_UID** [#5640](https://github.com/containerd/containerd/pull/5640) -* **Fix invalid validation error checking** [#5565](https://github.com/containerd/containerd/pull/5565) -* **Fix error on image pull resume** [#5560](https://github.com/containerd/containerd/pull/5560) -* **Fix User Agent sent to registry authentication server** [#5533](https://github.com/containerd/containerd/pull/5533) -* **Fix symlink resolution for disk mounts on Windows** [#5411](https://github.com/containerd/containerd/pull/5411) - -See the changelog for complete list of changes""" diff --git a/releases/v1.5.4.toml b/releases/v1.5.4.toml deleted file mode 100644 index c8d25e0..0000000 --- a/releases/v1.5.4.toml +++ /dev/null @@ -1,14 +0,0 @@ -# commit to be tagged for new release -commit = "HEAD" - -project_name = "containerd" -github_repo = "containerd/containerd" -match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" - -# previous release -previous = "v1.5.3" - -pre_release = false - -preface = """\ -The fourth patch release for containerd 1.5 is a security release to address [CVE-2021-32760](https://github.com/containerd/containerd/security/advisories/GHSA-c72p-9xmj-rx3w).""" diff --git a/releases/v1.5.5.toml b/releases/v1.5.5.toml deleted file mode 100644 index 185cc86..0000000 --- a/releases/v1.5.5.toml +++ 
/dev/null @@ -1,23 +0,0 @@ -# commit to be tagged for new release -commit = "HEAD" - -project_name = "containerd" -github_repo = "containerd/containerd" -match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" - -# previous release -previous = "v1.5.4" - -pre_release = false - -preface = """\ -The fifth patch release for containerd 1.5 updates runc to 1.0.1 and contains -other minor updates. - -### Notable Updates - -* **Update runc binary to 1.0.1** [#5751](https://github.com/containerd/containerd/pull/5751) -* **Update pull logic to try next mirror on non-404 response** [#5275](https://github.com/containerd/containerd/pull/5275) -* **Update pull authorization logic on redirect** [#5504](https://github.com/containerd/containerd/pull/5504) - -See the changelog for complete list of changes""" diff --git a/releases/v1.5.6.toml b/releases/v1.5.6.toml deleted file mode 100644 index 3cbe8de..0000000 --- a/releases/v1.5.6.toml +++ /dev/null @@ -1,26 +0,0 @@ -# commit to be tagged for new release -commit = "HEAD" - -project_name = "containerd" -github_repo = "containerd/containerd" -match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" - -# previous release -previous = "v1.5.5" - -pre_release = false - -preface = """\ -The sixth patch release for containerd 1.5 contains minor fixes and updates -including an updated runc and hcsshim. 
- -### Notable Updates - -* **Install apparmor parser for arm64 and update seccomp to 2.5.1** [#5763](https://github.com/containerd/containerd/pull/5763) -* **Update runc binary to 1.0.2** [#5899](https://github.com/containerd/containerd/pull/5899) -* **Update hcsshim to v0.8.21 to fix layer issue on Windows Server 2019** [#5942](https://github.com/containerd/containerd/pull/5942) -* **Add support for 'clone3' syscall to fix issue with certain images when seccomp is enabled** [#5982](https://github.com/containerd/containerd/pull/5982) -* **Add image config labels in CRI container creation** [#6012](https://github.com/containerd/containerd/pull/6012) -* **Fix panic in metadata content writer on copy error** [#6043](https://github.com/containerd/containerd/pull/6043) - -See the changelog for complete list of changes""" diff --git a/releases/v1.5.8.toml b/releases/v1.5.8.toml deleted file mode 100644 index 6fa527d..0000000 --- a/releases/v1.5.8.toml +++ /dev/null @@ -1,27 +0,0 @@ -# commit to be tagged for new release -commit = "HEAD" - -project_name = "containerd" -github_repo = "containerd/containerd" -match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" - -# previous release -previous = "v1.5.7" - -pre_release = false - -preface = """\ -The eighth patch release for containerd 1.5 contains a mitigation for [CVE-2021-41190](https://github.com/opencontainers/distribution-spec/security/advisories/GHSA-mc8v-mgrf-8f4m) -as well as several fixes and updates. 
- -### Notable Updates -* **Handle ambiguous OCI manifest parsing** ([GHSA-5j5w-g665-5m35](https://github.com/containerd/containerd/security/advisories/GHSA-5j5w-g665-5m35)) -* **Filter selinux xattr for image volumes in CRI plugin** ([#5104](https://github.com/containerd/containerd/pull/5104)) -* **Use DeactiveLayer to unlock layers that cannot be renamed in Windows snapshotter** ([#5422](https://github.com/containerd/containerd/pull/5422)) -* **Fix pull failure on unexpected EOF** ([#5921](https://github.com/containerd/containerd/pull/5921)) -* **Close task IO before waiting on delete** ([#5974](https://github.com/containerd/containerd/pull/5974)) -* **Log a warning for ignored invalid image labels rather than erroring** ([#6124](https://github.com/containerd/containerd/pull/6124)) -* **Update pull to handle of non-https urls in descriptors** ([#6221](https://github.com/containerd/containerd/pull/6221)) - - -See the changelog for complete list of changes""" diff --git a/releases/v1.5.9.toml b/releases/v1.5.9.toml deleted file mode 100644 index b307170..0000000 --- a/releases/v1.5.9.toml +++ /dev/null @@ -1,20 +0,0 @@ -# commit to be tagged for new release -commit = "HEAD" - -project_name = "containerd" -github_repo = "containerd/containerd" -match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" - -# previous release -previous = "v1.5.8" - -pre_release = false - -preface = """\ -The ninth patch release for containerd 1.5 is a security release to fix CVE-2021-43816. 
- -### Notable Updates -* **Fix unprivileged pod using 'hostPath' bypassing SELinux labels** ([GHSA-mvff-h3cj-wj9c](https://github.com/containerd/containerd/security/advisories/GHSA-mvff-h3cj-wj9c)) -* **Fix setting the "container_kvm_t" SELinux label** ([#6381](https://github.com/containerd/containerd/pull/6381)) - -See the changelog for complete list of changes""" diff --git a/releases/v1.6.0.toml b/releases/v1.6.0.toml new file mode 100644 index 0000000..3761ea3 --- /dev/null +++ b/releases/v1.6.0.toml @@ -0,0 +1,78 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.5.0" + +pre_release = false + +preface = """\ +The seventh major release of containerd includes many improvements and added +support to increase overall compatibility and stability. + +### Highlights + +#### Runtime + +* **Add runtime label to metrics** ([#5744](https://github.com/containerd/containerd/pull/5744)) +* **Cleanup task delete logic in v2 shim** ([#5813](https://github.com/containerd/containerd/pull/5813)) +* **Add support for shim plugins** ([#5817](https://github.com/containerd/containerd/pull/5817)) +* **Handle sigint and sigterm in shimv2** ([#5828](https://github.com/containerd/containerd/pull/5828)) +* **Decouple shim and task manager** ([#5918](https://github.com/containerd/containerd/pull/5918)) +* **Add runc shim support for core scheduling** ([#6011](https://github.com/containerd/containerd/pull/6011)) +* **Update shim client connect attempt to fail fast when shim errors** ([#6031](https://github.com/containerd/containerd/pull/6031)) +* **Add support for absolute path to shim binaries** ([#6206](https://github.com/containerd/containerd/pull/6206)) +* **Update runc to v1.1.0** ([#6375](https://github.com/containerd/containerd/pull/6375)) + +#### Windows + +* **Add support for Windows HostProcess containers** 
([#5131](https://github.com/containerd/containerd/pull/5131)) +* **Add support for Windows resource limits** ([#5778](https://github.com/containerd/containerd/pull/5778)) + +#### CRI + +* **Add CNI configuration based on runtime class** ([#4695](https://github.com/containerd/containerd/pull/4695)) +* **Add support for Intel RDT** ([#5439](https://github.com/containerd/containerd/pull/5439)) +* **Add support for CRI v1 and v1alpha in parallel** ([#5619](https://github.com/containerd/containerd/pull/5619)) +* **Add support for unified resources field for cgroups v2** ([#5627](https://github.com/containerd/containerd/pull/5627)) +* **Add IP preference configuration for reporting pod IP** ([#5964](https://github.com/containerd/containerd/pull/5964)) +* **Implement new CRI pod sandbox stats API** ([#6113](https://github.com/containerd/containerd/pull/6113)) +* **Add sandbox and container latency metrics** ([#6111](https://github.com/containerd/containerd/pull/6111)) +* **Add namespace to ttrpc and grpc plugin connections** ([#6130](https://github.com/containerd/containerd/pull/6130)) +* **Add option to allow ping sockets and privileged ports with no capabilities** ([#6170](https://github.com/containerd/containerd/pull/6170)) +* **Add support for configuring swap** ([#6320](https://github.com/containerd/containerd/pull/6320)) + +#### Other + +* **Add support for client TLS Auth for grpc** ([#5606](https://github.com/containerd/containerd/pull/5606)) +* **Add xfs support for devicemapper snapshotter** ([#5610](https://github.com/containerd/containerd/pull/5610)) +* **Add metric exposing build version and revision** ([#5965](https://github.com/containerd/containerd/pull/5965)) +* **Add support for custom fs options in devmapper snapshotter** ([#6122](https://github.com/containerd/containerd/pull/6122)) +* **Update introspection service to show GRPC plugins** ([#6432](https://github.com/containerd/containerd/pull/6432)) + +#### Client + +* **Allow WithServices to use custom 
implementations** ([#5709](https://github.com/containerd/containerd/pull/5709)) +* **Support custom compressor for walking differ** ([#5735](https://github.com/containerd/containerd/pull/5735)) + +### Release Updates + +#### containerd/CNI/runc/critools tar bundle + +The tar bundles released as `cri-containerd-*.tar.gz` contain a build of runc +linked with a newer version of libseccomp. This dynamically-linked build of runc +was built on Ubuntu 18.04 and will not work on some other distributions, such as +RHEL 7 and Debian 10. Users of such distributions may get a statically-linked +runc binary from https://github.com/opencontainers/runc/releases or build runc +for their own environment. + +> **_Deprecation_** These tar bundles are now deprecated and will be removed or +> replaced in containerd 2.0. Projects relying on these tar bundles should use +> the `containerd-*.tar.gz` bundles or work with the containerd community on a +> suitable replacement in containerd 2.0. + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.1.toml b/releases/v1.6.1.toml new file mode 100644 index 0000000..99a0d7e --- /dev/null +++ b/releases/v1.6.1.toml @@ -0,0 +1,23 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.0" + +pre_release = false + +preface = """\ +The first patch release for containerd 1.6 includes a fix for +[CVE-2022-23648](https://github.com/containerd/containerd/security/advisories/GHSA-crp2-qrr5-8pq7) +and other issues. 
+
+### Notable Updates
+
+* **Use fs.RootPath when mounting volumes** ([GHSA-crp2-qrr5-8pq7](https://github.com/containerd/containerd/security/advisories/GHSA-crp2-qrr5-8pq7))
+* **Return init pid when clean dead shim in runc.v1/v2 shims** ([#6572](https://github.com/containerd/containerd/pull/6572))
+
+See the changelog for complete list of changes"""
diff --git a/releases/v1.6.10.toml b/releases/v1.6.10.toml
new file mode 100644
index 0000000..3a89a6c
--- /dev/null
+++ b/releases/v1.6.10.toml
@@ -0,0 +1,22 @@
+# commit to be tagged for new release
+commit = "HEAD"
+
+project_name = "containerd"
+github_repo = "containerd/containerd"
+match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$"
+
+# previous release
+previous = "v1.6.9"
+
+pre_release = false
+
+preface = """\
+The tenth patch release for containerd 1.6 contains various fixes, including a CVE fix for Windows platforms.
+
+### Notable Updates
+
+* **Always check userxattr for overlay on kernels >= 5.11** ([#7646](https://github.com/containerd/containerd/pull/7646))
+* **Bump hcsshim to 0.9.5 to fix container shutdown bug on Windows** ([#7610](https://github.com/containerd/containerd/pull/7610))
+* **Bump Go version to 1.18.8 to address CVE-2022-41716** ([#7634](https://github.com/containerd/containerd/pull/7634))
+
+See the changelog for complete list of changes"""
diff --git a/releases/v1.6.11.toml b/releases/v1.6.11.toml
new file mode 100644
index 0000000..a852e96
--- /dev/null
+++ b/releases/v1.6.11.toml
@@ -0,0 +1,22 @@
+# commit to be tagged for new release
+commit = "HEAD"
+
+project_name = "containerd"
+github_repo = "containerd/containerd"
+match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$"
+
+# previous release
+previous = "v1.6.10"
+
+pre_release = false
+
+preface = """\
+The eleventh patch release for containerd 1.6 contains various fixes and updates.
+
+### Notable Updates
+* **Add pod UID annotation in CRI plugin** ([#7735](https://github.com/containerd/containerd/pull/7735))
+* **Fix nil pointer dereference for Windows containers in CRI plugin** ([#7737](https://github.com/containerd/containerd/pull/7737))
+* **Fix lease labels unexpectedly overwriting expiration** ([#7745](https://github.com/containerd/containerd/pull/7745))
+* **Fix for simultaneous diff creation using the same parent snapshot** ([#7756](https://github.com/containerd/containerd/pull/7756))
+
+See the changelog for complete list of changes"""
diff --git a/releases/v1.6.12.toml b/releases/v1.6.12.toml
new file mode 100644
index 0000000..cdd0819
--- /dev/null
+++ b/releases/v1.6.12.toml
@@ -0,0 +1,19 @@
+# commit to be tagged for new release
+commit = "HEAD"
+
+project_name = "containerd"
+github_repo = "containerd/containerd"
+match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$"
+
+# previous release
+previous = "v1.6.11"
+
+pre_release = false
+
+preface = """\
+The twelfth patch release for containerd 1.6 contains a fix for CVE-2022-23471.
+
+### Notable Updates
+* **Fix goroutine leak during Exec in CRI plugin** ([GHSA-2qjp-425j-52j9](https://github.com/containerd/containerd/security/advisories/GHSA-2qjp-425j-52j9))
+
+See the changelog for complete list of changes"""
diff --git a/releases/v1.6.13.toml b/releases/v1.6.13.toml
new file mode 100644
index 0000000..b32a384
--- /dev/null
+++ b/releases/v1.6.13.toml
@@ -0,0 +1,26 @@
+# commit to be tagged for new release
+commit = "HEAD"
+
+project_name = "containerd"
+github_repo = "containerd/containerd"
+match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$"
+
+# previous release
+previous = "v1.6.12"
+
+pre_release = false
+
+preface = """\
+The thirteenth patch release for containerd 1.6 contains various fixes and updates.
+
+### Notable Updates
+
+* **Update overlay snapshotter to check for tmpfs when evaluating usage of userxattr** ([#7788](https://github.com/containerd/containerd/pull/7788))
+* **Update hcsshim to v0.9.6 to fix resource leak on exec** ([#7808](https://github.com/containerd/containerd/pull/7808))
+* **Make swapping disabled with memory limit in CRI plugin** ([#7815](https://github.com/containerd/containerd/pull/7815))
+* **Allow clients to remove created tasks with PID 0** ([#7816](https://github.com/containerd/containerd/pull/7816))
+* **Fix concurrent map iteration and map write in CRI port forwarding** ([#7819](https://github.com/containerd/containerd/pull/7819))
+* **Check for nil `HugepageLimits` to avoid panic in CRI plugin** ([#7820](https://github.com/containerd/containerd/pull/7820))
+
+
+See the changelog for complete list of changes"""
diff --git a/releases/v1.5.7.toml b/releases/v1.6.14.toml
similarity index 50%
rename from releases/v1.5.7.toml
rename to releases/v1.6.14.toml
index 10c0139..8afb02e 100644
--- a/releases/v1.5.7.toml
+++ b/releases/v1.6.14.toml
@@ -6,14 +6,15 @@ github_repo = "containerd/containerd"
 match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$"
 
 # previous release
-previous = "v1.5.6"
+previous = "v1.6.13"
 
 pre_release = false
 
 preface = """\
-The seventh patch release for containerd 1.5 is a security release to fix CVE-2021-41103.
+The fourteenth patch release for containerd 1.6 fixes a regression in the CRI plugin related to swap ### Notable Updates -* **Fix insufficiently restricted permissions on container root and plugin directories** [GHSA-c2h3-6mxw-7mvq](https://github.com/containerd/containerd/security/advisories/GHSA-c2h3-6mxw-7mvq) + +* **Fix `memory.memsw.limit_in_bytes: no such file or directory` error in CRI plugin** ([#7838](https://github.com/containerd/containerd/pull/7838)) See the changelog for complete list of changes""" diff --git a/releases/v1.6.15.toml b/releases/v1.6.15.toml new file mode 100644 index 0000000..7798e81 --- /dev/null +++ b/releases/v1.6.15.toml @@ -0,0 +1,20 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.14" + +pre_release = false + +preface = """\ +The fifteenth patch release for containerd 1.6 fixes an issue with CNI in the CRI plugin + +### Notable Updates + +* **Fix no CNI info for pod sandbox on restart in CRI plugin** ([#7848](https://github.com/containerd/containerd/pull/7848)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.16.toml b/releases/v1.6.16.toml new file mode 100644 index 0000000..7568819 --- /dev/null +++ b/releases/v1.6.16.toml @@ -0,0 +1,23 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.15" + +pre_release = false + +preface = """\ +The sixteenth patch release for containerd 1.6 includes various bug fixes and updates. 
+ +### Notable Updates + +* **Fix push error propagation** ([#7990](https://github.com/containerd/containerd/pull/7990)) +* **Fix slice append error with HugepageLimits for Linux** ([#7995](https://github.com/containerd/containerd/pull/7995)) +* **Update default seccomp profile for PKU and CAP_SYS_NICE** ([#8001](https://github.com/containerd/containerd/pull/8001)) +* **Fix overlayfs error when upperdirlabel option is set** ([#8002](https://github.com/containerd/containerd/pull/8002)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.17.toml b/releases/v1.6.17.toml new file mode 100644 index 0000000..ff728cd --- /dev/null +++ b/releases/v1.6.17.toml @@ -0,0 +1,23 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.16" + +pre_release = false + +preface = """\ +The seventeenth patch release for containerd 1.6 includes various updates. 
+ +### Notable Updates + +* **Add network plugin metrics** ([#8018](https://github.com/containerd/containerd/pull/8018)) +* **Update mkdir permission on /etc/cni to 0755 instead of 0700** ([#8030](https://github.com/containerd/containerd/pull/8030)) +* **Export remote snapshotter label handler** ([#8054](https://github.com/containerd/containerd/pull/8054)) +* **Add support for default hosts.toml configuration** ([#8065](https://github.com/containerd/containerd/pull/8065)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.18.toml b/releases/v1.6.18.toml new file mode 100644 index 0000000..f19353a --- /dev/null +++ b/releases/v1.6.18.toml @@ -0,0 +1,24 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.17" + +pre_release = false + +preface = """\ +The eighteenth patch release for containerd 1.6 includes fixes for CVE-2023-25153 and CVE-2023-25173 +along with a security update for Go. 
+ +### Notable Updates + +* **Fix OCI image importer memory exhaustion** ([GHSA-259w-8hf6-59c2](https://github.com/containerd/containerd/security/advisories/GHSA-259w-8hf6-59c2)) +* **Fix supplementary groups not being set up properly** ([GHSA-hmfx-3pcx-653p](https://github.com/containerd/containerd/security/advisories/GHSA-hmfx-3pcx-653p)) +* **Revert removal of `/sbin/apparmor_parser` check** ([#8087](https://github.com/containerd/containerd/pull/8087)) +* **Update Go to 1.19.6** ([#8111](https://github.com/containerd/containerd/pull/8111)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.19.toml b/releases/v1.6.19.toml new file mode 100644 index 0000000..08e39c5 --- /dev/null +++ b/releases/v1.6.19.toml @@ -0,0 +1,20 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.18" + +pre_release = false + +preface = """\ +The nineteenth patch release for containerd 1.6 contains runtime fixes and additions for Windows platforms + +### Notable Updates + +* **Update hcsshim to v0.9.7 to include fix for graceful termination and pause containers** ([#8153](https://github.com/containerd/containerd/pull/8153)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.2.toml b/releases/v1.6.2.toml new file mode 100644 index 0000000..1c9d618 --- /dev/null +++ b/releases/v1.6.2.toml @@ -0,0 +1,21 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.1" + +pre_release = false + +preface = """\ +The second patch release for containerd 1.6 includes a fix for +[CVE-2022-24769](https://github.com/containerd/containerd/security/advisories/GHSA-c9cp-9c75-9v8c).
+ +### Notable Updates + +* **Fix the inheritable capability defaults** ([GHSA-c9cp-9c75-9v8c](https://github.com/containerd/containerd/security/advisories/GHSA-c9cp-9c75-9v8c)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.20.toml b/releases/v1.6.20.toml new file mode 100644 index 0000000..d564725 --- /dev/null +++ b/releases/v1.6.20.toml @@ -0,0 +1,27 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.19" + +pre_release = false + +preface = """\ +The twentieth patch release for containerd 1.6 contains various fixes and updates. + +### Notable Updates + +* **Disable looking up usernames and groupnames on host** ([#8230](https://github.com/containerd/containerd/pull/8230)) +* **Add support for Windows ArgsEscaped images** ([#8273](https://github.com/containerd/containerd/pull/8273)) +* **Update hcsshim to v0.9.8** ([#8274](https://github.com/containerd/containerd/pull/8274)) +* **Fix debug flag in shim** ([#8288](https://github.com/containerd/containerd/pull/8288)) +* **Add `WithReadonlyTempMount` to support readonly temporary mounts** ([#8299](https://github.com/containerd/containerd/pull/8299)) +* **Update ttrpc to fix file descriptor leak** ([#8308](https://github.com/containerd/containerd/pull/8308)) +* **Update runc binary to v1.1.5** ([#8324](https://github.com/containerd/containerd/pull/8324)) +* **Update image config to support ArgsEscaped** ([#8306](https://github.com/containerd/containerd/pull/8306)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.21.toml b/releases/v1.6.21.toml new file mode 100644 index 0000000..b11e448 --- /dev/null +++ b/releases/v1.6.21.toml @@ -0,0 +1,36 @@ +# commit to be tagged for new release +commit = "HEAD" + +# project_name is used to refer to the project in the notes +project_name = 
"containerd" + +# github_repo is the github project, only github is currently supported +github_repo = "containerd/containerd" + +# match_deps is a pattern to determine which dependencies should be included +# as part of this release. The changelog will also include changes for these +# dependencies based on the change in the dependency's version. +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release of this project for determining changes +previous = "v1.6.20" + +# pre_release is whether to include a disclaimer about being a pre-release +pre_release = false + +# preface is the description of the release which precedes the author list +# and changelog. This description could include highlights as well as any +# description of changes. Use markdown formatting. +preface = """\ +The twenty-first patch release for containerd 1.6 contains various fixes and updates. + +### Notable Updates + +* **update runc binary to v1.1.7 ([#8450](https://github.com/containerd/containerd/pull/8450)) +* **Remove entry for container from container store on error ([#8456](https://github.com/containerd/containerd/pull/8456)) +* **oci: partially restore comment on read-only mounts for uid/gid uses ([#8403](https://github.com/containerd/containerd/pull/8403)) +* **windows: Add ArgsEscaped support for CRI ([#8247](https://github.com/containerd/containerd/pull/8247)) +* **oci: Use WithReadonlyTempMount when adding users/groups ([#8357](https://github.com/containerd/containerd/pull/8357)) +* **archive: consistently respect value of WithSkipDockerManifest ([#8345](https://github.com/containerd/containerd/pull/8345)) + +See the changelog for complete list of changes""" \ No newline at end of file diff --git a/releases/v1.6.22.toml b/releases/v1.6.22.toml new file mode 100644 index 0000000..3dc7b88 --- /dev/null +++ b/releases/v1.6.22.toml @@ -0,0 +1,47 @@ +# commit to be tagged for new release +commit = "HEAD" + +# project_name is used to refer to the project in the notes 
+project_name = "containerd" + +# github_repo is the github project, only github is currently supported +github_repo = "containerd/containerd" + +# match_deps is a pattern to determine which dependencies should be included +# as part of this release. The changelog will also include changes for these +# dependencies based on the change in the dependency's version. +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release of this project for determining changes +previous = "v1.6.21" + +# pre_release is whether to include a disclaimer about being a pre-release +pre_release = false + +# preface is the description of the release which precedes the author list +# and changelog. This description could include highlights as well as any +# description of changes. Use markdown formatting. +preface = """\ +The twenty-second patch release for containerd 1.6 contains various fixes and updates. + +### Notable Updates + +* **RunC: Update runc binary to v1.1.8** ([#8842](https://github.com/containerd/containerd/pull/8842)) +* **CRI: Fix additionalGids: it should fallback to imageConfig.User when securityContext.RunAsUser,RunAsUsername are empty** ([#8823](https://github.com/containerd/containerd/pull/8823)) +* **CRI: Write generated CNI config atomically** ([#8826](https://github.com/containerd/containerd/pull/8826)) +* **Fix concurrent writes for `UpdateContainerStats`** ([#8819](https://github.com/containerd/containerd/pull/8819)) +* **Make checkContainerTimestamps less strict on Windows** ([#8827](https://github.com/containerd/containerd/pull/8827)) +* **Port-Forward: Correctly handle known errors** ([#8805](https://github.com/containerd/containerd/pull/8805)) +* **Resolve docker.NewResolver race condition** ([#8800](https://github.com/containerd/containerd/pull/8800)) +* **SecComp: Always allow `name_to_handle_at`** ([#8754](https://github.com/containerd/containerd/pull/8754)) +* **Adding support to run hcsshim from local clone** 
([#8713](https://github.com/containerd/containerd/pull/8713)) +* **Pinned image support** ([#8720](https://github.com/containerd/containerd/pull/8720)) +* **Runtime/V2/RunC: Handle early exits w/o big locks** ([#8695](https://github.com/containerd/containerd/pull/8695)) +* **CRITool: Move up to CRI-TOOLS v1.27.0** ([#7997](https://github.com/containerd/containerd/pull/7997)) +* **Fix cpu architecture detection issue on emulated ARM platform** ([#8533](https://github.com/containerd/containerd/pull/8533)) +* **Task: Don't `close()` io before `cancel()`** ([#8659](https://github.com/containerd/containerd/pull/8659)) +* **Fix panic when remote differ returns empty result** ([#8640](https://github.com/containerd/containerd/pull/8640)) +* **Plugins: Notify readiness when registered plugins are ready** ([#8583](https://github.com/containerd/containerd/pull/8583)) +* **Unwrap io errors in server connection receive error handling** ([ttrpc#143](https://github.com/containerd/ttrpc/pull/143)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.23.toml b/releases/v1.6.23.toml new file mode 100644 index 0000000..4996666 --- /dev/null +++ b/releases/v1.6.23.toml @@ -0,0 +1,35 @@ +# commit to be tagged for new release +commit = "HEAD" + +# project_name is used to refer to the project in the notes +project_name = "containerd" + +# github_repo is the github project, only github is currently supported +github_repo = "containerd/containerd" + +# match_deps is a pattern to determine which dependencies should be included +# as part of this release. The changelog will also include changes for these +# dependencies based on the change in the dependency's version. 
+match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release of this project for determining changes +previous = "v1.6.22" + +# pre_release is whether to include a disclaimer about being a pre-release +pre_release = false + +# preface is the description of the release which precedes the author list +# and changelog. This description could include highlights as well as any +# description of changes. Use markdown formatting. +preface = """\ +The twenty-third patch release for containerd 1.6 contains various fixes and updates. + +### Notable Updates + +* **Add stable ABI support in windows platform matcher + update hcsshim tag ([#8854](https://github.com/containerd/containerd/pull/8854)) +* **cri: Don't use rel path for image volumes ([#8927](https://github.com/containerd/containerd/pull/8927)) +* **Upgrade GitHub actions packages in release workflow ([#8908](https://github.com/containerd/containerd/pull/8908)) +* **update to go1.19.12 ([#8905](https://github.com/containerd/containerd/pull/8905)) +* **backport: ro option for userxattr mount check + cherry-pick: Fix ro mount option being passed ([#8888](https://github.com/containerd/containerd/pull/8888)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.24.toml b/releases/v1.6.24.toml new file mode 100644 index 0000000..ecef755 --- /dev/null +++ b/releases/v1.6.24.toml @@ -0,0 +1,35 @@ +# commit to be tagged for new release +commit = "HEAD" + +# project_name is used to refer to the project in the notes +project_name = "containerd" + +# github_repo is the github project, only github is currently supported +github_repo = "containerd/containerd" + +# match_deps is a pattern to determine which dependencies should be included +# as part of this release. The changelog will also include changes for these +# dependencies based on the change in the dependency's version. 
+match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release of this project for determining changes +previous = "v1.6.23" + +# pre_release is whether to include a disclaimer about being a pre-release +pre_release = false + +# preface is the description of the release which precedes the author list +# and changelog. This description could include highlights as well as any +# description of changes. Use markdown formatting. +preface = """\ +The twenty-fourth patch release for containerd 1.6 contains various fixes and updates. + +### Notable Updates + +* **CRI: fix leaked shim caused by high IO pressure** ([#9004](https://github.com/containerd/containerd/pull/9004)) +* **Update to go1.20.8** ([#9073](https://github.com/containerd/containerd/pull/9073)) +* **Update runc to v1.1.9** ([#8966](https://github.com/containerd/containerd/pull/8966)) +* **Backport: add configurable mount options to overlay snapshotter** ([#8961](https://github.com/containerd/containerd/pull/8961)) +* **log: cleanups and improvements to decouple more from logrus** ([#9002](https://github.com/containerd/containerd/pull/9002)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.3.toml b/releases/v1.6.3.toml new file mode 100644 index 0000000..ed1de25 --- /dev/null +++ b/releases/v1.6.3.toml @@ -0,0 +1,27 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.2" + +pre_release = false + +preface = """\ +The third patch release for containerd 1.6 includes various fixes and updates. 
+ +### Notable Updates + +* **Fix panic when configuring tracing plugin** ([#6853](https://github.com/containerd/containerd/pull/6853)) +* **Improve image pull performance in CRI plugin** ([#6816](https://github.com/containerd/containerd/pull/6816)) +* **Check for duplicate nspath** ([#6813](https://github.com/containerd/containerd/pull/6813)) +* **Fix deadlock in cgroup metrics collector** ([#6801](https://github.com/containerd/containerd/pull/6801)) +* **Mount devmapper xfs file system with "nouuid" option** ([#6731](https://github.com/containerd/containerd/pull/6731)) +* **Make the temp mount as ready only in container WithVolumes** ([#6730](https://github.com/containerd/containerd/pull/6730)) +* **Fix deadlock from leaving transaction open in native snapshotter** ([#6727](https://github.com/containerd/containerd/pull/6727)) +* **Monitor OOMKill events to prevent missing container events** ([#6734](https://github.com/containerd/containerd/pull/6734)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.4.toml b/releases/v1.6.4.toml new file mode 100644 index 0000000..dc38bf7 --- /dev/null +++ b/releases/v1.6.4.toml @@ -0,0 +1,21 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.3" + +pre_release = false + +preface = """\ +The fourth patch release for containerd 1.6 includes two fixes for CNI and SELinux. 
+ +### Notable Updates + +* **Update go-cni to fix teardown regression** ([#6877](https://github.com/containerd/containerd/pull/6877)) +* **Fix broken SELinux relabeling for Kubernetes volume mounts** ([#6878](https://github.com/containerd/containerd/pull/6878)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.5.toml b/releases/v1.6.5.toml new file mode 100644 index 0000000..47b6723 --- /dev/null +++ b/releases/v1.6.5.toml @@ -0,0 +1,22 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.4" + +pre_release = false + +preface = """\ +The fifth patch release for containerd 1.6 includes a few fixes and updated +version of runc. + +### Notable Updates + +* **Fix for older CNI plugins not reporting version** ([#7011](https://github.com/containerd/containerd/pull/7011)) +* **Fix mount path handling for CRI plugin on Windows** ([#6929](https://github.com/containerd/containerd/pull/6929)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.6.toml b/releases/v1.6.6.toml new file mode 100644 index 0000000..2502db9 --- /dev/null +++ b/releases/v1.6.6.toml @@ -0,0 +1,21 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.5" + +pre_release = false + +preface = """\ +The sixth patch release for containerd 1.6 includes a fix for +[GHSA-5ffw-gxpp-mxpf](https://github.com/containerd/containerd/security/advisories/GHSA-5ffw-gxpp-mxpf). 
+ +### Notable Updates + +* **Fix ExecSync handler to cap console output size** ([GHSA-5ffw-gxpp-mxpf](https://github.com/containerd/containerd/security/advisories/GHSA-5ffw-gxpp-mxpf)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.7.toml b/releases/v1.6.7.toml new file mode 100644 index 0000000..a180185 --- /dev/null +++ b/releases/v1.6.7.toml @@ -0,0 +1,28 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.6" + +pre_release = false + +preface = """\ +The seventh patch release for containerd 1.6 contains various fixes, +includes a new version of runc and adds support for ppc64le and riscv64 +(requires unreleased runc 1.2) builds. + +### Notable Updates + +* **Update runc to v1.1.3** ([#7036](https://github.com/containerd/containerd/pull/7036)) +* **Seccomp: Allow clock_settime64 with CAP_SYS_TIME** ([#7172](https://github.com/containerd/containerd/pull/7172)) +* **Fix WWW-Authenticate parsing** ([#7131](https://github.com/containerd/containerd/pull/7131)) +* **Support RISC-V 64 and ppc64le builds** ([#7170](https://github.com/containerd/containerd/pull/7170)) +* **Windows: Update hcsshim to v0.9.4 to fix regression with HostProcess stats** ([#7200](https://github.com/containerd/containerd/pull/7200)) +* **Windows: Fix shim logs going to panic.log file** ([#7242](https://github.com/containerd/containerd/pull/7242)) +* **Allow ptrace(2) by default for kernels >= 4.8** ([#7171](https://github.com/containerd/containerd/pull/7171)) + +See the changelog for complete list of changes""" diff --git a/releases/v1.5.2.toml b/releases/v1.6.8.toml similarity index 51% rename from releases/v1.5.2.toml rename to releases/v1.6.8.toml index 36d6b9a..e71128e 100644 --- a/releases/v1.5.2.toml +++ b/releases/v1.6.8.toml @@ -6,10 +6,12 @@ github_repo = "containerd/containerd" 
match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" # previous release -previous = "v1.5.1" +previous = "v1.6.7" pre_release = false preface = """\ -The second patch release for containerd 1.5 is a security release to update -runc for [CVE-2021-30465](https://github.com/opencontainers/runc/security/advisories/GHSA-c3xm-pvg7-gh7r)""" +The eighth patch release for containerd 1.6 fixes a regression in the release +build binaries which limited the environments they could be run in. + +See the changelog for complete list of changes""" diff --git a/releases/v1.6.9.toml b/releases/v1.6.9.toml new file mode 100644 index 0000000..a1ae5ca --- /dev/null +++ b/releases/v1.6.9.toml @@ -0,0 +1,33 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" + +# previous release +previous = "v1.6.8" + +pre_release = false + +preface = """\ +The ninth patch release for containerd 1.6 contains various fixes, reorders the pod setup workflow in the CRI plugin to +prevent CNI resource leaks, and includes a new version of runc. 
+ +### Notable Updates + +* **Update oci.WithDefaultUnixDevices(): remove tun/tap from the default devices** ([#7268](https://github.com/containerd/containerd/pull/7268)) +* **Fix CRI: Do not append []string{""} to command to preserve Docker compatibility** ([#7298](https://github.com/containerd/containerd/pull/7298)) +* **Enhance CRI: ContainerStatus to return container resources** ([#7410](https://github.com/containerd/containerd/pull/7410)) +* **Fix OCI resolver to skip TLS verification for localhost** ([#7438](https://github.com/containerd/containerd/pull/7438)) +* **Fix createTarFile: make xattr EPERM non-fatal** ([#7447](https://github.com/containerd/containerd/pull/7447)) +* **Fix CRI plugin to setup pod network after creating the sandbox container** ([#7456](https://github.com/containerd/containerd/pull/7456)) +* **Fix OCI pusher to retry request on writer reset** ([#7461](https://github.com/containerd/containerd/pull/7461)) +* **Fix archive to validate digests before use** ([#7490](https://github.com/containerd/containerd/pull/7490)) +* **Migrate from k8s.gcr.io to registry.k8s.io** ([#7549](https://github.com/containerd/containerd/pull/7549)) +* **Fix CRI: PodSandboxStatus should tolerate missing task** ([#7551](https://github.com/containerd/containerd/pull/7551)) +* **Fix io.containerd.runc.v1: Stats() shouldn't assume s.container is non-nil** ([#7557](https://github.com/containerd/containerd/pull/7557)) +* **Enhance CRI plugin to add logging volume metrics** ([#7571](https://github.com/containerd/containerd/pull/7571)) +* **Add support for CAP_BPF and CAP_PERFMON** ([#7574](https://github.com/containerd/containerd/pull/7574)) + +See the changelog for complete list of changes""" diff --git a/remotes/docker/auth/fetch.go b/remotes/docker/auth/fetch.go index 8b0a87e..c259873 100644 --- a/remotes/docker/auth/fetch.go +++ b/remotes/docker/auth/fetch.go @@ -19,6 +19,8 @@ package auth import ( "context" "encoding/json" + "errors" + "fmt" "net/http" "net/url"
"strings" @@ -27,7 +29,6 @@ import ( "github.com/containerd/containerd/log" remoteserrors "github.com/containerd/containerd/remotes/errors" "github.com/containerd/containerd/version" - "github.com/pkg/errors" "golang.org/x/net/context/ctxhttp" ) @@ -46,7 +47,7 @@ func GenerateTokenOptions(ctx context.Context, host, username, secret string, c realmURL, err := url.Parse(realm) if err != nil { - return TokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") + return TokenOptions{}, fmt.Errorf("invalid token auth challenge realm: %w", err) } to := TokenOptions{ @@ -58,7 +59,7 @@ func GenerateTokenOptions(ctx context.Context, host, username, secret string, c scope, ok := c.Parameters["scope"] if ok { - to.Scopes = append(to.Scopes, scope) + to.Scopes = append(to.Scopes, strings.Split(scope, " ")...) } else { log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") } @@ -73,6 +74,15 @@ type TokenOptions struct { Scopes []string Username string Secret string + + // FetchRefreshToken enables fetching a refresh token (aka "identity token", "offline token") along with the bearer token. + // + // For HTTP GET mode (FetchToken), FetchRefreshToken sets `offline_token=true` in the request. + // https://docs.docker.com/registry/spec/auth/token/#requesting-a-token + // + // For HTTP POST mode (FetchTokenWithOAuth), FetchRefreshToken sets `access_type=offline` in the request. + // https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token + FetchRefreshToken bool } // OAuthTokenResponse is response from fetching token with a OAuth POST request @@ -101,6 +111,9 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http. 
form.Set("username", to.Username) form.Set("password", to.Secret) } + if to.FetchRefreshToken { + form.Set("access_type", "offline") + } req, err := http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode())) if err != nil { @@ -121,18 +134,18 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http. defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) + return nil, remoteserrors.NewUnexpectedStatusErr(resp) } decoder := json.NewDecoder(resp.Body) var tr OAuthTokenResponse if err = decoder.Decode(&tr); err != nil { - return nil, errors.Wrap(err, "unable to decode token response") + return nil, fmt.Errorf("unable to decode token response: %w", err) } if tr.AccessToken == "" { - return nil, errors.WithStack(ErrNoToken) + return nil, ErrNoToken } return &tr, nil @@ -175,6 +188,10 @@ func FetchToken(ctx context.Context, client *http.Client, headers http.Header, t req.SetBasicAuth(to.Username, to.Secret) } + if to.FetchRefreshToken { + reqParams.Add("offline_token", "true") + } + req.URL.RawQuery = reqParams.Encode() resp, err := ctxhttp.Do(ctx, client, req) @@ -184,14 +201,14 @@ func FetchToken(ctx context.Context, client *http.Client, headers http.Header, t defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) + return nil, remoteserrors.NewUnexpectedStatusErr(resp) } decoder := json.NewDecoder(resp.Body) var tr FetchTokenResponse if err = decoder.Decode(&tr); err != nil { - return nil, errors.Wrap(err, "unable to decode token response") + return nil, fmt.Errorf("unable to decode token response: %w", err) } // `access_token` is equivalent to `token` and if both are specified @@ -202,7 +219,7 @@ func FetchToken(ctx context.Context, client *http.Client, headers http.Header, t } if tr.Token == "" { - return nil, errors.WithStack(ErrNoToken) + 
return nil, ErrNoToken } return &tr, nil diff --git a/remotes/docker/auth/fetch_test.go b/remotes/docker/auth/fetch_test.go new file mode 100644 index 0000000..087bd62 --- /dev/null +++ b/remotes/docker/auth/fetch_test.go @@ -0,0 +1,114 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package auth + +import ( + "context" + "reflect" + "strings" + "testing" +) + +func TestGenerateTokenOptions(t *testing.T) { + for _, tc := range []struct { + name string + realm string + service string + username string + secret string + scope string + }{ + { + name: "MultipleScopes", + realm: "https://test-realm.com", + service: "registry-service", + username: "username", + secret: "secret", + scope: "repository:foo/bar:pull repository:foo/bar:pull,push", + }, + { + name: "SingleScope", + realm: "https://test-realm.com", + service: "registry-service", + username: "username", + secret: "secret", + scope: "repository:foo/bar:pull", + }, + { + name: "NoScope", + realm: "https://test-realm.com", + service: "registry-service", + username: "username", + secret: "secret", + }, + } { + t.Run(tc.name, func(t *testing.T) { + c := Challenge{ + Scheme: BearerAuth, + Parameters: map[string]string{ + "realm": tc.realm, + "service": tc.service, + "scope": tc.scope, + }, + } + options, err := GenerateTokenOptions(context.Background(), "host", tc.username, tc.secret, c) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + expected := TokenOptions{ + 
Realm: tc.realm, + Service: tc.service, + Scopes: strings.Split(tc.scope, " "), + Username: tc.username, + Secret: tc.secret, + } + if !reflect.DeepEqual(options, expected) { + t.Fatalf("expected %v, but got %v", expected, options) + } + }) + } + + t.Run("MissingRealm", func(t *testing.T) { + c := Challenge{ + Scheme: BearerAuth, + Parameters: map[string]string{ + "service": "service", + "scope": "repository:foo/bar:pull,push", + }, + } + _, err := GenerateTokenOptions(context.Background(), "host", "username", "secret", c) + if err == nil { + t.Fatal("expected an err and got nil") + } + }) + + t.Run("RealmParseError", func(t *testing.T) { + c := Challenge{ + Scheme: BearerAuth, + Parameters: map[string]string{ + "realm": "127.0.0.1:8080", + "service": "service", + "scope": "repository:foo/bar:pull,push", + }, + } + _, err := GenerateTokenOptions(context.Background(), "host", "username", "secret", c) + if err == nil { + t.Fatal("expected an err and got nil") + } + }) +} diff --git a/remotes/docker/auth/parse.go b/remotes/docker/auth/parse.go index 223fa2d..e4529a7 100644 --- a/remotes/docker/auth/parse.go +++ b/remotes/docker/auth/parse.go @@ -134,9 +134,6 @@ func parseValueAndParams(header string) (value string, params map[string]string) } var pvalue string pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } pkey = strings.ToLower(pkey) params[pkey] = pvalue s = skipSpace(s) diff --git a/remotes/docker/auth/parse_test.go b/remotes/docker/auth/parse_test.go new file mode 100644 index 0000000..8a353ce --- /dev/null +++ b/remotes/docker/auth/parse_test.go @@ -0,0 +1,87 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package auth + +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseAuthHeaderBearer(t *testing.T) { + headerTemplate := `Bearer realm="%s",service="%s",scope="%s"` + + for _, tc := range []struct { + name string + realm string + service string + scope string + }{ + { + name: "SingleScope", + realm: "https://auth.docker.io/token", + service: "registry.docker.io", + scope: "repository:foo/bar:pull,push", + }, + { + name: "MultipleScopes", + realm: "https://auth.docker.io/token", + service: "registry.docker.io", + scope: "repository:foo/bar:pull,push repository:foo/baz:pull repository:foo/foo:push", + }, + } { + t.Run(tc.name, func(t *testing.T) { + expected := []Challenge{ + { + Scheme: BearerAuth, + Parameters: map[string]string{ + "realm": tc.realm, + "service": tc.service, + "scope": tc.scope, + }, + }, + } + + hdr := http.Header{ + http.CanonicalHeaderKey("WWW-Authenticate"): []string{fmt.Sprintf( + headerTemplate, tc.realm, tc.service, tc.scope, + )}, + } + actual := ParseAuthHeader(hdr) + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected %v, but got %v", expected, actual) + } + }) + } +} + +func TestParseAuthHeader(t *testing.T) { + v := `Bearer realm="https://auth.example.io/token",empty="",service="registry.example.io",scope="repository:library/hello-world:pull,push"` + h := http.Header{http.CanonicalHeaderKey("WWW-Authenticate"): []string{v}} + challenge := ParseAuthHeader(h) + + actual, ok := challenge[0].Parameters["empty"] + assert.True(t, ok) + assert.Equal(t, "", actual) 
+ + actual, ok = challenge[0].Parameters["service"] + assert.True(t, ok) + assert.Equal(t, "registry.example.io", actual) +} diff --git a/remotes/docker/authorizer.go b/remotes/docker/authorizer.go index 67e4aea..eaa0e5d 100644 --- a/remotes/docker/authorizer.go +++ b/remotes/docker/authorizer.go @@ -19,6 +19,7 @@ package docker import ( "context" "encoding/base64" + "errors" "fmt" "net/http" "strings" @@ -28,7 +29,6 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes/docker/auth" remoteerrors "github.com/containerd/containerd/remotes/errors" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -37,10 +37,12 @@ type dockerAuthorizer struct { client *http.Client header http.Header - mu sync.Mutex + mu sync.RWMutex // indexed by host name handlers map[string]*authHandler + + onFetchRefreshToken OnFetchRefreshToken } // NewAuthorizer creates a Docker authorizer using the provided function to @@ -51,9 +53,10 @@ func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) } type authorizerConfig struct { - credentials func(string) (string, string, error) - client *http.Client - header http.Header + credentials func(string) (string, string, error) + client *http.Client + header http.Header + onFetchRefreshToken OnFetchRefreshToken } // AuthorizerOpt configures an authorizer @@ -80,6 +83,16 @@ func WithAuthHeader(hdr http.Header) AuthorizerOpt { } } +// OnFetchRefreshToken is called on fetching request token. +type OnFetchRefreshToken func(ctx context.Context, refreshToken string, req *http.Request) + +// WithFetchRefreshToken enables fetching "refresh token" (aka "identity token", "offline token"). +func WithFetchRefreshToken(f OnFetchRefreshToken) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.onFetchRefreshToken = f + } +} + // NewDockerAuthorizer creates an authorizer using Docker's registry // authentication spec. 
// See https://docs.docker.com/registry/spec/auth/ @@ -94,10 +107,11 @@ func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { } return &dockerAuthorizer{ - credentials: ao.credentials, - client: ao.client, - header: ao.header, - handlers: make(map[string]*authHandler), + credentials: ao.credentials, + client: ao.client, + header: ao.header, + handlers: make(map[string]*authHandler), + onFetchRefreshToken: ao.onFetchRefreshToken, } } @@ -109,12 +123,21 @@ func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) err return nil } - auth, err := ah.authorize(ctx) + auth, refreshToken, err := ah.authorize(ctx) if err != nil { return err } req.Header.Set("Authorization", auth) + + if refreshToken != "" { + a.mu.RLock() + onFetchRefreshToken := a.onFetchRefreshToken + a.mu.RUnlock() + if onFetchRefreshToken != nil { + onFetchRefreshToken(ctx, refreshToken, req) + } + } return nil } @@ -161,6 +184,7 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R if err != nil { return err } + common.FetchRefreshToken = a.onFetchRefreshToken != nil a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) return nil @@ -181,14 +205,15 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R } } } - return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") + return fmt.Errorf("failed to find supported auth scheme: %w", errdefs.ErrNotImplemented) } // authResult is used to control limit rate. type authResult struct { sync.WaitGroup - token string - err error + token string + refreshToken string + err error } // authHandler is used to handle auth request per registry server. 
@@ -220,29 +245,29 @@ func newAuthHandler(client *http.Client, hdr http.Header, scheme auth.Authentica } } -func (ah *authHandler) authorize(ctx context.Context) (string, error) { +func (ah *authHandler) authorize(ctx context.Context) (string, string, error) { switch ah.scheme { case auth.BasicAuth: return ah.doBasicAuth(ctx) case auth.BearerAuth: return ah.doBearerAuth(ctx) default: - return "", errors.Wrapf(errdefs.ErrNotImplemented, "failed to find supported auth scheme: %s", string(ah.scheme)) + return "", "", fmt.Errorf("failed to find supported auth scheme: %s: %w", string(ah.scheme), errdefs.ErrNotImplemented) } } -func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { +func (ah *authHandler) doBasicAuth(ctx context.Context) (string, string, error) { username, secret := ah.common.Username, ah.common.Secret if username == "" || secret == "" { - return "", fmt.Errorf("failed to handle basic auth because missing username or secret") + return "", "", fmt.Errorf("failed to handle basic auth because missing username or secret") } auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) - return fmt.Sprintf("Basic %s", auth), nil + return fmt.Sprintf("Basic %s", auth), "", nil } -func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err error) { +func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken string, err error) { // copy common tokenOptions to := ah.common @@ -255,7 +280,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err erro if r, exist := ah.scopedTokens[scoped]; exist { ah.Unlock() r.Wait() - return r.token, r.err + return r.token, r.refreshToken, r.err } // only one fetch token job @@ -266,14 +291,16 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err erro defer func() { token = fmt.Sprintf("Bearer %s", token) - r.token, r.err = token, err + r.token, r.refreshToken, r.err = token, refreshToken, err r.Done() }() // fetch 
token for the resource scope if to.Secret != "" { defer func() { - err = errors.Wrap(err, "failed to fetch oauth token") + if err != nil { + err = fmt.Errorf("failed to fetch oauth token: %w", err) + } }() // credential information is provided, use oauth POST endpoint // TODO: Allow setting client_id @@ -284,28 +311,29 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err erro // Registries without support for POST may return 404 for POST /v2/token. // As of September 2017, GCR is known to return 404. // As of February 2018, JFrog Artifactory is known to return 401. - if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 { + // As of January 2022, ACR is known to return 400. + if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 || errStatus.StatusCode == 400 { resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) if err != nil { - return "", err + return "", "", err } - return resp.Token, nil + return resp.Token, resp.RefreshToken, nil } log.G(ctx).WithFields(logrus.Fields{ "status": errStatus.Status, "body": string(errStatus.Body), }).Debugf("token request failed") } - return "", err + return "", "", err } - return resp.AccessToken, nil + return resp.AccessToken, resp.RefreshToken, nil } // do request anonymously resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) if err != nil { - return "", errors.Wrap(err, "failed to fetch anonymous token") + return "", "", fmt.Errorf("failed to fetch anonymous token: %w", err) } - return resp.Token, nil + return resp.Token, resp.RefreshToken, nil } func invalidAuthorization(c auth.Challenge, responses []*http.Response) error { @@ -319,7 +347,7 @@ func invalidAuthorization(c auth.Challenge, responses []*http.Response) error { return nil } - return errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr) + return fmt.Errorf("server message: %s: %w", 
errStr, ErrInvalidAuthorization) } func sameRequest(r1, r2 *http.Request) bool { diff --git a/remotes/docker/config/config_unix.go b/remotes/docker/config/config_unix.go index 8245c4d..6967f68 100644 --- a/remotes/docker/config/config_unix.go +++ b/remotes/docker/config/config_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -23,16 +24,18 @@ import ( "path/filepath" ) -func hostPaths(root, host string) []string { +func hostPaths(root, host string) (hosts []string) { ch := hostDirectory(host) - if ch == host { - return []string{filepath.Join(root, host)} + if ch != host { + hosts = append(hosts, filepath.Join(root, ch)) } - return []string{ - filepath.Join(root, ch), + hosts = append(hosts, filepath.Join(root, host), - } + filepath.Join(root, "_default"), + ) + + return } func rootSystemPool() (*x509.CertPool, error) { diff --git a/remotes/docker/config/config_windows.go b/remotes/docker/config/config_windows.go index 948b653..4697728 100644 --- a/remotes/docker/config/config_windows.go +++ b/remotes/docker/config/config_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -24,16 +22,18 @@ import ( "strings" ) -func hostPaths(root, host string) []string { +func hostPaths(root, host string) (hosts []string) { ch := hostDirectory(host) - if ch == host { - return []string{filepath.Join(root, host)} + if ch != host { + hosts = append(hosts, filepath.Join(root, strings.Replace(ch, ":", "", -1))) } - return []string{ - filepath.Join(root, strings.Replace(ch, ":", "", -1)), + hosts = append(hosts, filepath.Join(root, strings.Replace(host, ":", "", -1)), - } + filepath.Join(root, "_default"), + ) + + return } func rootSystemPool() (*x509.CertPool, error) { diff --git a/remotes/docker/config/hosts.go b/remotes/docker/config/hosts.go index b24ba33..aa8ea95 100644 --- a/remotes/docker/config/hosts.go +++ b/remotes/docker/config/hosts.go @@ -20,7 +20,8 @@ package config import ( "context" "crypto/tls" - "io/ioutil" + "errors" + "fmt" "net" "net/http" "net/url" @@ -35,7 +36,6 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes/docker" "github.com/pelletier/go-toml" - "github.com/pkg/errors" ) // UpdateClientFunc is a function that lets you to amend http Client behavior used by registry clients. 
@@ -54,8 +54,6 @@ type hostConfig struct { header http.Header - // TODO: API ("docker" or "oci") - // TODO: API Version ("v1", "v2") // TODO: Add credential configuration (domain alias, username) } @@ -66,7 +64,8 @@ type HostOptions struct { DefaultTLS *tls.Config DefaultScheme string // UpdateClient will be called after creating http.Client object, so clients can provide extra configuration - UpdateClient UpdateClientFunc + UpdateClient UpdateClientFunc + AuthorizerOpts []docker.AuthorizerOpt } // ConfigureHosts creates a registry hosts function from the provided @@ -100,6 +99,17 @@ func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHos if host == "docker.io" { hosts[len(hosts)-1].scheme = "https" hosts[len(hosts)-1].host = "registry-1.docker.io" + } else if docker.IsLocalhost(host) { + hosts[len(hosts)-1].host = host + if options.DefaultScheme == "" || options.DefaultScheme == "http" { + hosts[len(hosts)-1].scheme = "http" + + // Skipping TLS verification for localhost + var skipVerify = true + hosts[len(hosts)-1].skipVerify = &skipVerify + } else { + hosts[len(hosts)-1].scheme = options.DefaultScheme + } } else { hosts[len(hosts)-1].host = host if options.DefaultScheme != "" { @@ -146,6 +156,7 @@ func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHos if options.Credentials != nil { authOpts = append(authOpts, docker.WithAuthCreds(options.Credentials)) } + authOpts = append(authOpts, options.AuthorizerOpts...) authorizer := docker.NewDockerAuthorizer(authOpts...) 
rhosts := make([]docker.RegistryHost, len(hosts)) @@ -167,32 +178,32 @@ func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHos if tlsConfig.RootCAs == nil { rootPool, err := rootSystemPool() if err != nil { - return nil, errors.Wrap(err, "unable to initialize cert pool") + return nil, fmt.Errorf("unable to initialize cert pool: %w", err) } tlsConfig.RootCAs = rootPool } for _, f := range host.caCerts { - data, err := ioutil.ReadFile(f) + data, err := os.ReadFile(f) if err != nil { - return nil, errors.Wrapf(err, "unable to read CA cert %q", f) + return nil, fmt.Errorf("unable to read CA cert %q: %w", f, err) } if !tlsConfig.RootCAs.AppendCertsFromPEM(data) { - return nil, errors.Errorf("unable to load CA cert %q", f) + return nil, fmt.Errorf("unable to load CA cert %q", f) } } } if host.clientPairs != nil { for _, pair := range host.clientPairs { - certPEMBlock, err := ioutil.ReadFile(pair[0]) + certPEMBlock, err := os.ReadFile(pair[0]) if err != nil { - return nil, errors.Wrapf(err, "unable to read CERT file %q", pair[0]) + return nil, fmt.Errorf("unable to read CERT file %q: %w", pair[0], err) } var keyPEMBlock []byte if pair[1] != "" { - keyPEMBlock, err = ioutil.ReadFile(pair[1]) + keyPEMBlock, err = os.ReadFile(pair[1]) if err != nil { - return nil, errors.Wrapf(err, "unable to read CERT file %q", pair[1]) + return nil, fmt.Errorf("unable to read CERT file %q: %w", pair[1], err) } } else { // Load key block from same PEM file @@ -200,7 +211,7 @@ func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHos } cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) if err != nil { - return nil, errors.Wrap(err, "failed to load X509 key pair") + return nil, fmt.Errorf("failed to load X509 key pair: %w", err) } tlsConfig.Certificates = append(tlsConfig.Certificates, cert) @@ -253,7 +264,7 @@ func hostDirectory(host string) string { } func loadHostDir(ctx context.Context, hostsDir string) ([]hostConfig, error) { - b, err 
:= ioutil.ReadFile(filepath.Join(hostsDir, "hosts.toml")) + b, err := os.ReadFile(filepath.Join(hostsDir, "hosts.toml")) if err != nil && !os.IsNotExist(err) { return nil, err } @@ -283,29 +294,45 @@ type hostFileConfig struct { // - push Capabilities []string `toml:"capabilities"` - // CACert can be a string or an array of strings + // CACert are the public key certificates for TLS + // Accepted types + // - string - Single file with certificate(s) + // - []string - Multiple files with certificates CACert interface{} `toml:"ca"` - // TODO: Make this an array (two key types, one for pairs (multiple files), one for single file?) + // Client keypair(s) for TLS with client authentication + // Accepted types + // - string - Single file with public and private keys + // - []string - Multiple files with public and private keys + // - [][2]string - Multiple keypairs with public and private keys in separate files Client interface{} `toml:"client"` + // SkipVerify skips verification of the server's certificate chain + // and host name. This should only be used for testing or in + // combination with other methods of verifying connections. SkipVerify *bool `toml:"skip_verify"` + // Header are additional header files to send to the server Header map[string]interface{} `toml:"header"` - // API (default: "docker") - // API Version (default: "v2") - // Credentials: helper? name? username? alternate domain? token? + // OverridePath indicates the API root endpoint is defined in the URL + // path rather than by the API specification. + // This may be used with non-compliant OCI registries to override the + // API root endpoint. + OverridePath bool `toml:"override_path"` + + // TODO: Credentials: helper? name? username? alternate domain? token? 
} func parseHostsFile(baseDir string, b []byte) ([]hostConfig, error) { tree, err := toml.LoadBytes(b) if err != nil { - return nil, errors.Wrap(err, "failed to parse TOML") + return nil, fmt.Errorf("failed to parse TOML: %w", err) } // HACK: we want to keep toml parsing structures private in this package, however go-toml ignores private embedded types. // so we remap it to a public type within the func body, so technically it's public, but not possible to import elsewhere. + //nolint:unused type HostFileConfig = hostFileConfig c := struct { @@ -363,20 +390,16 @@ func parseHostConfig(server string, baseDir string, config hostFileConfig) (host } u, err := url.Parse(server) if err != nil { - return hostConfig{}, errors.Wrapf(err, "unable to parse server %v", server) + return hostConfig{}, fmt.Errorf("unable to parse server %v: %w", server, err) } result.scheme = u.Scheme result.host = u.Host - // TODO: Handle path based on registry protocol - // Define a registry protocol type - // OCI v1 - Always use given path as is - // Docker v2 - Always ensure ends with /v2/ if len(u.Path) > 0 { u.Path = path.Clean(u.Path) - if !strings.HasSuffix(u.Path, "/v2") { + if !strings.HasSuffix(u.Path, "/v2") && !config.OverridePath { u.Path = u.Path + "/v2" } - } else { + } else if !config.OverridePath { u.Path = "/v2" } result.path = u.Path @@ -394,7 +417,7 @@ func parseHostConfig(server string, baseDir string, config hostFileConfig) (host case "push": result.capabilities |= docker.HostCapabilityPush default: - return hostConfig{}, errors.Errorf("unknown capability %v", c) + return hostConfig{}, fmt.Errorf("unknown capability %v", c) } } } else { @@ -413,7 +436,7 @@ func parseHostConfig(server string, baseDir string, config hostFileConfig) (host return hostConfig{}, err } default: - return hostConfig{}, errors.Errorf("invalid type %v for \"ca\"", cert) + return hostConfig{}, fmt.Errorf("invalid type %v for \"ca\"", cert) } } @@ -435,18 +458,18 @@ func parseHostConfig(server string, 
baseDir string, config hostFileConfig) (host return hostConfig{}, err } if len(slice) != 2 { - return hostConfig{}, errors.Errorf("invalid pair %v for \"client\"", p) + return hostConfig{}, fmt.Errorf("invalid pair %v for \"client\"", p) } var pair [2]string copy(pair[:], slice) result.clientPairs = append(result.clientPairs, pair) default: - return hostConfig{}, errors.Errorf("invalid type %T for \"client\"", p) + return hostConfig{}, fmt.Errorf("invalid type %T for \"client\"", p) } } default: - return hostConfig{}, errors.Errorf("invalid type %v for \"client\"", client) + return hostConfig{}, fmt.Errorf("invalid type %v for \"client\"", client) } } @@ -462,7 +485,7 @@ func parseHostConfig(server string, baseDir string, config hostFileConfig) (host return hostConfig{}, err } default: - return hostConfig{}, errors.Errorf("invalid type %v for header %q", ty, key) + return hostConfig{}, fmt.Errorf("invalid type %v for header %q", ty, key) } } result.header = header @@ -475,7 +498,7 @@ func parseHostConfig(server string, baseDir string, config hostFileConfig) (host func getSortedHosts(root *toml.Tree) ([]string, error) { iter, ok := root.Get("host").(*toml.Tree) if !ok { - return nil, errors.Errorf("invalid `host` tree") + return nil, errors.New("invalid `host` tree") } list := append([]string{}, iter.Keys()...) @@ -498,7 +521,7 @@ func makeStringSlice(slice []interface{}, cb func(string) string) ([]string, err for i, value := range slice { str, ok := value.(string) if !ok { - return nil, errors.Errorf("unable to cast %v to string", value) + return nil, fmt.Errorf("unable to cast %v to string", value) } if cb != nil { @@ -519,15 +542,15 @@ func makeAbsPath(p string, base string) string { // loadCertsDir loads certs from certsDir like "/etc/docker/certs.d" . 
// Compatible with Docker file layout -// - files ending with ".crt" are treated as CA certificate files -// - files ending with ".cert" are treated as client certificates, and -// files with the same name but ending with ".key" are treated as the -// corresponding private key. -// NOTE: If a ".key" file is missing, this function will just return -// the ".cert", which may contain the private key. If the ".cert" file -// does not contain the private key, the caller should detect and error. +// - files ending with ".crt" are treated as CA certificate files +// - files ending with ".cert" are treated as client certificates, and +// files with the same name but ending with ".key" are treated as the +// corresponding private key. +// NOTE: If a ".key" file is missing, this function will just return +// the ".cert", which may contain the private key. If the ".cert" file +// does not contain the private key, the caller should detect and error. func loadCertFiles(ctx context.Context, certsDir string) ([]hostConfig, error) { - fs, err := ioutil.ReadDir(certsDir) + fs, err := os.ReadDir(certsDir) if err != nil && !os.IsNotExist(err) { return nil, err } diff --git a/remotes/docker/config/hosts_test.go b/remotes/docker/config/hosts_test.go index 18dc1c6..a3941f0 100644 --- a/remotes/docker/config/hosts_test.go +++ b/remotes/docker/config/hosts_test.go @@ -20,7 +20,6 @@ import ( "bytes" "context" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" @@ -104,6 +103,13 @@ ca = "/etc/path/default" [host."https://test-3.registry"] client = ["/etc/certs/client-1.pem", "/etc/certs/client-2.pem"] + +[host."https://noncompliantmirror.registry/v2/namespaceprefix"] + capabilities = ["pull"] + override_path = true + +[host."https://noprefixnoncompliant.registry"] + override_path = true ` var tb, fb = true, false expected := []hostConfig{ @@ -159,6 +165,17 @@ ca = "/etc/path/default" {filepath.FromSlash("/etc/certs/client-2.pem")}, }, }, + { + scheme: "https", + host: 
"noncompliantmirror.registry", + path: "/v2/namespaceprefix", + capabilities: docker.HostCapabilityPull, + }, + { + scheme: "https", + host: "noprefixnoncompliant.registry", + capabilities: allCaps, + }, { scheme: "https", host: "test-default.registry", @@ -191,11 +208,7 @@ ca = "/etc/path/default" } func TestLoadCertFiles(t *testing.T) { - dir, err := ioutil.TempDir("", t.Name()) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() type testCase struct { input hostConfig @@ -239,16 +252,16 @@ func TestLoadCertFiles(t *testing.T) { defer os.RemoveAll(hostDir) for _, f := range tc.input.caCerts { - if err := ioutil.WriteFile(f, testKey, 0600); err != nil { + if err := os.WriteFile(f, testKey, 0600); err != nil { t.Fatal(err) } } for _, pair := range tc.input.clientPairs { - if err := ioutil.WriteFile(pair[0], testKey, 0600); err != nil { + if err := os.WriteFile(pair[0], testKey, 0600); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(pair[1], testKey, 0600); err != nil { + if err := os.WriteFile(pair[1], testKey, 0600); err != nil { t.Fatal(err) } } diff --git a/remotes/docker/converter.go b/remotes/docker/converter.go index 43e6b37..d7dca0d 100644 --- a/remotes/docker/converter.go +++ b/remotes/docker/converter.go @@ -28,7 +28,6 @@ import ( "github.com/containerd/containerd/remotes" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // LegacyConfigMediaType should be replaced by OCI image spec. 
@@ -52,12 +51,12 @@ func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Desc // read manifest data mb, err := content.ReadBlob(ctx, store, desc) if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to read index data") + return ocispec.Descriptor{}, fmt.Errorf("failed to read index data: %w", err) } var manifest ocispec.Manifest if err := json.Unmarshal(mb, &manifest); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal data into manifest") + return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal data into manifest: %w", err) } // check config media type @@ -68,7 +67,7 @@ func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Desc manifest.Config.MediaType = images.MediaTypeDockerSchema2Config data, err := json.MarshalIndent(manifest, "", " ") if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal manifest") + return ocispec.Descriptor{}, fmt.Errorf("failed to marshal manifest: %w", err) } // update manifest with gc labels @@ -82,7 +81,7 @@ func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Desc ref := remotes.MakeRefKey(ctx, desc) if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to update content") + return ocispec.Descriptor{}, fmt.Errorf("failed to update content: %w", err) } return desc, nil } diff --git a/remotes/docker/fetcher.go b/remotes/docker/fetcher.go index 4b2c10e..11a7535 100644 --- a/remotes/docker/fetcher.go +++ b/remotes/docker/fetcher.go @@ -19,9 +19,9 @@ package docker import ( "context" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -30,7 +30,6 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) type 
dockerFetcher struct { @@ -42,7 +41,7 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R hosts := r.filterHosts(HostCapabilityPull) if len(hosts) == 0 { - return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts") + return nil, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound) } ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false) @@ -142,9 +141,9 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R } if errdefs.IsNotFound(firstErr) { - firstErr = errors.Wrapf(errdefs.ErrNotFound, - "could not fetch content descriptor %v (%v) from remote", - desc.Digest, desc.MediaType) + firstErr = fmt.Errorf("could not fetch content descriptor %v (%v) from remote: %w", + desc.Digest, desc.MediaType, errdefs.ErrNotFound, + ) } return nil, firstErr @@ -179,19 +178,19 @@ func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, // implementation. if resp.StatusCode == http.StatusNotFound { - return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String()) + return nil, fmt.Errorf("content at %v not found: %w", req.String(), errdefs.ErrNotFound) } var registryErr Errors if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { - return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status) + return nil, fmt.Errorf("unexpected status code %v: %v", req.String(), resp.Status) } - return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) + return nil, fmt.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) } if offset > 0 { cr := resp.Header.Get("content-range") if cr != "" { if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) { - return nil, errors.Errorf("unhandled content range in response: %v", cr) + return nil, fmt.Errorf("unhandled content range in response: %v", cr) } 
} else { @@ -201,12 +200,12 @@ func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, // Discard up to offset // Could use buffer pool here but this case should be rare - n, err := io.Copy(ioutil.Discard, io.LimitReader(resp.Body, offset)) + n, err := io.Copy(io.Discard, io.LimitReader(resp.Body, offset)) if err != nil { - return nil, errors.Wrap(err, "failed to discard to offset") + return nil, fmt.Errorf("failed to discard to offset: %w", err) } if n != offset { - return nil, errors.Errorf("unable to discard to offset") + return nil, errors.New("unable to discard to offset") } } diff --git a/remotes/docker/fetcher_test.go b/remotes/docker/fetcher_test.go index a9d0e39..47cbb6f 100644 --- a/remotes/docker/fetcher_test.go +++ b/remotes/docker/fetcher_test.go @@ -21,14 +21,12 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math/rand" "net/http" "net/http/httptest" "net/url" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" ) @@ -73,7 +71,7 @@ func TestFetcherOpen(t *testing.T) { if err != nil { t.Fatalf("failed to open: %+v", err) } - b, err := ioutil.ReadAll(rc) + b, err := io.ReadAll(rc) if err != nil { t.Fatal(err) } @@ -201,9 +199,9 @@ func TestDockerFetcherOpen(t *testing.T) { if tt.wantErr { var expectedError error if tt.wantServerMessageError { - expectedError = errors.Errorf("unexpected status code %v/ns: %v %s - Server message: %s", s.URL, tt.mockedStatus, http.StatusText(tt.mockedStatus), tt.mockedErr.Error()) + expectedError = fmt.Errorf("unexpected status code %v/ns: %v %s - Server message: %s", s.URL, tt.mockedStatus, http.StatusText(tt.mockedStatus), tt.mockedErr.Error()) } else if tt.wantPlainError { - expectedError = errors.Errorf("unexpected status code %v/ns: %v %s", s.URL, tt.mockedStatus, http.StatusText(tt.mockedStatus)) + expectedError = fmt.Errorf("unexpected status code %v/ns: %v %s", s.URL, tt.mockedStatus, http.StatusText(tt.mockedStatus)) } assert.Equal(t, expectedError.Error(), err.Error()) diff 
--git a/remotes/docker/httpreadseeker.go b/remotes/docker/httpreadseeker.go index 58c866b..9a827ef 100644 --- a/remotes/docker/httpreadseeker.go +++ b/remotes/docker/httpreadseeker.go @@ -18,12 +18,11 @@ package docker import ( "bytes" + "fmt" "io" - "io/ioutil" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/pkg/errors" ) const maxRetry = 3 @@ -70,7 +69,7 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { } if hrs.rc != nil { if clsErr := hrs.rc.Close(); clsErr != nil { - log.L.WithError(clsErr).Errorf("httpReadSeeker: failed to close ReadCloser") + log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser") } hrs.rc = nil } @@ -95,7 +94,7 @@ func (hrs *httpReadSeeker) Close() error { func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { if hrs.closed { - return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: closed") + return 0, fmt.Errorf("Fetcher.Seek: closed: %w", errdefs.ErrUnavailable) } abs := hrs.offset @@ -106,21 +105,21 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { abs += offset case io.SeekEnd: if hrs.size == -1 { - return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: unknown size, cannot seek from end") + return 0, fmt.Errorf("Fetcher.Seek: unknown size, cannot seek from end: %w", errdefs.ErrUnavailable) } abs = hrs.size + offset default: - return 0, errors.Wrap(errdefs.ErrInvalidArgument, "Fetcher.Seek: invalid whence") + return 0, fmt.Errorf("Fetcher.Seek: invalid whence: %w", errdefs.ErrInvalidArgument) } if abs < 0 { - return 0, errors.Wrapf(errdefs.ErrInvalidArgument, "Fetcher.Seek: negative offset") + return 0, fmt.Errorf("Fetcher.Seek: negative offset: %w", errdefs.ErrInvalidArgument) } if abs != hrs.offset { if hrs.rc != nil { if err := hrs.rc.Close(); err != nil { - log.L.WithError(err).Errorf("Fetcher.Seek: failed to close ReadCloser") + log.L.WithError(err).Error("Fetcher.Seek: failed to close 
ReadCloser") } hrs.rc = nil @@ -141,17 +140,17 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { // only try to reopen the body request if we are seeking to a value // less than the actual size. if hrs.open == nil { - return nil, errors.Wrapf(errdefs.ErrNotImplemented, "cannot open") + return nil, fmt.Errorf("cannot open: %w", errdefs.ErrNotImplemented) } rc, err := hrs.open(hrs.offset) if err != nil { - return nil, errors.Wrapf(err, "httpReadSeeker: failed open") + return nil, fmt.Errorf("httpReadSeeker: failed open: %w", err) } if hrs.rc != nil { if err := hrs.rc.Close(); err != nil { - log.L.WithError(err).Errorf("httpReadSeeker: failed to close ReadCloser") + log.L.WithError(err).Error("httpReadSeeker: failed to close ReadCloser") } } hrs.rc = rc @@ -162,7 +161,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { // as the length is already satisfied but we just return the empty // reader instead. - hrs.rc = ioutil.NopCloser(bytes.NewReader([]byte{})) + hrs.rc = io.NopCloser(bytes.NewReader([]byte{})) } return hrs.rc, nil diff --git a/remotes/docker/pusher.go b/remotes/docker/pusher.go index 97ed66a..bef77fa 100644 --- a/remotes/docker/pusher.go +++ b/remotes/docker/pusher.go @@ -18,11 +18,13 @@ package docker import ( "context" + "errors" + "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" + "sync" "time" "github.com/containerd/containerd/content" @@ -33,7 +35,6 @@ import ( remoteserrors "github.com/containerd/containerd/remotes/errors" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) type dockerPusher struct { @@ -56,7 +57,7 @@ func (p dockerPusher) Writer(ctx context.Context, opts ...content.WriterOpt) (co } } if wOpts.Ref == "" { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") + return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument) } return p.push(ctx, wOpts.Desc, wOpts.Ref, true) } @@ -77,22 
+78,22 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str status, err := p.tracker.GetStatus(ref) if err == nil { if status.Committed && status.Offset == status.Total { - return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v", ref) + return nil, fmt.Errorf("ref %v: %w", ref, errdefs.ErrAlreadyExists) } - if unavailableOnFail { + if unavailableOnFail && status.ErrClosed == nil { // Another push of this ref is happening elsewhere. The rest of function // will continue only when `errdefs.IsNotFound(err) == true` (i.e. there // is no actively-tracked ref already). - return nil, errors.Wrap(errdefs.ErrUnavailable, "push is on-going") + return nil, fmt.Errorf("push is on-going: %w", errdefs.ErrUnavailable) } // TODO: Handle incomplete status } else if !errdefs.IsNotFound(err) { - return nil, errors.Wrap(err, "failed to get status") + return nil, fmt.Errorf("failed to get status: %w", err) } hosts := p.filterHosts(HostCapabilityPush) if len(hosts) == 0 { - return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts") + return nil, fmt.Errorf("no push hosts: %w", errdefs.ErrNotFound) } var ( @@ -144,7 +145,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str }, }) resp.Body.Close() - return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) + return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists) } } else if resp.StatusCode != http.StatusNotFound { err := remoteserrors.NewUnexpectedStatusErr(resp) @@ -206,7 +207,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str Offset: desc.Size, }, }) - return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) + return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists) default: err := remoteserrors.NewUnexpectedStatusErr(resp) log.G(ctx).WithField("resp", resp).WithField("body", 
string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") @@ -222,7 +223,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str if strings.HasPrefix(location, "/") { lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location) if err != nil { - return nil, errors.Wrapf(err, "unable to parse location %v", location) + return nil, fmt.Errorf("unable to parse location %v: %w", location, err) } } else { if !strings.Contains(location, "://") { @@ -230,7 +231,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str } lurl, err = url.Parse(location) if err != nil { - return nil, errors.Wrapf(err, "unable to parse location %v", location) + return nil, fmt.Errorf("unable to parse location %v: %w", location, err) } if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme { @@ -261,27 +262,20 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str // TODO: Support chunked upload - pr, pw := io.Pipe() - respC := make(chan response, 1) - body := ioutil.NopCloser(pr) + pushw := newPushWriter(p.dockerBase, ref, desc.Digest, p.tracker, isManifest) req.body = func() (io.ReadCloser, error) { - if body == nil { - return nil, errors.New("cannot reuse body, request must be retried") - } - // Only use the body once since pipe cannot be seeked - ob := body - body = nil - return ob, nil + pr, pw := io.Pipe() + pushw.setPipe(pw) + return io.NopCloser(pr), nil } req.size = desc.Size go func() { - defer close(respC) resp, err := req.doWithRetries(ctx, nil) if err != nil { - respC <- response{err: err} - pr.CloseWithError(err) + pushw.setError(err) + pushw.Close() return } @@ -290,20 +284,13 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str default: err := remoteserrors.NewUnexpectedStatusErr(resp) log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") - 
pr.CloseWithError(err) + pushw.setError(err) + pushw.Close() } - respC <- response{Response: resp} + pushw.setResponse(resp) }() - return &pushWriter{ - base: p.dockerBase, - ref: ref, - pipe: pw, - responseC: respC, - isManifest: isManifest, - expected: desc.Digest, - tracker: p.tracker, - }, nil + return pushw, nil } func getManifestPath(object string, dgst digest.Digest) []string { @@ -325,29 +312,89 @@ func getManifestPath(object string, dgst digest.Digest) []string { return []string{"manifests", object} } -type response struct { - *http.Response - err error -} - type pushWriter struct { base *dockerBase ref string - pipe *io.PipeWriter - responseC <-chan response + pipe *io.PipeWriter + + pipeC chan *io.PipeWriter + respC chan *http.Response + closeOnce sync.Once + errC chan error + isManifest bool expected digest.Digest tracker StatusTracker } +func newPushWriter(db *dockerBase, ref string, expected digest.Digest, tracker StatusTracker, isManifest bool) *pushWriter { + // Initialize and create response + return &pushWriter{ + base: db, + ref: ref, + expected: expected, + tracker: tracker, + pipeC: make(chan *io.PipeWriter, 1), + respC: make(chan *http.Response, 1), + errC: make(chan error, 1), + isManifest: isManifest, + } +} + +func (pw *pushWriter) setPipe(p *io.PipeWriter) { + pw.pipeC <- p +} + +func (pw *pushWriter) setError(err error) { + pw.errC <- err +} +func (pw *pushWriter) setResponse(resp *http.Response) { + pw.respC <- resp +} + func (pw *pushWriter) Write(p []byte) (n int, err error) { status, err := pw.tracker.GetStatus(pw.ref) if err != nil { return n, err } + + if pw.pipe == nil { + p, ok := <-pw.pipeC + if !ok { + return 0, io.ErrClosedPipe + } + pw.pipe = p + } else { + select { + case p, ok := <-pw.pipeC: + if !ok { + return 0, io.ErrClosedPipe + } + pw.pipe.CloseWithError(content.ErrReset) + pw.pipe = p + + // If content has already been written, the bytes + // cannot be written and the caller must reset + status.Offset = 0 + 
status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return 0, content.ErrReset + default: + } + } + n, err = pw.pipe.Write(p) + if errors.Is(err, io.ErrClosedPipe) { + // if the pipe is closed, we might have the original error on the error + // channel - so we should try and get it + select { + case err2 := <-pw.errC: + err = err2 + default: + } + } status.Offset += int64(n) status.UpdatedAt = time.Now() pw.tracker.SetStatus(pw.ref, status) @@ -355,7 +402,21 @@ func (pw *pushWriter) Write(p []byte) (n int, err error) { } func (pw *pushWriter) Close() error { - return pw.pipe.Close() + // Ensure pipeC is closed but handle `Close()` being + // called multiple times without panicking + pw.closeOnce.Do(func() { + close(pw.pipeC) + }) + if pw.pipe != nil { + status, err := pw.tracker.GetStatus(pw.ref) + if err == nil && !status.Committed { + // Closing an incomplete writer. Record this as an error so that following write can retry it. + status.ErrClosed = errors.New("closed incomplete writer") + pw.tracker.SetStatus(pw.ref, status) + } + return pw.pipe.Close() + } + return nil } func (pw *pushWriter) Status() (content.Status, error) { @@ -374,35 +435,57 @@ func (pw *pushWriter) Digest() digest.Digest { func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { // Check whether read has already thrown an error - if _, err := pw.pipe.Write([]byte{}); err != nil && err != io.ErrClosedPipe { - return errors.Wrap(err, "pipe error before commit") + if _, err := pw.pipe.Write([]byte{}); err != nil && !errors.Is(err, io.ErrClosedPipe) { + return fmt.Errorf("pipe error before commit: %w", err) } if err := pw.pipe.Close(); err != nil { return err } // TODO: timeout waiting for response - resp := <-pw.responseC - if resp.err != nil { - return resp.err + var resp *http.Response + select { + case err := <-pw.errC: + return err + case resp = <-pw.respC: + defer resp.Body.Close() + case p, ok := <-pw.pipeC: + 
// check whether the pipe has changed in the commit, because sometimes Write + // can complete successfully, but the pipe may have changed. In that case, the + // content needs to be reset. + if !ok { + return io.ErrClosedPipe + } + pw.pipe.CloseWithError(content.ErrReset) + pw.pipe = p + + // If content has already been written, the bytes + // cannot be written again and the caller must reset + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return err + } + status.Offset = 0 + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return content.ErrReset } - defer resp.Response.Body.Close() // 201 is specified return status, some registries return // 200, 202 or 204. switch resp.StatusCode { case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted: default: - return remoteserrors.NewUnexpectedStatusErr(resp.Response) + return remoteserrors.NewUnexpectedStatusErr(resp) } status, err := pw.tracker.GetStatus(pw.ref) if err != nil { - return errors.Wrap(err, "failed to get status") + return fmt.Errorf("failed to get status: %w", err) } if size > 0 && size != status.Offset { - return errors.Errorf("unexpected size %d, expected %d", status.Offset, size) + return fmt.Errorf("unexpected size %d, expected %d", status.Offset, size) } if expected == "" { @@ -411,11 +494,11 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) if err != nil { - return errors.Wrap(err, "invalid content digest in response") + return fmt.Errorf("invalid content digest in response: %w", err) } if actual != expected { - return errors.Errorf("got digest %s, expected %s", actual, expected) + return fmt.Errorf("got digest %s, expected %s", actual, expected) } status.Committed = true diff --git a/remotes/docker/pusher_test.go b/remotes/docker/pusher_test.go index 8b81eb3..d982a7d 100644 --- a/remotes/docker/pusher_test.go +++ 
b/remotes/docker/pusher_test.go @@ -17,10 +17,24 @@ package docker import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" "reflect" + "regexp" + "strings" "testing" - digest "github.com/opencontainers/go-digest" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/remotes" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/stretchr/testify/assert" ) func TestGetManifestPath(t *testing.T) { @@ -50,3 +64,316 @@ func TestGetManifestPath(t *testing.T) { } } } + +// TestPusherErrClosedRetry tests if retrying work when error occurred on close. +func TestPusherErrClosedRetry(t *testing.T) { + ctx := context.Background() + + p, reg, done := samplePusher(t) + defer done() + + layerContent := []byte("test") + reg.uploadable = false + if err := tryUpload(ctx, t, p, layerContent); err == nil { + t.Errorf("upload should fail but succeeded") + } + + // retry + reg.uploadable = true + if err := tryUpload(ctx, t, p, layerContent); err != nil { + t.Errorf("upload should succeed but got %v", err) + } +} + +// TestPusherErrReset tests the push method if the request needs to be retried +// i.e when ErrReset occurs +func TestPusherErrReset(t *testing.T) { + p, reg, done := samplePusher(t) + defer done() + + p.object = "latest@sha256:55d31f3af94c797b65b310569803cacc1c9f4a34bf61afcdc8138f89345c8308" + + reg.uploadable = true + reg.putHandlerFunc = func() func(w http.ResponseWriter, r *http.Request) bool { + // sets whether the request should timeout so that a reset error can occur and + // request will be retried + shouldTimeout := true + return func(w http.ResponseWriter, r *http.Request) bool { + if shouldTimeout { + shouldTimeout = !shouldTimeout + w.WriteHeader(http.StatusRequestTimeout) + return true + } + return false + } + }() + + ct := []byte("manifest-content") + + desc := ocispec.Descriptor{ + MediaType: 
ocispec.MediaTypeImageManifest, + Digest: digest.FromBytes(ct), + Size: int64(len(ct)), + } + + w, err := p.push(context.Background(), desc, remotes.MakeRefKey(context.Background(), desc), false) + assert.NoError(t, err) + + // first push should fail with ErrReset + _, err = w.Write(ct) + assert.NoError(t, err) + err = w.Commit(context.Background(), desc.Size, desc.Digest) + assert.Equal(t, content.ErrReset, err) + + // second push should succeed + _, err = w.Write(ct) + assert.NoError(t, err) + err = w.Commit(context.Background(), desc.Size, desc.Digest) + assert.NoError(t, err) +} + +func tryUpload(ctx context.Context, t *testing.T, p dockerPusher, layerContent []byte) error { + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayerGzip, + Digest: digest.FromBytes(layerContent), + Size: int64(len(layerContent)), + } + cw, err := p.Writer(ctx, content.WithRef("test-1"), content.WithDescriptor(desc)) + if err != nil { + return err + } + defer cw.Close() + if _, err := cw.Write(layerContent); err != nil { + return err + } + return cw.Commit(ctx, 0, "") +} + +func samplePusher(t *testing.T) (dockerPusher, *uploadableMockRegistry, func()) { + reg := &uploadableMockRegistry{ + availableContents: make([]string, 0), + } + s := httptest.NewServer(reg) + u, err := url.Parse(s.URL) + if err != nil { + t.Fatal(err) + } + return dockerPusher{ + dockerBase: &dockerBase{ + repository: "sample", + hosts: []RegistryHost{ + { + Client: s.Client(), + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + Capabilities: HostCapabilityPush | HostCapabilityResolve, + }, + }, + }, + object: "sample", + tracker: NewInMemoryTracker(), + }, reg, s.Close +} + +var manifestRegexp = regexp.MustCompile(`/([a-z0-9]+)/manifests/(.*)`) +var blobUploadRegexp = regexp.MustCompile(`/([a-z0-9]+)/blobs/uploads/(.*)`) + +// uploadableMockRegistry provides minimal registry APIs which are enough to serve requests from dockerPusher. 
+type uploadableMockRegistry struct { + availableContents []string + uploadable bool + putHandlerFunc func(w http.ResponseWriter, r *http.Request) bool +} + +func (u *uploadableMockRegistry) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodPut && u.putHandlerFunc != nil { + // if true return the response without calling default handler + if u.putHandlerFunc(w, r) { + return + } + } + u.defaultHandler(w, r) +} + +func (u *uploadableMockRegistry) defaultHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodPost { + if matches := blobUploadRegexp.FindStringSubmatch(r.URL.Path); len(matches) != 0 { + if u.uploadable { + w.Header().Set("Location", "/upload") + } else { + w.Header().Set("Location", "/cannotupload") + } + dgstr := digest.Canonical.Digester() + if _, err := io.Copy(dgstr.Hash(), r.Body); err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + u.availableContents = append(u.availableContents, dgstr.Digest().String()) + w.WriteHeader(http.StatusAccepted) + return + } + } else if r.Method == http.MethodPut { + mfstMatches := manifestRegexp.FindStringSubmatch(r.URL.Path) + if len(mfstMatches) != 0 || strings.HasPrefix(r.URL.Path, "/upload") { + dgstr := digest.Canonical.Digester() + if _, err := io.Copy(dgstr.Hash(), r.Body); err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + u.availableContents = append(u.availableContents, dgstr.Digest().String()) + w.Header().Set("Docker-Content-Digest", dgstr.Digest().String()) + w.WriteHeader(http.StatusCreated) + return + } else if r.URL.Path == "/cannotupload" { + w.WriteHeader(http.StatusInternalServerError) + return + } + } else if r.Method == http.MethodHead { + var content string + // check for both manifest and blob paths + if manifestMatch := manifestRegexp.FindStringSubmatch(r.URL.Path); len(manifestMatch) == 3 { + content = manifestMatch[2] + } else if blobMatch := blobUploadRegexp.FindStringSubmatch(r.URL.Path);
len(blobMatch) == 3 { + content = blobMatch[2] + } + // if content is not found or if the path is not manifest or blob + // we return 404 + if u.isContentAlreadyExist(content) { + w.WriteHeader(http.StatusOK) + } else { + w.WriteHeader(http.StatusNotFound) + } + return + } + fmt.Println(r) + w.WriteHeader(http.StatusNotFound) +} + +// checks if the content is already present in the registry +func (u *uploadableMockRegistry) isContentAlreadyExist(c string) bool { + for _, ct := range u.availableContents { + if ct == c { + return true + } + } + return false +} + +func Test_dockerPusher_push(t *testing.T) { + + p, reg, done := samplePusher(t) + defer done() + + reg.uploadable = true + + manifestContent := []byte("manifest-content") + manifestContentDigest := digest.FromBytes(manifestContent) + layerContent := []byte("layer-content") + layerContentDigest := digest.FromBytes(layerContent) + + // using a random object here + baseObject := "latest@sha256:55d31f3af94c797b65b310569803cacc1c9f4a34bf61afcdc8138f89345c8308" + + type args struct { + content []byte + mediatype string + ref string + unavailableOnFail bool + } + tests := []struct { + name string + dp dockerPusher + dockerBaseObject string + args args + checkerFunc func(writer *pushWriter) bool + wantErr error + }{ + { + name: "when a manifest is pushed", + dp: p, + dockerBaseObject: baseObject, + args: args{ + content: manifestContent, + mediatype: ocispec.MediaTypeImageManifest, + ref: fmt.Sprintf("manifest-%s", manifestContentDigest.String()), + unavailableOnFail: false, + }, + checkerFunc: func(writer *pushWriter) bool { + select { + case resp := <-writer.respC: + // 201 should be the response code when uploading a new manifest + return resp.StatusCode == http.StatusCreated + case <-writer.errC: + return false + } + }, + wantErr: nil, + }, + { + name: "trying to push content that already exists", + dp: p, + dockerBaseObject: baseObject, + args: args{ + content: manifestContent, + mediatype: 
ocispec.MediaTypeImageManifest, + ref: fmt.Sprintf("manifest-%s", manifestContentDigest.String()), + unavailableOnFail: false, + }, + wantErr: fmt.Errorf("content %v on remote: %w", digest.FromBytes(manifestContent), errdefs.ErrAlreadyExists), + }, + { + name: "trying to push a blob layer", + dp: p, + // Not needed to set the base object as it is used to generate path only in case of manifests + // dockerBaseObject: + args: args{ + content: layerContent, + mediatype: ocispec.MediaTypeImageLayer, + ref: fmt.Sprintf("layer-%s", layerContentDigest.String()), + unavailableOnFail: false, + }, + checkerFunc: func(writer *pushWriter) bool { + select { + case resp := <-writer.respC: + // 201 should be the response code when uploading a new blob + return resp.StatusCode == http.StatusCreated + case <-writer.errC: + return false + } + }, + wantErr: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + desc := ocispec.Descriptor{ + MediaType: test.args.mediatype, + Digest: digest.FromBytes(test.args.content), + Size: int64(len(test.args.content)), + } + + test.dp.object = test.dockerBaseObject + + got, err := test.dp.push(context.Background(), desc, test.args.ref, test.args.unavailableOnFail) + + assert.Equal(t, test.wantErr, err) + // if an error is expected, further comparisons are not required. 
+ if test.wantErr != nil { + return + } + + // write the content to the writer, this will be done when a Read() is called on the body of the request + got.Write(test.args.content) + + pw, ok := got.(*pushWriter) + if !ok { + assert.Errorf(t, errors.New("unable to cast content.Writer to pushWriter"), "got %v instead of pushwriter", got) + } + + // test whether a proper response has been received after the push operation + assert.True(t, test.checkerFunc(pw)) + + }) + } +} diff --git a/remotes/docker/registry.go b/remotes/docker/registry.go index 1e77d4c..98cafcd 100644 --- a/remotes/docker/registry.go +++ b/remotes/docker/registry.go @@ -17,10 +17,9 @@ package docker import ( + "errors" "net" "net/http" - - "github.com/pkg/errors" ) // HostCapabilities represent the capabilities of the registry diff --git a/remotes/docker/resolver.go b/remotes/docker/resolver.go index 1be9e1d..13f500e 100644 --- a/remotes/docker/resolver.go +++ b/remotes/docker/resolver.go @@ -18,9 +18,10 @@ package docker import ( "context" + "errors" "fmt" "io" - "io/ioutil" + "net" "net/http" "net/url" "path" @@ -35,7 +36,6 @@ import ( "github.com/containerd/containerd/version" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context/ctxhttp" ) @@ -95,25 +95,30 @@ type ResolverOptions struct { Tracker StatusTracker // Authorizer is used to authorize registry requests - // Deprecated: use Hosts + // + // Deprecated: use Hosts. Authorizer Authorizer // Credentials provides username and secret given a host. // If username is empty but a secret is given, that secret // is interpreted as a long lived token. - // Deprecated: use Hosts + // + // Deprecated: use Hosts. Credentials func(string) (string, string, error) // Host provides the hostname given a namespace. - // Deprecated: use Hosts + // + // Deprecated: use Hosts. 
Host func(string) (string, error) // PlainHTTP specifies to use plain http and not https - // Deprecated: use Hosts + // + // Deprecated: use Hosts. PlainHTTP bool // Client is the http client to used when making registry requests - // Deprecated: use Hosts + // + // Deprecated: use Hosts. Client *http.Client } @@ -140,6 +145,9 @@ func NewResolver(options ResolverOptions) remotes.Resolver { if options.Headers == nil { options.Headers = make(http.Header) + } else { + // make a copy of the headers to avoid race due to concurrent map write + options.Headers = options.Headers.Clone() } if _, ok := options.Headers["User-Agent"]; !ok { options.Headers.Set("User-Agent", "containerd/"+version.Version) @@ -255,7 +263,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp hosts := base.filterHosts(caps) if len(hosts) == 0 { - return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts") + return "", ocispec.Descriptor{}, fmt.Errorf("no resolve hosts: %w", errdefs.ErrNotFound) } ctx, err = ContextWithRepositoryScope(ctx, refspec, false) @@ -280,7 +288,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp resp, err := req.doWithRetries(ctx, nil) if err != nil { if errors.Is(err, ErrInvalidAuthorization) { - err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization") + err = fmt.Errorf("pull access denied, repository does not exist or may require authorization: %w", err) } // Store the error for referencing later if firstErr == nil { @@ -299,11 +307,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp if resp.StatusCode > 399 { // Set firstErr when encountering the first non-404 status code. 
if firstErr == nil { - firstErr = errors.Errorf("pulling from host %s failed with status code %v: %v", host.Host, u, resp.Status) + firstErr = fmt.Errorf("pulling from host %s failed with status code %v: %v", host.Host, u, resp.Status) } continue // try another host } - return "", ocispec.Descriptor{}, errors.Errorf("pulling from host %s failed with unexpected status code %v: %v", host.Host, u, resp.Status) + return "", ocispec.Descriptor{}, fmt.Errorf("pulling from host %s failed with unexpected status code %v: %v", host.Host, u, resp.Status) } size := resp.ContentLength contentType := getManifestMediaType(resp) @@ -319,7 +327,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp if dgstHeader != "" && size != -1 { if err := dgstHeader.Validate(); err != nil { - return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) + return "", ocispec.Descriptor{}, fmt.Errorf("%q in header not a valid digest: %w", dgstHeader, err) } dgst = dgstHeader } @@ -359,7 +367,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp return "", ocispec.Descriptor{}, err } } - } else if _, err := io.Copy(ioutil.Discard, &bodyReader); err != nil { + } else if _, err := io.Copy(io.Discard, &bodyReader); err != nil { return "", ocispec.Descriptor{}, err } size = bodyReader.bytesRead @@ -367,7 +375,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp // Prevent resolving to excessively large manifests if size > MaxManifestSize { if firstErr == nil { - firstErr = errors.Wrapf(errdefs.ErrNotFound, "rejecting %d byte manifest for %s", size, ref) + firstErr = fmt.Errorf("rejecting %d byte manifest for %s: %w", size, ref, errdefs.ErrNotFound) } continue } @@ -388,7 +396,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp // means that either no registries were given or each registry returned 404. 
if firstErr == nil { - firstErr = errors.Wrap(errdefs.ErrNotFound, ref) + firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound) } return "", ocispec.Descriptor{}, firstErr @@ -529,9 +537,10 @@ func (r *request) do(ctx context.Context) (*http.Response, error) { if err != nil { return nil, err } - req.Header = http.Header{} // headers need to be copied to avoid concurrent map access - for k, v := range r.header { - req.Header[k] = v + if r.header == nil { + req.Header = http.Header{} + } else { + req.Header = r.header.Clone() // headers need to be copied to avoid concurrent map access } if r.body != nil { body, err := r.body() @@ -548,7 +557,7 @@ func (r *request) do(ctx context.Context) (*http.Response, error) { ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) log.G(ctx).WithFields(requestFields(req)).Debug("do request") if err := r.authorize(ctx, req); err != nil { - return nil, errors.Wrap(err, "failed to authorize") + return nil, fmt.Errorf("failed to authorize: %w", err) } var client = &http.Client{} @@ -560,13 +569,16 @@ func (r *request) do(ctx context.Context) (*http.Response, error) { if len(via) >= 10 { return errors.New("stopped after 10 redirects") } - return errors.Wrap(r.authorize(ctx, req), "failed to authorize redirect") + if err := r.authorize(ctx, req); err != nil { + return fmt.Errorf("failed to authorize redirect: %w", err) + } + return nil } } resp, err := ctxhttp.Do(ctx, client, req) if err != nil { - return nil, errors.Wrap(err, "failed to do request") + return nil, fmt.Errorf("failed to do request: %w", err) } log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") return resp, nil @@ -665,3 +677,17 @@ func responseFields(resp *http.Response) logrus.Fields { return logrus.Fields(fields) } + +// IsLocalhost checks if the registry host is local. 
+func IsLocalhost(host string) bool { + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } + + if host == "localhost" { + return true + } + + ip := net.ParseIP(host) + return ip.IsLoopback() +} diff --git a/remotes/docker/resolver_test.go b/remotes/docker/resolver_test.go index 1f2ff8c..073e3c9 100644 --- a/remotes/docker/resolver_test.go +++ b/remotes/docker/resolver_test.go @@ -21,9 +21,9 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "strconv" @@ -32,10 +32,10 @@ import ( "time" "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker/auth" digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) func TestHTTPResolver(t *testing.T) { @@ -46,7 +46,6 @@ func TestHTTPResolver(t *testing.T) { base := s.URL[7:] // strip "http://" return base, options, s.Close } - runBasicTest(t, "testname", s) } @@ -54,6 +53,30 @@ func TestHTTPSResolver(t *testing.T) { runBasicTest(t, "testname", tlsServer) } +func TestResolverOptionsRace(t *testing.T) { + header := http.Header{} + header.Set("X-Test", "test") + + s := func(h http.Handler) (string, ResolverOptions, func()) { + s := httptest.NewServer(h) + + options := ResolverOptions{ + Headers: header, + } + base := s.URL[7:] // strip "http://" + return base, options, s.Close + } + + for i := 0; i < 5; i++ { + t.Run(fmt.Sprintf("test ResolverOptions race %d", i), func(t *testing.T) { + // parallel sub tests so the race condition (if not handled) can be caught + // by race detector + t.Parallel() + runBasicTest(t, "testname", s) + }) + } +} + func TestBasicResolver(t *testing.T) { basicAuth := func(h http.Handler) (string, ResolverOptions, func()) { // Wrap with basic auth @@ -138,6 +161,31 @@ func TestRefreshTokenResolver(t *testing.T) { runBasicTest(t, "testname", 
withTokenServer(th, creds)) } +func TestFetchRefreshToken(t *testing.T) { + f := func(t *testing.T, disablePOST bool) { + name := "testname" + if disablePOST { + name += "-disable-post" + } + var fetchedRefreshToken string + onFetchRefreshToken := func(ctx context.Context, refreshToken string, req *http.Request) { + fetchedRefreshToken = refreshToken + } + srv := newRefreshTokenServer(t, name, disablePOST, onFetchRefreshToken) + runBasicTest(t, name, srv.BasicTestFunc()) + if fetchedRefreshToken != srv.RefreshToken { + t.Errorf("unexpected refresh token: got %q", fetchedRefreshToken) + } + } + + t.Run("POST", func(t *testing.T) { + f(t, false) + }) + t.Run("GET", func(t *testing.T) { + f(t, true) + }) +} + func TestPostBasicAuthTokenResolver(t *testing.T) { th := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { @@ -563,7 +611,7 @@ func testFetch(ctx context.Context, f remotes.Fetcher, desc ocispec.Descriptor) dgstr := desc.Digest.Algorithm().Digester() io.Copy(dgstr.Hash(), r) if dgstr.Digest() != desc.Digest { - return errors.Errorf("content mismatch: %s != %s", dgstr.Digest(), desc.Digest) + return fmt.Errorf("content mismatch: %s != %s", dgstr.Digest(), desc.Digest) } return nil @@ -572,14 +620,14 @@ func testFetch(ctx context.Context, f remotes.Fetcher, desc ocispec.Descriptor) func testocimanifest(ctx context.Context, f remotes.Fetcher, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { r, err := f.Fetch(ctx, desc) if err != nil { - return nil, errors.Wrapf(err, "failed to fetch %s", desc.Digest) + return nil, fmt.Errorf("failed to fetch %s: %w", desc.Digest, err) } - p, err := ioutil.ReadAll(r) + p, err := io.ReadAll(r) if err != nil { return nil, err } if dgst := desc.Digest.Algorithm().FromBytes(p); dgst != desc.Digest { - return nil, errors.Errorf("digest mismatch: %s != %s", dgst, desc.Digest) + return nil, fmt.Errorf("digest mismatch: %s != %s", dgst, desc.Digest) } var manifest ocispec.Manifest @@ 
-659,3 +707,131 @@ func (m testManifest) RegisterHandler(r *http.ServeMux, name string) { r.Handle(fmt.Sprintf("/v2/%s/blobs/%s", name, c.Digest()), c) } } + +func newRefreshTokenServer(t testing.TB, name string, disablePOST bool, onFetchRefreshToken OnFetchRefreshToken) *refreshTokenServer { + return &refreshTokenServer{ + T: t, + Name: name, + DisablePOST: disablePOST, + OnFetchRefreshToken: onFetchRefreshToken, + AccessToken: "testAccessToken-" + name, + RefreshToken: "testRefreshToken-" + name, + Username: "testUser-" + name, + Password: "testPassword-" + name, + } +} + +type refreshTokenServer struct { + T testing.TB + Name string + DisablePOST bool + OnFetchRefreshToken OnFetchRefreshToken + AccessToken string + RefreshToken string + Username string + Password string +} + +func (srv *refreshTokenServer) isValidAuthorizationHeader(s string) bool { + fields := strings.Fields(s) + return len(fields) == 2 && strings.ToLower(fields[0]) == "bearer" && (fields[1] == srv.RefreshToken || fields[1] == srv.AccessToken) +} + +func (srv *refreshTokenServer) BasicTestFunc() func(h http.Handler) (string, ResolverOptions, func()) { + t := srv.T + return func(h http.Handler) (string, ResolverOptions, func()) { + wrapped := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/token" { + if !srv.isValidAuthorizationHeader(r.Header.Get("Authorization")) { + realm := fmt.Sprintf("https://%s/token", r.Host) + wwwAuthenticateHeader := fmt.Sprintf("Bearer realm=%q,service=registry,scope=\"repository:%s:pull\"", realm, srv.Name) + rw.Header().Set("WWW-Authenticate", wwwAuthenticateHeader) + rw.WriteHeader(http.StatusUnauthorized) + return + } + h.ServeHTTP(rw, r) + return + } + switch r.Method { + case http.MethodGet: // https://docs.docker.com/registry/spec/auth/token/#requesting-a-token + u, p, ok := r.BasicAuth() + if !ok || u != srv.Username || p != srv.Password { + rw.WriteHeader(http.StatusForbidden) + return + } + var resp 
auth.FetchTokenResponse + resp.Token = srv.AccessToken + resp.AccessToken = srv.AccessToken // alias of Token + query := r.URL.Query() + switch query.Get("offline_token") { + case "true": + resp.RefreshToken = srv.RefreshToken + case "false", "": + default: + rw.WriteHeader(http.StatusBadRequest) + return + } + b, err := json.Marshal(resp) + if err != nil { + rw.WriteHeader(http.StatusInternalServerError) + return + } + rw.WriteHeader(http.StatusOK) + rw.Header().Set("Content-Type", "application/json") + t.Logf("GET mode: returning JSON %q, for query %+v", string(b), query) + rw.Write(b) + case http.MethodPost: // https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token + if srv.DisablePOST { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + r.ParseForm() + pf := r.PostForm + if pf.Get("grant_type") != "password" { + rw.WriteHeader(http.StatusBadRequest) + return + } + if pf.Get("username") != srv.Username || pf.Get("password") != srv.Password { + rw.WriteHeader(http.StatusForbidden) + return + } + var resp auth.OAuthTokenResponse + resp.AccessToken = srv.AccessToken + switch pf.Get("access_type") { + case "offline": + resp.RefreshToken = srv.RefreshToken + case "online", "": + default: + rw.WriteHeader(http.StatusBadRequest) + return + } + b, err := json.Marshal(resp) + if err != nil { + rw.WriteHeader(http.StatusInternalServerError) + return + } + rw.WriteHeader(http.StatusOK) + rw.Header().Set("Content-Type", "application/json") + t.Logf("POST mode: returning JSON %q, for form %+v", string(b), pf) + rw.Write(b) + default: + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + }) + + base, options, close := tlsServer(wrapped) + authorizer := NewDockerAuthorizer( + WithAuthClient(options.Client), + WithAuthCreds(func(string) (string, string, error) { + return srv.Username, srv.Password, nil + }), + WithFetchRefreshToken(srv.OnFetchRefreshToken), + ) + options.Hosts = ConfigureDefaultRegistries( + WithClient(options.Client), + 
WithAuthorizer(authorizer), + ) + return base, options, close + } +} diff --git a/remotes/docker/schema1/converter.go b/remotes/docker/schema1/converter.go index f15a9ac..efa4e8d 100644 --- a/remotes/docker/schema1/converter.go +++ b/remotes/docker/schema1/converter.go @@ -21,16 +21,14 @@ import ( "context" "encoding/base64" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "strconv" "strings" "sync" "time" - "golang.org/x/sync/errgroup" - "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" @@ -40,7 +38,7 @@ import ( digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" + "golang.org/x/sync/errgroup" ) const ( @@ -159,12 +157,12 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De history, diffIDs, err := c.schema1ManifestHistory() if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "schema 1 conversion failed") + return ocispec.Descriptor{}, fmt.Errorf("schema 1 conversion failed: %w", err) } var img ocispec.Image if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal image from schema 1 history") + return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal image from schema 1 history: %w", err) } img.History = history @@ -175,7 +173,7 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De b, err := json.MarshalIndent(img, "", " ") if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image") + return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err) } config := ocispec.Descriptor{ @@ -199,7 +197,7 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De mb, err := 
json.MarshalIndent(manifest, "", " ") if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image") + return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err) } desc := ocispec.Descriptor{ @@ -216,12 +214,12 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De ref := remotes.MakeRefKey(ctx, desc) if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image manifest") + return ocispec.Descriptor{}, fmt.Errorf("failed to write image manifest: %w", err) } ref = remotes.MakeRefKey(ctx, config) if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image config") + return ocispec.Descriptor{}, fmt.Errorf("failed to write image config: %w", err) } return desc, nil @@ -230,7 +228,7 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De // ReadStripSignature reads in a schema1 manifest and returns a byte array // with the "signatures" field stripped func ReadStripSignature(schema1Blob io.Reader) ([]byte, error) { - b, err := ioutil.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB + b, err := io.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB if err != nil { return nil, err } @@ -350,7 +348,7 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro if desc.Size == -1 { info, err := c.contentStore.Info(ctx, desc.Digest) if err != nil { - return errors.Wrap(err, "failed to get blob info") + return fmt.Errorf("failed to get blob info: %w", err) } desc.Size = info.Size } @@ -371,7 +369,7 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro } if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed", 
fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { - return errors.Wrap(err, "failed to update uncompressed label") + return fmt.Errorf("failed to update uncompressed label: %w", err) } c.mu.Lock() @@ -385,7 +383,7 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) { cinfo, err := c.contentStore.Info(ctx, desc.Digest) if err != nil { - return false, errors.Wrap(err, "failed to get blob info") + return false, fmt.Errorf("failed to get blob info: %w", err) } desc.Size = cinfo.Size @@ -442,7 +440,7 @@ func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest for i := range m.History { var h v1History if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil { - return nil, nil, errors.Wrap(err, "failed to unmarshal history") + return nil, nil, fmt.Errorf("failed to unmarshal history: %w", err) } blobSum := m.FSLayers[i].BlobSum @@ -554,7 +552,7 @@ func stripSignature(b []byte) ([]byte, error) { } pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected) if err != nil { - return nil, errors.Wrapf(err, "could not decode %s", sig.Signatures[0].Protected) + return nil, fmt.Errorf("could not decode %s: %w", sig.Signatures[0].Protected, err) } var protected protectedBlock @@ -568,7 +566,7 @@ func stripSignature(b []byte) ([]byte, error) { tail, err := joseBase64UrlDecode(protected.Tail) if err != nil { - return nil, errors.Wrap(err, "invalid tail base 64 value") + return nil, fmt.Errorf("invalid tail base 64 value: %w", err) } return append(b[:protected.Length], tail...), nil diff --git a/remotes/docker/scope.go b/remotes/docker/scope.go index fe57f02..95b4810 100644 --- a/remotes/docker/scope.go +++ b/remotes/docker/scope.go @@ -74,7 +74,7 @@ func ContextWithAppendPullRepositoryScope(ctx context.Context, repo string) cont // GetTokenScopes returns deduplicated and sorted scopes 
from ctx.Value(tokenScopesKey{}) and common scopes. func GetTokenScopes(ctx context.Context, common []string) []string { - var scopes []string + scopes := []string{} if x := ctx.Value(tokenScopesKey{}); x != nil { scopes = append(scopes, x.([]string)...) } @@ -82,6 +82,10 @@ func GetTokenScopes(ctx context.Context, common []string) []string { scopes = append(scopes, common...) sort.Strings(scopes) + if len(scopes) == 0 { + return scopes + } + l := 0 for idx := 1; idx < len(scopes); idx++ { // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) diff --git a/remotes/docker/scope_test.go b/remotes/docker/scope_test.go index a122978..42e9f7c 100644 --- a/remotes/docker/scope_test.go +++ b/remotes/docker/scope_test.go @@ -63,6 +63,11 @@ func TestGetTokenScopes(t *testing.T) { commonScopes []string expected []string }{ + { + scopesInCtx: []string{}, + commonScopes: []string{}, + expected: []string{}, + }, { scopesInCtx: []string{}, commonScopes: []string{"repository:foo/bar:pull"}, diff --git a/remotes/docker/status.go b/remotes/docker/status.go index 9751eda..1f7b278 100644 --- a/remotes/docker/status.go +++ b/remotes/docker/status.go @@ -17,12 +17,12 @@ package docker import ( + "fmt" "sync" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/moby/locker" - "github.com/pkg/errors" ) // Status of a content operation @@ -31,6 +31,9 @@ type Status struct { Committed bool + // ErrClosed contains error encountered on close. 
+ ErrClosed error + // UploadUUID is used by the Docker registry to reference blob uploads UploadUUID string } @@ -67,7 +70,7 @@ func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) { defer t.m.Unlock() status, ok := t.statuses[ref] if !ok { - return Status{}, errors.Wrapf(errdefs.ErrNotFound, "status for ref %v", ref) + return Status{}, fmt.Errorf("status for ref %v: %w", ref, errdefs.ErrNotFound) } return status, nil } diff --git a/remotes/errors/errors.go b/remotes/errors/errors.go index 519dbac..67ccb23 100644 --- a/remotes/errors/errors.go +++ b/remotes/errors/errors.go @@ -19,7 +19,6 @@ package errors import ( "fmt" "io" - "io/ioutil" "net/http" ) @@ -41,7 +40,7 @@ func (e ErrUnexpectedStatus) Error() string { func NewUnexpectedStatusErr(resp *http.Response) error { var b []byte if resp.Body != nil { - b, _ = ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB + b, _ = io.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB } err := ErrUnexpectedStatus{ Body: b, diff --git a/remotes/handlers.go b/remotes/handlers.go index 8f79c60..4d91ed2 100644 --- a/remotes/handlers.go +++ b/remotes/handlers.go @@ -18,6 +18,7 @@ package remotes import ( "context" + "errors" "fmt" "io" "strings" @@ -29,7 +30,6 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" ) @@ -127,13 +127,13 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc // most likely a poorly configured registry/web front end which responded with no // Content-Length header; unable (not to mention useless) to commit a 0-length entry // into the content store. 
Error out here otherwise the error sent back is confusing - return errors.Wrapf(errdefs.ErrInvalidArgument, "unable to fetch descriptor (%s) which reports content size of zero", desc.Digest) + return fmt.Errorf("unable to fetch descriptor (%s) which reports content size of zero: %w", desc.Digest, errdefs.ErrInvalidArgument) } if ws.Offset == desc.Size { // If writer is already complete, commit and return err := cw.Commit(ctx, desc.Size, desc.Digest) if err != nil && !errdefs.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed commit on ref %q", ws.Ref) + return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) } return nil } @@ -243,8 +243,8 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st // as a marker for this problem if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex || manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) && - errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") { - return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry") + errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") { + return fmt.Errorf("manifest list/index references to blobs and/or manifests are missing in your target registry: %w", err) } return err } @@ -253,6 +253,43 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st return nil } +// SkipNonDistributableBlobs returns a handler that skips blobs that have a media type that is "non-distributeable". +// An example of this kind of content would be a Windows base layer, which is not supposed to be redistributed. 
+// +// This is based on the media type of the content: +// - application/vnd.oci.image.layer.nondistributable +// - application/vnd.docker.image.rootfs.foreign +func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if images.IsNonDistributable(desc.MediaType) { + log.G(ctx).WithField("digest", desc.Digest).WithField("mediatype", desc.MediaType).Debug("Skipping non-distributable blob") + return nil, images.ErrSkipDesc + } + + if images.IsLayerType(desc.MediaType) { + return nil, nil + } + + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + if len(children) == 0 { + return nil, nil + } + + out := make([]ocispec.Descriptor, 0, len(children)) + for _, child := range children { + if !images.IsNonDistributable(child.MediaType) { + out = append(out, child) + } else { + log.G(ctx).WithField("digest", child.Digest).WithField("mediatype", child.MediaType).Debug("Skipping non-distributable blob") + } + } + return out, nil + } +} + // FilterManifestByPlatformHandler allows Handler to handle non-target // platform's manifest and configuration data. 
func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc { diff --git a/remotes/handlers_test.go b/remotes/handlers_test.go index 56db983..c0446e1 100644 --- a/remotes/handlers_test.go +++ b/remotes/handlers_test.go @@ -18,9 +18,15 @@ package remotes import ( "context" + _ "crypto/sha256" + "encoding/json" + "sync" "testing" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/content/local" "github.com/containerd/containerd/images" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -71,3 +77,141 @@ func TestContextCustomKeyPrefix(t *testing.T) { } }) } + +func TestSkipNonDistributableBlobs(t *testing.T) { + ctx := context.Background() + + out, err := SkipNonDistributableBlobs(images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + return []ocispec.Descriptor{ + {MediaType: images.MediaTypeDockerSchema2Layer, Digest: "test:1"}, + {MediaType: images.MediaTypeDockerSchema2LayerForeign, Digest: "test:2"}, + {MediaType: images.MediaTypeDockerSchema2LayerForeignGzip, Digest: "test:3"}, + {MediaType: ocispec.MediaTypeImageLayerNonDistributable, Digest: "test:4"}, + {MediaType: ocispec.MediaTypeImageLayerNonDistributableGzip, Digest: "test:5"}, + {MediaType: ocispec.MediaTypeImageLayerNonDistributableZstd, Digest: "test:6"}, + }, nil + }))(ctx, ocispec.Descriptor{MediaType: images.MediaTypeDockerSchema2Manifest}) + if err != nil { + t.Fatal(err) + } + + if len(out) != 1 { + t.Fatalf("unexpected number of descriptors returned: %d", len(out)) + } + if out[0].Digest != "test:1" { + t.Fatalf("unexpected digest returned: %s", out[0].Digest) + } + + dir := t.TempDir() + cs, err := local.NewLabeledStore(dir, newMemoryLabelStore()) + if err != nil { + t.Fatal(err) + } + + write := func(i interface{}, ref string) digest.Digest { + t.Helper() + + data, err := json.Marshal(i) + if err != nil { + t.Fatal(err) + } + 
+ w, err := cs.Writer(ctx, content.WithRef(ref)) + if err != nil { + t.Fatal(err) + } + defer w.Close() + + dgst := digest.SHA256.FromBytes(data) + + n, err := w.Write(data) + if err != nil { + t.Fatal(err) + } + + if err := w.Commit(ctx, int64(n), dgst); err != nil { + t.Fatal(err) + } + + return dgst + } + + configDigest := write(ocispec.ImageConfig{}, "config") + + manifest := ocispec.Manifest{ + Config: ocispec.Descriptor{Digest: configDigest, MediaType: ocispec.MediaTypeImageConfig}, + MediaType: ocispec.MediaTypeImageManifest, + Layers: []ocispec.Descriptor{ + {MediaType: images.MediaTypeDockerSchema2Layer, Digest: "test:1"}, + {MediaType: images.MediaTypeDockerSchema2LayerForeign, Digest: "test:2"}, + {MediaType: images.MediaTypeDockerSchema2LayerForeignGzip, Digest: "test:3"}, + {MediaType: ocispec.MediaTypeImageLayerNonDistributable, Digest: "test:4"}, + {MediaType: ocispec.MediaTypeImageLayerNonDistributableGzip, Digest: "test:5"}, + {MediaType: ocispec.MediaTypeImageLayerNonDistributableZstd, Digest: "test:6"}, + }, + } + + manifestDigest := write(manifest, "manifest") + + out, err = SkipNonDistributableBlobs(images.ChildrenHandler(cs))(ctx, ocispec.Descriptor{MediaType: manifest.MediaType, Digest: manifestDigest}) + if err != nil { + t.Fatal(err) + } + + if len(out) != 2 { + t.Fatalf("unexpected number of descriptors returned: %v", out) + } + + if out[0].Digest != configDigest { + t.Fatalf("unexpected digest returned: %v", out[0]) + } + if out[1].Digest != manifest.Layers[0].Digest { + t.Fatalf("unexpected digest returned: %v", out[1]) + } +} + +type memoryLabelStore struct { + l sync.Mutex + labels map[digest.Digest]map[string]string +} + +func newMemoryLabelStore() local.LabelStore { + return &memoryLabelStore{ + labels: map[digest.Digest]map[string]string{}, + } +} + +func (mls *memoryLabelStore) Get(d digest.Digest) (map[string]string, error) { + mls.l.Lock() + labels := mls.labels[d] + mls.l.Unlock() + + return labels, nil +} + +func (mls 
*memoryLabelStore) Set(d digest.Digest, labels map[string]string) error { + mls.l.Lock() + mls.labels[d] = labels + mls.l.Unlock() + + return nil +} + +func (mls *memoryLabelStore) Update(d digest.Digest, update map[string]string) (map[string]string, error) { + mls.l.Lock() + labels, ok := mls.labels[d] + if !ok { + labels = map[string]string{} + } + for k, v := range update { + if v == "" { + delete(labels, k) + } else { + labels[k] = v + } + } + mls.labels[d] = labels + mls.l.Unlock() + + return labels, nil +} diff --git a/reports/2017-01-13.md b/reports/2017-01-13.md index bf9b3e1..0275e02 100644 --- a/reports/2017-01-13.md +++ b/reports/2017-01-13.md @@ -41,6 +41,6 @@ We finished porting over the shim from the existing containerd implementation th Next week we will be working towards a full PoC with the runtime, storage, and fetching of images. Getting the core functionality up and running quickly is important to us to ensure that integration between the different subsystems in the core flow well together. We want to make sure the responsibilities of pulling an image from a remote source do not spill into the storage layer and vice-versa. -We still have more documentation work to do on the design and lifecycle of components in the core which is another focus for next week. You can find the current design docs in the [repo here](https://github.com/containerd/containerd/tree/master/design). +We still have more documentation work to do on the design and lifecycle of components in the core which is another focus for next week. You can find the current design docs in the [repo here](https://github.com/containerd/containerd/tree/main/design). I hope this status report helps. If you are looking for ways to contribute, check out the issues on the current milestone. 
diff --git a/rootfs/apply.go b/rootfs/apply.go index f1ca624..35eae6d 100644 --- a/rootfs/apply.go +++ b/rootfs/apply.go @@ -18,9 +18,9 @@ package rootfs import ( "context" + "crypto/rand" "encoding/base64" "fmt" - "math/rand" "time" "github.com/containerd/containerd/diff" @@ -31,7 +31,6 @@ import ( "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // Layer represents the descriptors for a layer diff. These descriptions @@ -68,7 +67,7 @@ func ApplyLayersWithOpts(ctx context.Context, layers []Layer, sn snapshots.Snaps _, err := sn.Stat(ctx, chainID.String()) if err != nil { if !errdefs.IsNotFound(err) { - return "", errors.Wrapf(err, "failed to stat snapshot %s", chainID) + return "", fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) } if err := applyLayers(ctx, layers, chain, sn, a, nil, applyOpts); err != nil && !errdefs.IsAlreadyExists(err) { @@ -96,7 +95,7 @@ func ApplyLayerWithOpts(ctx context.Context, layer Layer, chain []digest.Digest, ) if _, err := sn.Stat(ctx, chainID); err != nil { if !errdefs.IsNotFound(err) { - return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID) + return false, fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) } if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts, applyOpts); err != nil { @@ -143,7 +142,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn } // Already exists should have the caller retry - return errors.Wrapf(err, "failed to prepare extraction snapshot %q", key) + return fmt.Errorf("failed to prepare extraction snapshot %q: %w", key, err) } break @@ -162,16 +161,16 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn diff, err = a.Apply(ctx, layer.Blob, mounts, applyOpts...) 
if err != nil { - err = errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest) + err = fmt.Errorf("failed to extract layer %s: %w", layer.Diff.Digest, err) return err } if diff.Digest != layer.Diff.Digest { - err = errors.Errorf("wrong diff id calculated on extraction %q", diff.Digest) + err = fmt.Errorf("wrong diff id calculated on extraction %q", diff.Digest) return err } if err = sn.Commit(ctx, chainID.String(), key, opts...); err != nil { - err = errors.Wrapf(err, "failed to commit snapshot %s", key) + err = fmt.Errorf("failed to commit snapshot %s: %w", key, err) return err } diff --git a/rootfs/diff.go b/rootfs/diff.go index f396c73..226cebc 100644 --- a/rootfs/diff.go +++ b/rootfs/diff.go @@ -44,7 +44,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter return ocispec.Descriptor{}, err } - lowerKey := fmt.Sprintf("%s-parent-view", info.Parent) + lowerKey := fmt.Sprintf("%s-parent-view-%s", info.Parent, uniquePart()) lower, err := sn.View(ctx, lowerKey, info.Parent) if err != nil { return ocispec.Descriptor{}, err @@ -58,7 +58,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter return ocispec.Descriptor{}, err } } else { - upperKey := fmt.Sprintf("%s-view", snapshotID) + upperKey := fmt.Sprintf("%s-view-%s", snapshotID, uniquePart()) upper, err = sn.View(ctx, upperKey, snapshotID) if err != nil { return ocispec.Descriptor{}, err diff --git a/rootfs/init.go b/rootfs/init.go index 9316b9d..02d13bf 100644 --- a/rootfs/init.go +++ b/rootfs/init.go @@ -18,15 +18,14 @@ package rootfs import ( "context" + "errors" "fmt" - "io/ioutil" "os" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/snapshots" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) var ( @@ -45,7 +44,7 @@ type Mounter interface { func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly bool, snapshotter 
snapshots.Snapshotter, mounter Mounter) ([]mount.Mount, error) { _, err := snapshotter.Stat(ctx, name) if err == nil { - return nil, errors.Errorf("rootfs already exists") + return nil, errors.New("rootfs already exists") } // TODO: ensure not exist error once added to snapshot package @@ -75,7 +74,7 @@ func createInitLayer(ctx context.Context, parent, initName string, initFn func(s // TODO: ensure not exist error once added to snapshot package // Create tempdir - td, err := ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "create-init-") + td, err := os.MkdirTemp(os.Getenv("XDG_RUNTIME_DIR"), "create-init-") if err != nil { return "", err } diff --git a/rootfs/init_other.go b/rootfs/init_other.go index 2611210..d8e38d4 100644 --- a/rootfs/init_other.go +++ b/rootfs/init_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/runtime/monitor.go b/runtime/monitor.go index eb07ebd..df5cf7f 100644 --- a/runtime/monitor.go +++ b/runtime/monitor.go @@ -18,10 +18,11 @@ package runtime // TaskMonitor provides an interface for monitoring of containers within containerd type TaskMonitor interface { - // Monitor adds the provided container to the monitor - Monitor(Task) error + // Monitor adds the provided container to the monitor. + // Labels are optional (can be nil) key value pairs to be added to the metrics namespace. 
+ Monitor(task Task, labels map[string]string) error // Stop stops and removes the provided container from the monitor - Stop(Task) error + Stop(task Task) error } // NewMultiTaskMonitor returns a new TaskMonitor broadcasting to the provided monitors @@ -39,7 +40,7 @@ func NewNoopMonitor() TaskMonitor { type noopTaskMonitor struct { } -func (mm *noopTaskMonitor) Monitor(c Task) error { +func (mm *noopTaskMonitor) Monitor(c Task, labels map[string]string) error { return nil } @@ -51,9 +52,9 @@ type multiTaskMonitor struct { monitors []TaskMonitor } -func (mm *multiTaskMonitor) Monitor(c Task) error { +func (mm *multiTaskMonitor) Monitor(task Task, labels map[string]string) error { for _, m := range mm.monitors { - if err := m.Monitor(c); err != nil { + if err := m.Monitor(task, labels); err != nil { return err } } diff --git a/runtime/opts/opts_linux.go b/runtime/opts/opts_linux.go index eb509da..9bf94f4 100644 --- a/runtime/opts/opts_linux.go +++ b/runtime/opts/opts_linux.go @@ -29,9 +29,6 @@ func WithNamespaceCgroupDeletion(ctx context.Context, i *namespaces.DeleteInfo) if cgroups.Mode() == cgroups.Unified { cg, err := cgroupsv2.LoadManager("/sys/fs/cgroup", i.Name) if err != nil { - if err == cgroupsv2.ErrCgroupDeleted { - return nil - } return err } return cg.Delete() diff --git a/runtime/restart/monitor/change.go b/runtime/restart/monitor/change.go index 7de6f67..a74b3dc 100644 --- a/runtime/restart/monitor/change.go +++ b/runtime/restart/monitor/change.go @@ -18,12 +18,12 @@ package monitor import ( "context" + "fmt" "net/url" "syscall" "github.com/containerd/containerd" "github.com/containerd/containerd/cio" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -49,7 +49,7 @@ func (s *startChange) apply(ctx context.Context, client *containerd.Client) erro if s.logURI != "" { uri, err := url.Parse(s.logURI) if err != nil { - return errors.Wrapf(err, "failed to parse %v into url", s.logURI) + return fmt.Errorf("failed to parse %v into url: %w", s.logURI, 
err) } log = cio.LogURI(uri) } else if s.logPath != "" { diff --git a/runtime/restart/monitor/monitor.go b/runtime/restart/monitor/monitor.go index 1c657bb..a504ad9 100644 --- a/runtime/restart/monitor/monitor.go +++ b/runtime/restart/monitor/monitor.go @@ -35,7 +35,6 @@ import ( "github.com/containerd/containerd/runtime/restart" "github.com/containerd/containerd/services" "github.com/containerd/containerd/snapshots" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -63,6 +62,7 @@ func init() { plugin.Register(&plugin.Registration{ Type: plugin.InternalPlugin, Requires: []plugin.Type{ + plugin.EventPlugin, plugin.ServicePlugin, }, ID: "restart", @@ -93,32 +93,38 @@ func init() { func getServicesOpts(ic *plugin.InitContext) ([]containerd.ServicesOpt, error) { plugins, err := ic.GetByType(plugin.ServicePlugin) if err != nil { - return nil, errors.Wrap(err, "failed to get service plugin") + return nil, fmt.Errorf("failed to get service plugin: %w", err) } + + ep, err := ic.Get(plugin.EventPlugin) + if err != nil { + return nil, fmt.Errorf("failed to get event plugin: %w", err) + } + opts := []containerd.ServicesOpt{ - containerd.WithEventService(ic.Events), + containerd.WithEventService(ep.(containerd.EventService)), } for s, fn := range map[string]func(interface{}) containerd.ServicesOpt{ services.ContentService: func(s interface{}) containerd.ServicesOpt { return containerd.WithContentStore(s.(content.Store)) }, services.ImagesService: func(s interface{}) containerd.ServicesOpt { - return containerd.WithImageService(s.(images.ImagesClient)) + return containerd.WithImageClient(s.(images.ImagesClient)) }, services.SnapshotsService: func(s interface{}) containerd.ServicesOpt { return containerd.WithSnapshotters(s.(map[string]snapshots.Snapshotter)) }, services.ContainersService: func(s interface{}) containerd.ServicesOpt { - return containerd.WithContainerService(s.(containers.ContainersClient)) + return 
containerd.WithContainerClient(s.(containers.ContainersClient)) }, services.TasksService: func(s interface{}) containerd.ServicesOpt { - return containerd.WithTaskService(s.(tasks.TasksClient)) + return containerd.WithTaskClient(s.(tasks.TasksClient)) }, services.DiffService: func(s interface{}) containerd.ServicesOpt { - return containerd.WithDiffService(s.(diff.DiffClient)) + return containerd.WithDiffClient(s.(diff.DiffClient)) }, services.NamespacesService: func(s interface{}) containerd.ServicesOpt { - return containerd.WithNamespaceService(s.(namespacesapi.NamespacesClient)) + return containerd.WithNamespaceClient(s.(namespacesapi.NamespacesClient)) }, services.LeasesService: func(s interface{}) containerd.ServicesOpt { return containerd.WithLeasesService(s.(leases.Manager)) @@ -126,14 +132,14 @@ func getServicesOpts(ic *plugin.InitContext) ([]containerd.ServicesOpt, error) { } { p := plugins[s] if p == nil { - return nil, errors.Errorf("service %q not found", s) + return nil, fmt.Errorf("service %q not found", s) } i, err := p.Instance() if err != nil { - return nil, errors.Wrapf(err, "failed to get instance of service %q", s) + return nil, fmt.Errorf("failed to get instance of service %q: %w", s, err) } if i == nil { - return nil, errors.Errorf("instance of service %q not found", s) + return nil, fmt.Errorf("instance of service %q not found", s) } opts = append(opts, fn(i)) } diff --git a/runtime/runtime.go b/runtime/runtime.go index 3d758fb..84aaa8a 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -46,7 +46,8 @@ type CreateOpts struct { RuntimeOptions *types.Any // TaskOptions received for the task TaskOptions *types.Any - // Runtime to use + // Runtime name to use (e.g. `io.containerd.NAME.VERSION`). + // As an alternative full abs path to binary may be specified instead. Runtime string } @@ -69,8 +70,6 @@ type PlatformRuntime interface { // Tasks returns all the current tasks for the runtime. // Any container runs at most one task at a time. 
Tasks(ctx context.Context, all bool) ([]Task, error) - // Add adds a task into runtime. - Add(ctx context.Context, task Task) error // Delete remove a task. - Delete(ctx context.Context, taskID string) + Delete(ctx context.Context, taskID string) (*Exit, error) } diff --git a/runtime/task.go b/runtime/task.go index c9876ed..e453fa3 100644 --- a/runtime/task.go +++ b/runtime/task.go @@ -47,6 +47,14 @@ type Process interface { Start(ctx context.Context) error // Wait for the process to exit Wait(ctx context.Context) (*Exit, error) +} + +// ExecProcess is a process spawned in container via Task.Exec call. +// The only difference from a regular `Process` is that exec process can delete self, +// while task process requires slightly more complex logic and needs to be deleted through the task manager. +type ExecProcess interface { + Process + // Delete deletes the process Delete(ctx context.Context) (*Exit, error) } @@ -56,7 +64,7 @@ type Task interface { Process // PID of the process - PID() uint32 + PID(ctx context.Context) (uint32, error) // Namespace that the task exists in Namespace() string // Pause pauses the container process @@ -64,7 +72,7 @@ type Task interface { // Resume unpauses the container process Resume(ctx context.Context) error // Exec adds a process into the container - Exec(ctx context.Context, id string, opts ExecOpts) (Process, error) + Exec(ctx context.Context, id string, opts ExecOpts) (ExecProcess, error) // Pids returns all pids Pids(ctx context.Context) ([]ProcessInfo, error) // Checkpoint checkpoints a container to an image with live system data @@ -72,7 +80,7 @@ type Task interface { // Update sets the provided resources to a running task Update(ctx context.Context, resources *types.Any, annotations map[string]string) error // Process returns a process within the task for the provided id - Process(ctx context.Context, id string) (Process, error) + Process(ctx context.Context, id string) (ExecProcess, error) // Stats returns runtime specific 
metrics for a task Stats(ctx context.Context) (*types.Any, error) } diff --git a/runtime/task_list.go b/runtime/task_list.go index b92c6e0..f24d689 100644 --- a/runtime/task_list.go +++ b/runtime/task_list.go @@ -18,10 +18,11 @@ package runtime import ( "context" + "errors" + "fmt" "sync" "github.com/containerd/containerd/namespaces" - "github.com/pkg/errors" ) var ( @@ -109,7 +110,7 @@ func (l *TaskList) AddWithNamespace(namespace string, t Task) error { l.tasks[namespace] = make(map[string]Task) } if _, ok := l.tasks[namespace][id]; ok { - return errors.Wrap(ErrTaskAlreadyExists, id) + return fmt.Errorf("%s: %w", id, ErrTaskAlreadyExists) } l.tasks[namespace][id] = t return nil @@ -128,3 +129,16 @@ func (l *TaskList) Delete(ctx context.Context, id string) { delete(tasks, id) } } + +func (l *TaskList) IsEmpty() bool { + l.mu.Lock() + defer l.mu.Unlock() + + for ns := range l.tasks { + if len(l.tasks[ns]) > 0 { + return false + } + } + + return true +} diff --git a/runtime/v1/linux/bundle.go b/runtime/v1/linux/bundle.go index 48d81e8..b1830d0 100644 --- a/runtime/v1/linux/bundle.go +++ b/runtime/v1/linux/bundle.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -23,7 +24,6 @@ import ( "crypto/sha256" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" @@ -32,7 +32,6 @@ import ( "github.com/containerd/containerd/runtime/v1/shim" "github.com/containerd/containerd/runtime/v1/shim/client" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) // loadBundle loads an existing bundle from disk @@ -74,7 +73,7 @@ func newBundle(id, path, workDir string, spec []byte) (b *bundle, err error) { if err := os.MkdirAll(rootfs, 0711); err != nil { return nil, err } - err = ioutil.WriteFile(filepath.Join(path, configFilename), spec, 0666) + err = os.WriteFile(filepath.Join(path, configFilename), spec, 0666) return &bundle{ id: id, path: path, @@ -185,7 +184,7 @@ func (b *bundle) Delete() error { if err2 == nil { return err } - return 
errors.Wrapf(err, "Failed to remove both bundle and workdir locations: %v", err2) + return fmt.Errorf("Failed to remove both bundle and workdir locations: %v: %w", err2, err) } func (b *bundle) legacyShimAddress(namespace string) string { @@ -201,7 +200,7 @@ func (b *bundle) shimAddress(namespace, socketPath string) string { func (b *bundle) loadAddress() (string, error) { addressPath := filepath.Join(b.path, "address") - data, err := ioutil.ReadFile(addressPath) + data, err := os.ReadFile(addressPath) if err != nil { return "", err } diff --git a/runtime/v1/linux/bundle_test.go b/runtime/v1/linux/bundle_test.go index e021dda..2424110 100644 --- a/runtime/v1/linux/bundle_test.go +++ b/runtime/v1/linux/bundle_test.go @@ -22,7 +22,6 @@ package linux import ( "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -49,9 +48,7 @@ func TestNewBundle(t *testing.T) { for i, tc := range tests { t.Run(strconv.Itoa(i), func(t *testing.T) { - dir, err := ioutil.TempDir("", "test-new-bundle") - require.NoError(t, err, "failed to create test directory") - defer os.RemoveAll(dir) + dir := t.TempDir() work := filepath.Join(dir, "work") state := filepath.Join(dir, "state") id := fmt.Sprintf("new-bundle-%d", i) diff --git a/runtime/v1/linux/process.go b/runtime/v1/linux/process.go index c277745..b7f9e0d 100644 --- a/runtime/v1/linux/process.go +++ b/runtime/v1/linux/process.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,6 +21,7 @@ package linux import ( "context" + "errors" eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/api/types/task" @@ -27,7 +29,6 @@ import ( "github.com/containerd/containerd/runtime" shim "github.com/containerd/containerd/runtime/v1/shim/v1" "github.com/containerd/ttrpc" - "github.com/pkg/errors" ) // Process implements a linux process diff --git a/runtime/v1/linux/runtime.go b/runtime/v1/linux/runtime.go index aa6d3f3..b6d5382 100644 --- a/runtime/v1/linux/runtime.go +++ 
b/runtime/v1/linux/runtime.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,9 +21,9 @@ package linux import ( "context" + "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "time" @@ -43,12 +44,11 @@ import ( "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/linux/runctypes" v1 "github.com/containerd/containerd/runtime/v1" - shim "github.com/containerd/containerd/runtime/v1/shim/v1" - runc "github.com/containerd/go-runc" + "github.com/containerd/containerd/runtime/v1/shim/v1" + "github.com/containerd/go-runc" "github.com/containerd/typeurl" ptypes "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -73,6 +73,7 @@ func init() { ID: "linux", InitFn: New, Requires: []plugin.Type{ + plugin.EventPlugin, plugin.MetadataPlugin, }, Config: &Config{ @@ -112,6 +113,12 @@ func New(ic *plugin.InitContext) (interface{}, error) { if err != nil { return nil, err } + + ep, err := ic.GetByID(plugin.EventPlugin, "exchange") + if err != nil { + return nil, err + } + cfg := ic.Config.(*Config) r := &Runtime{ root: ic.Root, @@ -119,7 +126,7 @@ func New(ic *plugin.InitContext) (interface{}, error) { tasks: runtime.NewTaskList(), containers: metadata.NewContainerStore(m.(*metadata.DB)), address: ic.Address, - events: ic.Events, + events: ep.(*exchange.Exchange), config: cfg, } tasks, err := r.restoreTasks(ic.Context) @@ -160,7 +167,7 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts } if err := identifiers.Validate(id); err != nil { - return nil, errors.Wrapf(err, "invalid task id") + return nil, fmt.Errorf("invalid task id: %w", err) } ropts, err := r.getRuncOptions(ctx, id) @@ -280,7 +287,7 @@ func (r *Runtime) Tasks(ctx context.Context, all bool) ([]runtime.Task, error) { } func (r *Runtime) restoreTasks(ctx context.Context) ([]*Task, error) { - dir, err := 
ioutil.ReadDir(r.state) + dir, err := os.ReadDir(r.state) if err != nil { return nil, err } @@ -315,12 +322,24 @@ func (r *Runtime) Add(ctx context.Context, task runtime.Task) error { } // Delete a runtime task -func (r *Runtime) Delete(ctx context.Context, id string) { +func (r *Runtime) Delete(ctx context.Context, id string) (*runtime.Exit, error) { + task, err := r.tasks.Get(ctx, id) + if err != nil { + return nil, err + } + + s := task.(*Task) + exit, err := s.Delete(ctx) + if err != nil { + return nil, err + } + r.tasks.Delete(ctx, id) + return exit, nil } func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { - dir, err := ioutil.ReadDir(filepath.Join(r.state, ns)) + dir, err := os.ReadDir(filepath.Join(r.state, ns)) if err != nil { return nil, err } @@ -393,7 +412,7 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { if r.config.ShimDebug { go copyAndClose(os.Stdout, shimStdoutLog) } else { - go copyAndClose(ioutil.Discard, shimStdoutLog) + go copyAndClose(io.Discard, shimStdoutLog) } shimStderrLog, err := v1.OpenShimStderrLog(ctx, logDirPath) @@ -408,7 +427,7 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { if r.config.ShimDebug { go copyAndClose(os.Stderr, shimStderrLog) } else { - go copyAndClose(ioutil.Discard, shimStderrLog) + go copyAndClose(io.Discard, shimStderrLog) } t, err := newTask(id, ns, pid, s, r.events, r.tasks, bundle) @@ -431,7 +450,7 @@ func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, ctx = namespaces.WithNamespace(ctx, ns) if err := r.terminate(ctx, bundle, ns, id); err != nil { if r.config.ShimDebug { - return errors.Wrap(err, "failed to terminate task, leaving bundle for debugging") + return fmt.Errorf("failed to terminate task, leaving bundle for debugging: %w", err) } log.G(ctx).WithError(err).Warn("failed to terminate task") } diff --git a/runtime/v1/linux/task.go b/runtime/v1/linux/task.go index 1e9e50c..3ac7839 
100644 --- a/runtime/v1/linux/task.go +++ b/runtime/v1/linux/task.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,6 +21,8 @@ package linux import ( "context" + "errors" + "fmt" "sync" "github.com/containerd/cgroups" @@ -35,7 +38,6 @@ import ( "github.com/containerd/ttrpc" "github.com/containerd/typeurl" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" ) // Task on a linux based system @@ -85,8 +87,8 @@ func (t *Task) Namespace() string { } // PID of the task -func (t *Task) PID() uint32 { - return uint32(t.pid) +func (t *Task) PID(_ context.Context) (uint32, error) { + return uint32(t.pid), nil } // Delete the task and return the exit status @@ -226,9 +228,9 @@ func (t *Task) Kill(ctx context.Context, signal uint32, all bool) error { } // Exec creates a new process inside the task -func (t *Task) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.Process, error) { +func (t *Task) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.ExecProcess, error) { if err := identifiers.Validate(id); err != nil { - return nil, errors.Wrapf(err, "invalid exec id") + return nil, fmt.Errorf("invalid exec id: %w", err) } request := &shim.ExecProcessRequest{ ID: id, @@ -316,7 +318,7 @@ func (t *Task) Update(ctx context.Context, resources *types.Any, _ map[string]st } // Process returns a specific process inside the task by the process id -func (t *Task) Process(ctx context.Context, id string) (runtime.Process, error) { +func (t *Task) Process(ctx context.Context, id string) (runtime.ExecProcess, error) { p := &Process{ id: id, t: t, @@ -332,7 +334,7 @@ func (t *Task) Stats(ctx context.Context) (*types.Any, error) { t.mu.Lock() defer t.mu.Unlock() if t.cg == nil { - return nil, errors.Wrap(errdefs.ErrNotFound, "cgroup does not exist") + return nil, fmt.Errorf("cgroup does not exist: %w", errdefs.ErrNotFound) } stats, err := t.cg.Stat(cgroups.IgnoreNotExist) if err != nil { @@ -346,7 +348,7 @@ func (t *Task) Cgroup() 
(cgroups.Cgroup, error) { t.mu.Lock() defer t.mu.Unlock() if t.cg == nil { - return nil, errors.Wrap(errdefs.ErrNotFound, "cgroup does not exist") + return nil, fmt.Errorf("cgroup does not exist: %w", errdefs.ErrNotFound) } return t.cg, nil } diff --git a/runtime/v1/shim.go b/runtime/v1/shim.go index 0a2018b..34d0dbc 100644 --- a/runtime/v1/shim.go +++ b/runtime/v1/shim.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/runtime/v1/shim/client/client.go b/runtime/v1/shim/client/client.go index a8afb0e..6e5eae5 100644 --- a/runtime/v1/shim/client/client.go +++ b/runtime/v1/shim/client/client.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,12 +21,11 @@ package client import ( "context" + "errors" "fmt" "io" - "io/ioutil" "net" "os" - "os/exec" "path/filepath" "strconv" "strings" @@ -33,20 +33,17 @@ import ( "syscall" "time" - "golang.org/x/sys/unix" - - "github.com/containerd/ttrpc" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/containerd/containerd/events" "github.com/containerd/containerd/log" - "github.com/containerd/containerd/pkg/dialer" v1 "github.com/containerd/containerd/runtime/v1" "github.com/containerd/containerd/runtime/v1/shim" shimapi "github.com/containerd/containerd/runtime/v1/shim/v1" "github.com/containerd/containerd/sys" + "github.com/containerd/ttrpc" ptypes "github.com/gogo/protobuf/types" + "github.com/sirupsen/logrus" + exec "golang.org/x/sys/execabs" + "golang.org/x/sys/unix" ) var empty = &ptypes.Empty{} @@ -63,7 +60,7 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa return nil, nil, err } if err := RemoveSocket(address); err != nil { - return nil, nil, errors.Wrap(err, "remove already used socket") + return nil, nil, fmt.Errorf("remove already used socket: %w", err) } if socket, err = newSocket(address); err != nil { return nil, nil, err @@ -72,20 +69,20 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa f, 
err := socket.File() if err != nil { - return nil, nil, errors.Wrapf(err, "failed to get fd for socket %s", address) + return nil, nil, fmt.Errorf("failed to get fd for socket %s: %w", address, err) } defer f.Close() - stdoutCopy := ioutil.Discard - stderrCopy := ioutil.Discard + stdoutCopy := io.Discard + stderrCopy := io.Discard stdoutLog, err := v1.OpenShimStdoutLog(ctx, config.WorkDir) if err != nil { - return nil, nil, errors.Wrapf(err, "failed to create stdout log") + return nil, nil, fmt.Errorf("failed to create stdout log: %w", err) } stderrLog, err := v1.OpenShimStderrLog(ctx, config.WorkDir) if err != nil { - return nil, nil, errors.Wrapf(err, "failed to create stderr log") + return nil, nil, fmt.Errorf("failed to create stderr log: %w", err) } if debug { stdoutCopy = os.Stdout @@ -100,7 +97,7 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa return nil, nil, err } if err := cmd.Start(); err != nil { - return nil, nil, errors.Wrapf(err, "failed to start shim") + return nil, nil, fmt.Errorf("failed to start shim: %w", err) } defer func() { if err != nil { @@ -146,14 +143,14 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa } c, clo, err := WithConnect(address, func() {})(ctx, config) if err != nil { - return nil, nil, errors.Wrap(err, "failed to connect") + return nil, nil, fmt.Errorf("failed to connect: %w", err) } return c, clo, nil } } func eaddrinuse(err error) bool { - cause := errors.Cause(err) + cause := errors.Unwrap(err) netErr, ok := cause.(*net.OpError) if !ok { return false @@ -179,11 +176,11 @@ func setupOOMScore(shimPid int) error { pid := os.Getpid() score, err := sys.GetOOMScoreAdj(pid) if err != nil { - return errors.Wrap(err, "get daemon OOM score") + return fmt.Errorf("get daemon OOM score: %w", err) } shimScore := score + 1 if err := sys.AdjustOOMScore(shimPid, shimScore); err != nil { - return errors.Wrap(err, "set shim OOM score") + return fmt.Errorf("set shim OOM 
score: %w", err) } return nil } @@ -267,7 +264,7 @@ func (s socket) path() string { func newSocket(address string) (*net.UnixListener, error) { if len(address) > socketPathLimit { - return nil, errors.Errorf("%q: unix socket path too long (> %d)", address, socketPathLimit) + return nil, fmt.Errorf("%q: unix socket path too long (> %d)", address, socketPathLimit) } var ( sock = socket(address) @@ -275,12 +272,12 @@ func newSocket(address string) (*net.UnixListener, error) { ) if !sock.isAbstract() { if err := os.MkdirAll(filepath.Dir(path), 0600); err != nil { - return nil, errors.Wrapf(err, "%s", path) + return nil, fmt.Errorf("%s: %w", path, err) } } l, err := net.Listen("unix", path) if err != nil { - return nil, errors.Wrapf(err, "failed to listen to unix socket %q (abstract: %t)", address, sock.isAbstract()) + return nil, fmt.Errorf("failed to listen to unix socket %q (abstract: %t): %w", address, sock.isAbstract(), err) } if err := os.Chmod(path, 0600); err != nil { l.Close() @@ -300,12 +297,19 @@ func RemoveSocket(address string) error { return nil } +// AnonDialer returns a dialer for a socket +// +// NOTE: It is only used for testing. +func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { + return anonDialer(address, timeout) +} + func connect(address string, d func(string, time.Duration) (net.Conn, error)) (net.Conn, error) { return d(address, 100*time.Second) } func anonDialer(address string, timeout time.Duration) (net.Conn, error) { - return dialer.Dialer(socket(address).path(), timeout) + return net.DialTimeout("unix", socket(address).path(), timeout) } // WithConnect connects to an existing shim diff --git a/runtime/v1/shim/client/client_linux.go b/runtime/v1/shim/client/client_linux.go index 2519380..15cf505 100644 --- a/runtime/v1/shim/client/client_linux.go +++ b/runtime/v1/shim/client/client_linux.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. 
@@ -19,11 +17,11 @@ package client import ( - "os/exec" + "fmt" "syscall" "github.com/containerd/cgroups" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" ) func getSysProcAttr() *syscall.SysProcAttr { @@ -35,12 +33,12 @@ func getSysProcAttr() *syscall.SysProcAttr { func setCgroup(cgroupPath string, cmd *exec.Cmd) error { cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(cgroupPath)) if err != nil { - return errors.Wrapf(err, "failed to load cgroup %s", cgroupPath) + return fmt.Errorf("failed to load cgroup %s: %w", cgroupPath, err) } if err := cg.Add(cgroups.Process{ Pid: cmd.Process.Pid, }); err != nil { - return errors.Wrapf(err, "failed to join cgroup %s", cgroupPath) + return fmt.Errorf("failed to join cgroup %s: %w", cgroupPath, err) } return nil } diff --git a/runtime/v1/shim/client/client_unix.go b/runtime/v1/shim/client/client_unix.go index 8a5b22f..5da6151 100644 --- a/runtime/v1/shim/client/client_unix.go +++ b/runtime/v1/shim/client/client_unix.go @@ -1,3 +1,4 @@ +//go:build !linux && !windows // +build !linux,!windows /* @@ -19,8 +20,9 @@ package client import ( - "os/exec" "syscall" + + exec "golang.org/x/sys/execabs" ) func getSysProcAttr() *syscall.SysProcAttr { diff --git a/runtime/v1/shim/local.go b/runtime/v1/shim/local.go index 97f652c..f9a271e 100644 --- a/runtime/v1/shim/local.go +++ b/runtime/v1/shim/local.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/runtime/v1/shim/service.go b/runtime/v1/shim/service.go index 50ac869..a08757d 100644 --- a/runtime/v1/shim/service.go +++ b/runtime/v1/shim/service.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -22,7 +23,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "sync" @@ -45,7 +45,6 @@ import ( "github.com/containerd/typeurl" ptypes "github.com/gogo/protobuf/types" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" 
"google.golang.org/grpc/status" @@ -91,7 +90,7 @@ func NewService(config Config, publisher events.Publisher) (*Service, error) { } go s.processExits() if err := s.initPlatform(); err != nil { - return nil, errors.Wrap(err, "failed to initialized platform behavior") + return nil, fmt.Errorf("failed to initialized platform behavior: %w", err) } go s.forward(publisher) return s, nil @@ -160,7 +159,7 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ * Options: rm.Options, } if err := m.Mount(rootfs); err != nil { - return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) + return nil, fmt.Errorf("failed to mount rootfs component %v: %w", m, err) } } @@ -297,7 +296,7 @@ func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (* p := s.processes[r.ID] s.mu.Unlock() if p == nil { - return nil, errors.Errorf("process does not exist %s", r.ID) + return nil, fmt.Errorf("process does not exist %s", r.ID) } if err := p.Resize(ws); err != nil { return nil, errdefs.ToGRPC(err) @@ -411,7 +410,7 @@ func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*sh } a, err := typeurl.MarshalAny(d) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) + return nil, fmt.Errorf("failed to marshal process %d info: %w", pid, err) } pInfo.Info = a break @@ -432,7 +431,7 @@ func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptyp } if stdin := p.Stdin(); stdin != nil { if err := stdin.Close(); err != nil { - return nil, errors.Wrap(err, "close stdin") + return nil, fmt.Errorf("close stdin: %w", err) } } return empty, nil @@ -542,7 +541,7 @@ func (s *Service) checkProcesses(e runc.Exit) { func shouldKillAllOnExit(ctx context.Context, bundlePath string) bool { var bundleSpec specs.Spec - bundleConfigContents, err := ioutil.ReadFile(filepath.Join(bundlePath, "config.json")) + bundleConfigContents, err := os.ReadFile(filepath.Join(bundlePath, 
"config.json")) if err != nil { log.G(ctx).WithError(err).Error("shouldKillAllOnExit: failed to read config.json") return true diff --git a/runtime/v1/shim/service_linux.go b/runtime/v1/shim/service_linux.go index c095004..4994906 100644 --- a/runtime/v1/shim/service_linux.go +++ b/runtime/v1/shim/service_linux.go @@ -18,6 +18,8 @@ package shim import ( "context" + "errors" + "fmt" "io" "net/url" "os" @@ -28,7 +30,6 @@ import ( "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/process" "github.com/containerd/fifo" - "github.com/pkg/errors" ) type linuxPlatform struct { @@ -65,7 +66,7 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console uri, err := url.Parse(stdout) if err != nil { - return nil, errors.Wrap(err, "unable to parse stdout uri") + return nil, fmt.Errorf("unable to parse stdout uri: %w", err) } switch uri.Scheme { @@ -89,14 +90,14 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console // Create pipe to be used by logging binary for Stdout outR, outW, err := os.Pipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stdout pipes") + return nil, fmt.Errorf("failed to create stdout pipes: %w", err) } filesToClose = append(filesToClose, outR) // Stderr is created for logging binary but unused when terminal is true serrR, _, err := os.Pipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stderr pipes") + return nil, fmt.Errorf("failed to create stderr pipes: %w", err) } filesToClose = append(filesToClose, serrR) @@ -118,18 +119,18 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console }() if err := cmd.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start logging binary process") + return nil, fmt.Errorf("failed to start logging binary process: %w", err) } // Close our side of the pipe after start if err := w.Close(); err != nil { - return nil, errors.Wrap(err, "failed to close write pipe 
after start") + return nil, fmt.Errorf("failed to close write pipe after start: %w", err) } // Wait for the logging binary to be ready b := make([]byte, 1) if _, err := r.Read(b); err != nil && err != io.EOF { - return nil, errors.Wrap(err, "failed to read from logging binary") + return nil, fmt.Errorf("failed to read from logging binary: %w", err) } cwg.Wait() @@ -164,7 +165,7 @@ func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Consol } epollConsole, ok := cons.(*console.EpollConsole) if !ok { - return errors.Errorf("expected EpollConsole, got %#v", cons) + return fmt.Errorf("expected EpollConsole, got %#v", cons) } return epollConsole.Shutdown(p.epoller.CloseConsole) } @@ -181,7 +182,7 @@ func (s *Service) initPlatform() error { } epoller, err := console.NewEpoller() if err != nil { - return errors.Wrap(err, "failed to initialize epoller") + return fmt.Errorf("failed to initialize epoller: %w", err) } s.platform = &linuxPlatform{ epoller: epoller, diff --git a/runtime/v1/shim/service_unix.go b/runtime/v1/shim/service_unix.go index d96ecd6..2e6bfb2 100644 --- a/runtime/v1/shim/service_unix.go +++ b/runtime/v1/shim/service_unix.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux // +build !windows,!linux /* @@ -20,6 +21,7 @@ package shim import ( "context" + "fmt" "io" "net/url" "os" @@ -30,7 +32,6 @@ import ( "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/process" "github.com/containerd/fifo" - "github.com/pkg/errors" ) type unixPlatform struct { @@ -54,7 +55,7 @@ func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console, } uri, err := url.Parse(stdout) if err != nil { - return nil, errors.Wrap(err, "unable to parse stdout uri") + return nil, fmt.Errorf("unable to parse stdout uri: %w", err) } switch uri.Scheme { @@ -77,14 +78,14 @@ func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console, // Create pipe to be used by logging binary for Stdout outR, outW, err 
:= os.Pipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stdout pipes") + return nil, fmt.Errorf("failed to create stdout pipes: %w", err) } filesToClose = append(filesToClose, outR) // Stderr is created for logging binary but unused when terminal is true serrR, _, err := os.Pipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stderr pipes") + return nil, fmt.Errorf("failed to create stderr pipes: %w", err) } filesToClose = append(filesToClose, serrR) @@ -106,18 +107,18 @@ func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console, }() if err := cmd.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start logging binary process") + return nil, fmt.Errorf("failed to start logging binary process: %w", err) } // Close our side of the pipe after start if err := w.Close(); err != nil { - return nil, errors.Wrap(err, "failed to close write pipe after start") + return nil, fmt.Errorf("failed to close write pipe after start: %w", err) } // Wait for the logging binary to be ready b := make([]byte, 1) if _, err := r.Read(b); err != nil && err != io.EOF { - return nil, errors.Wrap(err, "failed to read from logging binary") + return nil, fmt.Errorf("failed to read from logging binary: %w", err) } cwg.Wait() diff --git a/runtime/v2/binary.go b/runtime/v2/binary.go index 5a62a86..4ef1739 100644 --- a/runtime/v2/binary.go +++ b/runtime/v2/binary.go @@ -19,12 +19,13 @@ package v2 import ( "bytes" "context" + "fmt" "io" "os" + "path/filepath" gruntime "runtime" "strings" - "github.com/containerd/containerd/events/exchange" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/runtime" @@ -32,18 +33,22 @@ import ( "github.com/containerd/containerd/runtime/v2/task" "github.com/containerd/ttrpc" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) -func shimBinary(ctx context.Context, bundle *Bundle, runtime, 
containerdAddress string, containerdTTRPCAddress string, events *exchange.Exchange, rt *runtime.TaskList) *binary { +type shimBinaryConfig struct { + runtime string + address string + ttrpcAddress string + schedCore bool +} + +func shimBinary(bundle *Bundle, config shimBinaryConfig) *binary { return &binary{ bundle: bundle, - runtime: runtime, - containerdAddress: containerdAddress, - containerdTTRPCAddress: containerdTTRPCAddress, - events: events, - rtTasks: rt, + runtime: config.runtime, + containerdAddress: config.address, + containerdTTRPCAddress: config.ttrpcAddress, + schedCore: config.schedCore, } } @@ -51,27 +56,29 @@ type binary struct { runtime string containerdAddress string containerdTTRPCAddress string + schedCore bool bundle *Bundle - events *exchange.Exchange - rtTasks *runtime.TaskList } func (b *binary) Start(ctx context.Context, opts *types.Any, onClose func()) (_ *shim, err error) { args := []string{"-id", b.bundle.ID} - if logrus.GetLevel() == logrus.DebugLevel { + switch log.GetLevel() { + case log.DebugLevel, log.TraceLevel: args = append(args, "-debug") } args = append(args, "start") cmd, err := client.Command( ctx, - b.runtime, - b.containerdAddress, - b.containerdTTRPCAddress, - b.bundle.Path, - opts, - args..., - ) + &client.CommandConfig{ + Runtime: b.runtime, + Address: b.containerdAddress, + TTRPCAddress: b.containerdTTRPCAddress, + Path: b.bundle.Path, + Opts: opts, + Args: args, + SchedCore: b.schedCore, + }) if err != nil { return nil, err } @@ -85,7 +92,7 @@ func (b *binary) Start(ctx context.Context, opts *types.Any, onClose func()) (_ }() f, err := openShimLog(shimCtx, b.bundle, client.AnonDialer) if err != nil { - return nil, errors.Wrap(err, "open shim log pipe") + return nil, fmt.Errorf("open shim log pipe: %w", err) } defer func() { if err != nil { @@ -108,7 +115,7 @@ func (b *binary) Start(ctx context.Context, opts *types.Any, onClose func()) (_ }() out, err := cmd.CombinedOutput() if err != nil { - return nil, 
errors.Wrapf(err, "%s", out) + return nil, fmt.Errorf("%s: %w", out, err) } address := strings.TrimSpace(string(out)) conn, err := client.Connect(address, client.AnonDialer) @@ -120,13 +127,14 @@ func (b *binary) Start(ctx context.Context, opts *types.Any, onClose func()) (_ cancelShimLog() f.Close() } + // Save runtime binary path for restore. + if err := os.WriteFile(filepath.Join(b.bundle.Path, "shim-binary-path"), []byte(b.runtime), 0600); err != nil { + return nil, err + } client := ttrpc.NewClient(conn, ttrpc.WithOnClose(onCloseWithShimLog)) return &shim{ - bundle: b.bundle, - client: client, - task: task.NewTaskClient(client), - events: b.events, - rtTasks: b.rtTasks, + bundle: b.bundle, + client: client, }, nil } @@ -144,14 +152,19 @@ func (b *binary) Delete(ctx context.Context) (*runtime.Exit, error) { } cmd, err := client.Command(ctx, - b.runtime, - b.containerdAddress, - b.containerdTTRPCAddress, - bundlePath, - nil, - "-id", b.bundle.ID, - "-bundle", b.bundle.Path, - "delete") + &client.CommandConfig{ + Runtime: b.runtime, + Address: b.containerdAddress, + TTRPCAddress: b.containerdTTRPCAddress, + Path: bundlePath, + Opts: nil, + Args: []string{ + "-id", b.bundle.ID, + "-bundle", b.bundle.Path, + "delete", + }, + }) + if err != nil { return nil, err } @@ -163,7 +176,7 @@ func (b *binary) Delete(ctx context.Context) (*runtime.Exit, error) { cmd.Stderr = errb if err := cmd.Run(); err != nil { log.G(ctx).WithField("cmd", cmd).WithError(err).Error("failed to delete") - return nil, errors.Wrapf(err, "%s", errb.String()) + return nil, fmt.Errorf("%s: %w", errb.String(), err) } s := errb.String() if s != "" { diff --git a/runtime/v2/bundle.go b/runtime/v2/bundle.go index 954163b..8152a52 100644 --- a/runtime/v2/bundle.go +++ b/runtime/v2/bundle.go @@ -19,14 +19,12 @@ package v2 import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "github.com/containerd/containerd/identifiers" "github.com/containerd/containerd/mount" 
"github.com/containerd/containerd/namespaces" - "github.com/pkg/errors" ) const configFilename = "config.json" @@ -47,7 +45,7 @@ func LoadBundle(ctx context.Context, root, id string) (*Bundle, error) { // NewBundle returns a new bundle on disk func NewBundle(ctx context.Context, root, state, id string, spec []byte) (b *Bundle, err error) { if err := identifiers.Validate(id); err != nil { - return nil, errors.Wrapf(err, "invalid task id %s", id) + return nil, fmt.Errorf("invalid task id %s: %w", id, err) } ns, err := namespaces.NamespaceRequired(ctx) @@ -103,7 +101,7 @@ func NewBundle(ctx context.Context, root, state, id string, spec []byte) (b *Bun return nil, err } // write the spec to the bundle - err = ioutil.WriteFile(filepath.Join(b.Path, configFilename), spec, 0666) + err = os.WriteFile(filepath.Join(b.Path, configFilename), spec, 0666) return b, err } @@ -122,10 +120,10 @@ func (b *Bundle) Delete() error { work, werr := os.Readlink(filepath.Join(b.Path, "work")) rootfs := filepath.Join(b.Path, "rootfs") if err := mount.UnmountAll(rootfs, 0); err != nil { - return errors.Wrapf(err, "unmount rootfs %s", rootfs) + return fmt.Errorf("unmount rootfs %s: %w", rootfs, err) } if err := os.Remove(rootfs); err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "failed to remove bundle rootfs") + return fmt.Errorf("failed to remove bundle rootfs: %w", err) } err := atomicDelete(b.Path) if err == nil { @@ -142,7 +140,7 @@ func (b *Bundle) Delete() error { return err } } - return errors.Wrapf(err, "failed to remove both bundle and workdir locations: %v", err2) + return fmt.Errorf("failed to remove both bundle and workdir locations: %v: %w", err2, err) } // atomicDelete renames the path to a hidden file before removal diff --git a/runtime/v2/bundle_linux_test.go b/runtime/v2/bundle_linux_test.go index 1828fb8..685dc2f 100644 --- a/runtime/v2/bundle_linux_test.go +++ b/runtime/v2/bundle_linux_test.go @@ -20,7 +20,6 @@ import ( "context" "encoding/json" "fmt" - 
"io/ioutil" "os" "path/filepath" "strconv" @@ -48,9 +47,7 @@ func TestNewBundle(t *testing.T) { for i, tc := range tests { t.Run(strconv.Itoa(i), func(t *testing.T) { - dir, err := ioutil.TempDir("", "test-new-bundle") - require.NoError(t, err, "failed to create test directory") - defer os.RemoveAll(dir) + dir := t.TempDir() work := filepath.Join(dir, "work") state := filepath.Join(dir, "state") id := fmt.Sprintf("new-bundle-%d", i) diff --git a/runtime/v2/example/cmd/main.go b/runtime/v2/example/cmd/main.go index fcef0d2..7a59d72 100644 --- a/runtime/v2/example/cmd/main.go +++ b/runtime/v2/example/cmd/main.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/runtime/v2/example/example.go b/runtime/v2/example/example.go index 995b3cc..85ce930 100644 --- a/runtime/v2/example/example.go +++ b/runtime/v2/example/example.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/runtime/v2/logging/logging_unix.go b/runtime/v2/logging/logging_unix.go index cde7179..f04fcc7 100644 --- a/runtime/v2/logging/logging_unix.go +++ b/runtime/v2/logging/logging_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/runtime/v2/logging/logging_windows.go b/runtime/v2/logging/logging_windows.go index 395dc82..67cebaf 100644 --- a/runtime/v2/logging/logging_windows.go +++ b/runtime/v2/logging/logging_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -20,6 +18,7 @@ package logging import ( "context" + "errors" "fmt" "net" "os" @@ -27,7 +26,6 @@ import ( "syscall" "github.com/Microsoft/go-winio" - "github.com/pkg/errors" ) // Run the logging driver @@ -55,19 +53,19 @@ func runInternal(fn LoggerFunc) error { return errors.New("'CONTAINER_STDOUT' environment variable missing") } if sout, err = winio.DialPipeContext(ctx, soutPipe); err != nil { - return errors.Wrap(err, "unable to dial stdout pipe") + return fmt.Errorf("unable to dial stdout pipe: %w", err) } if serrPipe, ok = os.LookupEnv("CONTAINER_STDERR"); !ok { return errors.New("'CONTAINER_STDERR' environment variable missing") } if serr, err = winio.DialPipeContext(ctx, serrPipe); err != nil { - return errors.Wrap(err, "unable to dial stderr pipe") + return fmt.Errorf("unable to dial stderr pipe: %w", err) } waitPipe = os.Getenv("CONTAINER_WAIT") if wait, err = winio.DialPipeContext(ctx, waitPipe); err != nil { - return errors.Wrap(err, "unable to dial wait pipe") + return fmt.Errorf("unable to dial wait pipe: %w", err) } config := &Config{ diff --git a/runtime/v2/manager.go b/runtime/v2/manager.go index f8cdd9c..1927cbb 100644 --- a/runtime/v2/manager.go +++ b/runtime/v2/manager.go @@ -19,29 +19,32 @@ package v2 import ( "context" "fmt" - "io/ioutil" "os" + "os/exec" "path/filepath" + "sync" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events/exchange" "github.com/containerd/containerd/log" "github.com/containerd/containerd/metadata" - "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/timeout" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime" + shimbinary "github.com/containerd/containerd/runtime/v2/shim" + "github.com/containerd/containerd/runtime/v2/task" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - 
"github.com/pkg/errors" ) // Config for the v2 runtime type Config struct { // Supported platforms Platforms []string `toml:"platforms"` + // SchedCore enabled linux core scheduling + SchedCore bool `toml:"sched_core"` } func init() { @@ -49,76 +52,110 @@ func init() { Type: plugin.RuntimePluginV2, ID: "task", Requires: []plugin.Type{ + plugin.EventPlugin, plugin.MetadataPlugin, }, Config: &Config{ Platforms: defaultPlatforms(), }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { - supportedPlatforms, err := parsePlatforms(ic.Config.(*Config).Platforms) + config := ic.Config.(*Config) + supportedPlatforms, err := parsePlatforms(config.Platforms) if err != nil { return nil, err } ic.Meta.Platforms = supportedPlatforms - if err := os.MkdirAll(ic.Root, 0711); err != nil { - return nil, err - } - if err := os.MkdirAll(ic.State, 0711); err != nil { - return nil, err - } + m, err := ic.Get(plugin.MetadataPlugin) if err != nil { return nil, err } + ep, err := ic.GetByID(plugin.EventPlugin, "exchange") + if err != nil { + return nil, err + } cs := metadata.NewContainerStore(m.(*metadata.DB)) + events := ep.(*exchange.Exchange) - return New(ic.Context, ic.Root, ic.State, ic.Address, ic.TTRPCAddress, ic.Events, cs) + shimManager, err := NewShimManager(ic.Context, &ManagerConfig{ + Root: ic.Root, + State: ic.State, + Address: ic.Address, + TTRPCAddress: ic.TTRPCAddress, + Events: events, + Store: cs, + SchedCore: config.SchedCore, + }) + if err != nil { + return nil, err + } + + return NewTaskManager(shimManager), nil }, }) } -// New task manager for v2 shims -func New(ctx context.Context, root, state, containerdAddress, containerdTTRPCAddress string, events *exchange.Exchange, cs containers.Store) (*TaskManager, error) { - for _, d := range []string{root, state} { +type ManagerConfig struct { + Root string + State string + Store containers.Store + Events *exchange.Exchange + Address string + TTRPCAddress string + SchedCore bool +} + +// NewShimManager creates a 
manager for v2 shims +func NewShimManager(ctx context.Context, config *ManagerConfig) (*ShimManager, error) { + for _, d := range []string{config.Root, config.State} { if err := os.MkdirAll(d, 0711); err != nil { return nil, err } } - m := &TaskManager{ - root: root, - state: state, - containerdAddress: containerdAddress, - containerdTTRPCAddress: containerdTTRPCAddress, - tasks: runtime.NewTaskList(), - events: events, - containers: cs, + + m := &ShimManager{ + root: config.Root, + state: config.State, + containerdAddress: config.Address, + containerdTTRPCAddress: config.TTRPCAddress, + shims: runtime.NewTaskList(), + events: config.Events, + containers: config.Store, + schedCore: config.SchedCore, } + if err := m.loadExistingTasks(ctx); err != nil { return nil, err } + return m, nil } -// TaskManager manages v2 shim's and their tasks -type TaskManager struct { +// ShimManager manages currently running shim processes. +// It is mainly responsible for launching new shims and for proper shutdown and cleanup of existing instances. +// The manager is unaware of the underlying services shim provides and lets higher level services consume them, +// but don't care about lifecycle management. 
+type ShimManager struct { root string state string containerdAddress string containerdTTRPCAddress string - - tasks *runtime.TaskList - events *exchange.Exchange - containers containers.Store + schedCore bool + shims *runtime.TaskList + events *exchange.Exchange + containers containers.Store + // runtimePaths is a cache of `runtime names` -> `resolved fs path` + runtimePaths sync.Map } -// ID of the task manager -func (m *TaskManager) ID() string { - return fmt.Sprintf("%s.%s", plugin.RuntimePluginV2, "task") +// ID of the shim manager +func (m *ShimManager) ID() string { + return fmt.Sprintf("%s.%s", plugin.RuntimePluginV2, "shim") } -// Create a new task -func (m *TaskManager) Create(ctx context.Context, id string, opts runtime.CreateOpts) (_ runtime.Task, retErr error) { +// Start launches a new shim instance +func (m *ShimManager) Start(ctx context.Context, id string, opts runtime.CreateOpts) (_ ShimProcess, retErr error) { bundle, err := NewBundle(ctx, m.root, m.state, id, opts.Spec.Value) if err != nil { return nil, err @@ -135,23 +172,25 @@ func (m *TaskManager) Create(ctx context.Context, id string, opts runtime.Create } defer func() { if retErr != nil { - m.deleteShim(shim) + m.cleanupShim(shim) } }() - t, err := shim.Create(ctx, opts) - if err != nil { - return nil, errors.Wrap(err, "failed to create shim") + // NOTE: temporarily keep this wrapper around until containerd's task service depends on it. + // This will no longer be required once we migrate to client side task management. 
+ shimTask := &shimTask{ + shim: shim, + task: task.NewTaskClient(shim.client), } - if err := m.tasks.Add(ctx, t); err != nil { - return nil, errors.Wrap(err, "failed to add task") + if err := m.shims.Add(ctx, shimTask); err != nil { + return nil, fmt.Errorf("failed to add task: %w", err) } - return t, nil + return shimTask, nil } -func (m *TaskManager) startShim(ctx context.Context, bundle *Bundle, id string, opts runtime.CreateOpts) (*shim, error) { +func (m *ShimManager) startShim(ctx context.Context, bundle *Bundle, id string, opts runtime.CreateOpts) (*shim, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -162,177 +201,137 @@ func (m *TaskManager) startShim(ctx context.Context, bundle *Bundle, id string, topts = opts.RuntimeOptions } - b := shimBinary(ctx, bundle, opts.Runtime, m.containerdAddress, m.containerdTTRPCAddress, m.events, m.tasks) + runtimePath, err := m.resolveRuntimePath(opts.Runtime) + if err != nil { + return nil, fmt.Errorf("failed to resolve runtime path: %w", err) + } + + b := shimBinary(bundle, shimBinaryConfig{ + runtime: runtimePath, + address: m.containerdAddress, + ttrpcAddress: m.containerdTTRPCAddress, + schedCore: m.schedCore, + }) shim, err := b.Start(ctx, topts, func() { log.G(ctx).WithField("id", id).Info("shim disconnected") - cleanupAfterDeadShim(context.Background(), id, ns, m.tasks, m.events, b) + cleanupAfterDeadShim(context.Background(), id, ns, m.shims, m.events, b) // Remove self from the runtime task list. Even though the cleanupAfterDeadShim() // would publish taskExit event, but the shim.Delete() would always failed with ttrpc // disconnect and there is no chance to remove this dead task from runtime task lists. // Thus it's better to delete it here. 
- m.tasks.Delete(ctx, id) + m.shims.Delete(ctx, id) }) if err != nil { - return nil, errors.Wrap(err, "start failed") + return nil, fmt.Errorf("start failed: %w", err) } return shim, nil } -// deleteShim attempts to properly delete and cleanup shim after error -func (m *TaskManager) deleteShim(shim *shim) { +func (m *ShimManager) resolveRuntimePath(runtime string) (string, error) { + if runtime == "" { + return "", fmt.Errorf("no runtime name") + } + + // Custom path to runtime binary + if filepath.IsAbs(runtime) { + // Make sure it exists before returning ok + if _, err := os.Stat(runtime); err != nil { + return "", fmt.Errorf("invalid custom binary path: %w", err) + } + + return runtime, nil + } + + // Preserve existing logic and resolve runtime path from runtime name. + + name := shimbinary.BinaryName(runtime) + if name == "" { + return "", fmt.Errorf("invalid runtime name %s, correct runtime name should be either format like `io.containerd.runc.v1` or a full path to the binary", runtime) + } + + if path, ok := m.runtimePaths.Load(name); ok { + return path.(string), nil + } + + var ( + cmdPath string + lerr error + ) + + binaryPath := shimbinary.BinaryPath(runtime) + if _, serr := os.Stat(binaryPath); serr == nil { + cmdPath = binaryPath + } + + if cmdPath == "" { + if cmdPath, lerr = exec.LookPath(name); lerr != nil { + if eerr, ok := lerr.(*exec.Error); ok { + if eerr.Err == exec.ErrNotFound { + self, err := os.Executable() + if err != nil { + return "", err + } + + // Match the calling binaries (containerd) path and see + // if they are side by side. If so, execute the shim + // found there. 
+ testPath := filepath.Join(filepath.Dir(self), name) + if _, serr := os.Stat(testPath); serr == nil { + cmdPath = testPath + } + if cmdPath == "" { + return "", fmt.Errorf("runtime %q binary not installed %q: %w", runtime, name, os.ErrNotExist) + } + } + } + } + } + + cmdPath, err := filepath.Abs(cmdPath) + if err != nil { + return "", err + } + + if path, ok := m.runtimePaths.LoadOrStore(name, cmdPath); ok { + // We didn't store cmdPath we loaded an already cached value. Use it. + cmdPath = path.(string) + } + + return cmdPath, nil +} + +// cleanupShim attempts to properly delete and cleanup shim after error +func (m *ShimManager) cleanupShim(shim *shim) { dctx, cancel := timeout.WithContext(context.Background(), cleanupTimeout) defer cancel() - _, errShim := shim.Delete(dctx) - if errShim != nil { - if errdefs.IsDeadlineExceeded(errShim) { - dctx, cancel = timeout.WithContext(context.Background(), cleanupTimeout) - defer cancel() - } - shim.Shutdown(dctx) - shim.Close() - } + _ = shim.delete(dctx) + m.shims.Delete(dctx, shim.ID()) } -// Get a specific task -func (m *TaskManager) Get(ctx context.Context, id string) (runtime.Task, error) { - return m.tasks.Get(ctx, id) -} - -// Add a runtime task -func (m *TaskManager) Add(ctx context.Context, task runtime.Task) error { - return m.tasks.Add(ctx, task) -} - -// Delete a runtime task -func (m *TaskManager) Delete(ctx context.Context, id string) { - m.tasks.Delete(ctx, id) -} - -// Tasks lists all tasks -func (m *TaskManager) Tasks(ctx context.Context, all bool) ([]runtime.Task, error) { - return m.tasks.GetAll(ctx, all) -} - -func (m *TaskManager) loadExistingTasks(ctx context.Context) error { - nsDirs, err := ioutil.ReadDir(m.state) - if err != nil { - return err - } - for _, nsd := range nsDirs { - if !nsd.IsDir() { - continue - } - ns := nsd.Name() - // skip hidden directories - if len(ns) > 0 && ns[0] == '.' 
{ - continue - } - log.G(ctx).WithField("namespace", ns).Debug("loading tasks in namespace") - if err := m.loadTasks(namespaces.WithNamespace(ctx, ns)); err != nil { - log.G(ctx).WithField("namespace", ns).WithError(err).Error("loading tasks in namespace") - continue - } - if err := m.cleanupWorkDirs(namespaces.WithNamespace(ctx, ns)); err != nil { - log.G(ctx).WithField("namespace", ns).WithError(err).Error("cleanup working directory in namespace") - continue - } - } - return nil -} - -func (m *TaskManager) loadTasks(ctx context.Context) error { - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return err - } - shimDirs, err := ioutil.ReadDir(filepath.Join(m.state, ns)) - if err != nil { - return err - } - for _, sd := range shimDirs { - if !sd.IsDir() { - continue - } - id := sd.Name() - // skip hidden directories - if len(id) > 0 && id[0] == '.' { - continue - } - bundle, err := LoadBundle(ctx, m.state, id) - if err != nil { - // fine to return error here, it is a programmer error if the context - // does not have a namespace - return err - } - // fast path - bf, err := ioutil.ReadDir(bundle.Path) - if err != nil { - bundle.Delete() - log.G(ctx).WithError(err).Errorf("fast path read bundle path for %s", bundle.Path) - continue - } - if len(bf) == 0 { - bundle.Delete() - continue - } - container, err := m.container(ctx, id) - if err != nil { - log.G(ctx).WithError(err).Errorf("loading container %s", id) - if err := mount.UnmountAll(filepath.Join(bundle.Path, "rootfs"), 0); err != nil { - log.G(ctx).WithError(err).Errorf("forceful unmount of rootfs %s", id) - } - bundle.Delete() - continue - } - binaryCall := shimBinary(ctx, bundle, container.Runtime.Name, m.containerdAddress, m.containerdTTRPCAddress, m.events, m.tasks) - shim, err := loadShim(ctx, bundle, m.events, m.tasks, func() { - log.G(ctx).WithField("id", id).Info("shim disconnected") - - cleanupAfterDeadShim(context.Background(), id, ns, m.tasks, m.events, binaryCall) - // Remove self 
from the runtime task list. - m.tasks.Delete(ctx, id) - }) - if err != nil { - cleanupAfterDeadShim(ctx, id, ns, m.tasks, m.events, binaryCall) - continue - } - m.tasks.Add(ctx, shim) - } - return nil -} - -func (m *TaskManager) container(ctx context.Context, id string) (*containers.Container, error) { - container, err := m.containers.Get(ctx, id) +func (m *ShimManager) Get(ctx context.Context, id string) (ShimProcess, error) { + proc, err := m.shims.Get(ctx, id) if err != nil { return nil, err } - return &container, nil + + return proc, nil } -func (m *TaskManager) cleanupWorkDirs(ctx context.Context) error { - ns, err := namespaces.NamespaceRequired(ctx) +// Delete a runtime task +func (m *ShimManager) Delete(ctx context.Context, id string) error { + proc, err := m.shims.Get(ctx, id) if err != nil { return err } - dirs, err := ioutil.ReadDir(filepath.Join(m.root, ns)) - if err != nil { - return err - } - for _, d := range dirs { - // if the task was not loaded, cleanup and empty working directory - // this can happen on a reboot where /run for the bundle state is cleaned up - // but that persistent working dir is left - if _, err := m.tasks.Get(ctx, d.Name()); err != nil { - path := filepath.Join(m.root, ns, d.Name()) - if err := os.RemoveAll(path); err != nil { - log.G(ctx).WithError(err).Errorf("cleanup working dir %s", path) - } - } - } - return nil + + shimTask := proc.(*shimTask) + err = shimTask.shim.delete(ctx) + m.shims.Delete(ctx, id) + + return err } func parsePlatforms(platformStr []string) ([]ocispec.Platform, error) { @@ -346,3 +345,84 @@ func parsePlatforms(platformStr []string) ([]ocispec.Platform, error) { } return p, nil } + +// TaskManager wraps task service client on top of shim manager. +type TaskManager struct { + manager *ShimManager +} + +// NewTaskManager creates a new task manager instance. 
+func NewTaskManager(shims *ShimManager) *TaskManager { + return &TaskManager{ + manager: shims, + } +} + +// ID of the task manager +func (m *TaskManager) ID() string { + return fmt.Sprintf("%s.%s", plugin.RuntimePluginV2, "task") +} + +// Create launches new shim instance and creates new task +func (m *TaskManager) Create(ctx context.Context, taskID string, opts runtime.CreateOpts) (runtime.Task, error) { + process, err := m.manager.Start(ctx, taskID, opts) + if err != nil { + return nil, fmt.Errorf("failed to start shim: %w", err) + } + + // Cast to shim task and call task service to create a new container task instance. + // This will not be required once shim service / client implemented. + shim := process.(*shimTask) + t, err := shim.Create(ctx, opts) + if err != nil { + // NOTE: ctx contains required namespace information. + m.manager.shims.Delete(ctx, taskID) + + dctx, cancel := timeout.WithContext(context.Background(), cleanupTimeout) + defer cancel() + + _, errShim := shim.delete(dctx, func(context.Context, string) {}) + if errShim != nil { + if errdefs.IsDeadlineExceeded(errShim) { + dctx, cancel = timeout.WithContext(context.Background(), cleanupTimeout) + defer cancel() + } + + shim.Shutdown(dctx) + shim.Close() + } + + return nil, fmt.Errorf("failed to create shim task: %w", err) + } + + return t, nil +} + +// Get a specific task +func (m *TaskManager) Get(ctx context.Context, id string) (runtime.Task, error) { + return m.manager.shims.Get(ctx, id) +} + +// Tasks lists all tasks +func (m *TaskManager) Tasks(ctx context.Context, all bool) ([]runtime.Task, error) { + return m.manager.shims.GetAll(ctx, all) +} + +// Delete deletes the task and shim instance +func (m *TaskManager) Delete(ctx context.Context, taskID string) (*runtime.Exit, error) { + item, err := m.manager.shims.Get(ctx, taskID) + if err != nil { + return nil, err + } + + shimTask := item.(*shimTask) + exit, err := shimTask.delete(ctx, func(ctx context.Context, id string) { + 
m.manager.shims.Delete(ctx, id) + }) + + if err != nil { + return nil, fmt.Errorf("failed to delete task: %w", err) + } + + return exit, nil +} diff --git a/runtime/v2/manager_unix.go b/runtime/v2/manager_unix.go index 4e74125..eb87c20 100644 --- a/runtime/v2/manager_unix.go +++ b/runtime/v2/manager_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/runtime/v2/manager_windows.go b/runtime/v2/manager_windows.go index 838622b..fe2040d 100644 --- a/runtime/v2/manager_windows.go +++ b/runtime/v2/manager_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. diff --git a/runtime/v2/process.go b/runtime/v2/process.go index 903a428..84c90c7 100644 --- a/runtime/v2/process.go +++ b/runtime/v2/process.go @@ -18,18 +18,18 @@ package v2 import ( "context" + "errors" tasktypes "github.com/containerd/containerd/api/types/task" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/v2/task" "github.com/containerd/ttrpc" - "github.com/pkg/errors" ) type process struct { id string - shim *shim + shim *shimTask } func (p *process) ID() string { diff --git a/runtime/v2/runc/container.go b/runtime/v2/runc/container.go index cdfb087..aac9dad 100644 --- a/runtime/v2/runc/container.go +++ b/runtime/v2/runc/container.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -21,7 +22,7 @@ package runc import ( "context" "encoding/json" - "io/ioutil" + "fmt" "os" "path/filepath" "sync" @@ -37,7 +38,6 @@ import ( "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/containerd/runtime/v2/task" "github.com/containerd/typeurl" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -45,7 +45,7 @@ import ( func NewContainer(ctx context.Context, platform stdio.Platform, r *task.CreateTaskRequest) (_ *Container, retErr error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { - return nil, errors.Wrap(err, "create 
namespace") + return nil, fmt.Errorf("create namespace: %w", err) } var opts options.Options @@ -110,7 +110,7 @@ func NewContainer(ctx context.Context, platform stdio.Platform, r *task.CreateTa Options: rm.Options, } if err := m.Mount(rootfs); err != nil { - return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) + return nil, fmt.Errorf("failed to mount rootfs component %v: %w", m, err) } } @@ -174,7 +174,7 @@ func ReadOptions(path string) (*options.Options, error) { return nil, err } - data, err := ioutil.ReadFile(filePath) + data, err := os.ReadFile(filePath) if err != nil { return nil, err } @@ -191,12 +191,12 @@ func WriteOptions(path string, opts options.Options) error { if err != nil { return err } - return ioutil.WriteFile(filepath.Join(path, optionsFilename), data, 0600) + return os.WriteFile(filepath.Join(path, optionsFilename), data, 0600) } // ReadRuntime reads the runtime information from the path func ReadRuntime(path string) (string, error) { - data, err := ioutil.ReadFile(filepath.Join(path, "runtime")) + data, err := os.ReadFile(filepath.Join(path, "runtime")) if err != nil { return "", err } @@ -205,7 +205,7 @@ func ReadRuntime(path string) (string, error) { // WriteRuntime writes the runtime information into the path func WriteRuntime(path, runtime string) error { - return ioutil.WriteFile(filepath.Join(path, "runtime"), []byte(runtime), 0600) + return os.WriteFile(filepath.Join(path, "runtime"), []byte(runtime), 0600) } func newInit(ctx context.Context, path, workDir, namespace string, platform stdio.Platform, @@ -300,13 +300,13 @@ func (c *Container) Process(id string) (process.Process, error) { defer c.mu.Unlock() if id == "" { if c.process == nil { - return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "container must be created") + return nil, fmt.Errorf("container must be created: %w", errdefs.ErrFailedPrecondition) } return c.process, nil } p, ok := c.processes[id] if !ok { - return nil, errors.Wrapf(errdefs.ErrNotFound, 
"process does not exist %s", id) + return nil, fmt.Errorf("process does not exist %s: %w", id, errdefs.ErrNotFound) } return p, nil } @@ -357,7 +357,7 @@ func (c *Container) Start(ctx context.Context, r *task.StartRequest) (process.Pr return nil, err } if err := p.Start(ctx); err != nil { - return nil, err + return p, err } if c.Cgroup() == nil && p.Pid() > 0 { var cg interface{} @@ -453,7 +453,7 @@ func (c *Container) CloseIO(ctx context.Context, r *task.CloseIORequest) error { } if stdin := p.Stdin(); stdin != nil { if err := stdin.Close(); err != nil { - return errors.Wrap(err, "close stdin") + return fmt.Errorf("close stdin: %w", err) } } return nil diff --git a/runtime/v2/runc/manager/manager_linux.go b/runtime/v2/runc/manager/manager_linux.go new file mode 100644 index 0000000..d5fb292 --- /dev/null +++ b/runtime/v2/runc/manager/manager_linux.go @@ -0,0 +1,285 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package manager + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + goruntime "runtime" + "syscall" + "time" + + "github.com/containerd/cgroups" + cgroupsv2 "github.com/containerd/cgroups/v2" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/process" + "github.com/containerd/containerd/pkg/schedcore" + "github.com/containerd/containerd/runtime/v2/runc" + "github.com/containerd/containerd/runtime/v2/runc/options" + "github.com/containerd/containerd/runtime/v2/shim" + runcC "github.com/containerd/go-runc" + "github.com/containerd/typeurl" + "github.com/gogo/protobuf/proto" + ptypes "github.com/gogo/protobuf/types" + exec "golang.org/x/sys/execabs" + "golang.org/x/sys/unix" +) + +// NewShimManager returns an implementation of the shim manager +// using runc +func NewShimManager(name string) shim.Manager { + return &manager{ + name: name, + } +} + +// group labels specifies how the shim groups services. +// currently supports a runc.v2 specific .group label and the +// standard k8s pod label. Order matters in this list +var groupLabels = []string{ + "io.containerd.runc.v2.group", + "io.kubernetes.cri.sandbox-id", +} + +type spec struct { + Annotations map[string]string `json:"annotations,omitempty"` +} + +type manager struct { + name string +} + +func newCommand(ctx context.Context, id, containerdBinary, containerdAddress, containerdTTRPCAddress string, debug bool) (*exec.Cmd, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + self, err := os.Executable() + if err != nil { + return nil, err + } + cwd, err := os.Getwd() + if err != nil { + return nil, err + } + args := []string{ + "-namespace", ns, + "-id", id, + "-address", containerdAddress, + } + if debug { + args = append(args, "-debug") + } + cmd := exec.Command(self, args...) 
+ cmd.Dir = cwd + cmd.Env = append(os.Environ(), "GOMAXPROCS=4") + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + return cmd, nil +} + +func readSpec() (*spec, error) { + f, err := os.Open("config.json") + if err != nil { + return nil, err + } + defer f.Close() + var s spec + if err := json.NewDecoder(f).Decode(&s); err != nil { + return nil, err + } + return &s, nil +} + +func (m manager) Name() string { + return m.name +} + +func (manager) Start(ctx context.Context, id string, opts shim.StartOpts) (_ string, retErr error) { + cmd, err := newCommand(ctx, id, opts.ContainerdBinary, opts.Address, opts.TTRPCAddress, opts.Debug) + if err != nil { + return "", err + } + grouping := id + spec, err := readSpec() + if err != nil { + return "", err + } + for _, group := range groupLabels { + if groupID, ok := spec.Annotations[group]; ok { + grouping = groupID + break + } + } + address, err := shim.SocketAddress(ctx, opts.Address, grouping) + if err != nil { + return "", err + } + + socket, err := shim.NewSocket(address) + if err != nil { + // the only time where this would happen is if there is a bug and the socket + // was not cleaned up in the cleanup method of the shim or we are using the + // grouping functionality where the new process should be run with the same + // shim as an existing container + if !shim.SocketEaddrinuse(err) { + return "", fmt.Errorf("create new shim socket: %w", err) + } + if shim.CanConnect(address) { + if err := shim.WriteAddress("address", address); err != nil { + return "", fmt.Errorf("write existing socket for shim: %w", err) + } + return address, nil + } + if err := shim.RemoveSocket(address); err != nil { + return "", fmt.Errorf("remove pre-existing socket: %w", err) + } + if socket, err = shim.NewSocket(address); err != nil { + return "", fmt.Errorf("try create new shim socket 2x: %w", err) + } + } + defer func() { + if retErr != nil { + socket.Close() + _ = shim.RemoveSocket(address) + } + }() + + // make sure that 
reexec shim-v2 binary use the value if need + if err := shim.WriteAddress("address", address); err != nil { + return "", err + } + + f, err := socket.File() + if err != nil { + return "", err + } + + cmd.ExtraFiles = append(cmd.ExtraFiles, f) + + goruntime.LockOSThread() + if os.Getenv("SCHED_CORE") != "" { + if err := schedcore.Create(schedcore.ProcessGroup); err != nil { + return "", fmt.Errorf("enable sched core support: %w", err) + } + } + + if err := cmd.Start(); err != nil { + f.Close() + return "", err + } + + goruntime.UnlockOSThread() + + defer func() { + if retErr != nil { + cmd.Process.Kill() + } + }() + // make sure to wait after start + go cmd.Wait() + if data, err := io.ReadAll(os.Stdin); err == nil { + if len(data) > 0 { + var any ptypes.Any + if err := proto.Unmarshal(data, &any); err != nil { + return "", err + } + v, err := typeurl.UnmarshalAny(&any) + if err != nil { + return "", err + } + if opts, ok := v.(*options.Options); ok { + if opts.ShimCgroup != "" { + if cgroups.Mode() == cgroups.Unified { + cg, err := cgroupsv2.LoadManager("/sys/fs/cgroup", opts.ShimCgroup) + if err != nil { + return "", fmt.Errorf("failed to load cgroup %s: %w", opts.ShimCgroup, err) + } + if err := cg.AddProc(uint64(cmd.Process.Pid)); err != nil { + return "", fmt.Errorf("failed to join cgroup %s: %w", opts.ShimCgroup, err) + } + } else { + cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(opts.ShimCgroup)) + if err != nil { + return "", fmt.Errorf("failed to load cgroup %s: %w", opts.ShimCgroup, err) + } + if err := cg.Add(cgroups.Process{ + Pid: cmd.Process.Pid, + }); err != nil { + return "", fmt.Errorf("failed to join cgroup %s: %w", opts.ShimCgroup, err) + } + } + } + } + } + } + if err := shim.AdjustOOMScore(cmd.Process.Pid); err != nil { + return "", fmt.Errorf("failed to adjust OOM score for shim: %w", err) + } + return address, nil +} + +func (manager) Stop(ctx context.Context, id string) (shim.StopStatus, error) { + cwd, err := os.Getwd() + if err != 
nil { + return shim.StopStatus{}, err + } + + path := filepath.Join(filepath.Dir(cwd), id) + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return shim.StopStatus{}, err + } + runtime, err := runc.ReadRuntime(path) + if err != nil { + return shim.StopStatus{}, err + } + opts, err := runc.ReadOptions(path) + if err != nil { + return shim.StopStatus{}, err + } + root := process.RuncRoot + if opts != nil && opts.Root != "" { + root = opts.Root + } + + r := process.NewRunc(root, path, ns, runtime, "", false) + if err := r.Delete(ctx, id, &runcC.DeleteOpts{ + Force: true, + }); err != nil { + log.G(ctx).WithError(err).Warn("failed to remove runc container") + } + if err := mount.UnmountAll(filepath.Join(path, "rootfs"), 0); err != nil { + log.G(ctx).WithError(err).Warn("failed to cleanup rootfs mount") + } + pid, err := runcC.ReadPidFile(filepath.Join(path, process.InitPidFile)) + if err != nil { + log.G(ctx).WithError(err).Warn("failed to read init pid file") + } + return shim.StopStatus{ + ExitedAt: time.Now(), + ExitStatus: 128 + int(unix.SIGKILL), + Pid: pid, + }, nil +} diff --git a/runtime/v2/runc/platform.go b/runtime/v2/runc/platform.go index 2ded840..c08d329 100644 --- a/runtime/v2/runc/platform.go +++ b/runtime/v2/runc/platform.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,6 +21,8 @@ package runc import ( "context" + "errors" + "fmt" "io" "net/url" "os" @@ -31,7 +34,6 @@ import ( "github.com/containerd/containerd/pkg/process" "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/fifo" - "github.com/pkg/errors" ) var bufPool = sync.Pool{ @@ -47,7 +49,7 @@ var bufPool = sync.Pool{ func NewPlatform() (stdio.Platform, error) { epoller, err := console.NewEpoller() if err != nil { - return nil, errors.Wrap(err, "failed to initialize epoller") + return nil, fmt.Errorf("failed to initialize epoller: %w", err) } go epoller.Wait() return &linuxPlatform{ @@ -89,7 +91,7 @@ func (p *linuxPlatform) CopyConsole(ctx 
context.Context, console console.Console uri, err := url.Parse(stdout) if err != nil { - return nil, errors.Wrap(err, "unable to parse stdout uri") + return nil, fmt.Errorf("unable to parse stdout uri: %w", err) } switch uri.Scheme { @@ -113,14 +115,14 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console // Create pipe to be used by logging binary for Stdout outR, outW, err := os.Pipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stdout pipes") + return nil, fmt.Errorf("failed to create stdout pipes: %w", err) } filesToClose = append(filesToClose, outR) // Stderr is created for logging binary but unused when terminal is true serrR, _, err := os.Pipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stderr pipes") + return nil, fmt.Errorf("failed to create stderr pipes: %w", err) } filesToClose = append(filesToClose, serrR) @@ -142,18 +144,18 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console }() if err := cmd.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start logging binary process") + return nil, fmt.Errorf("failed to start logging binary process: %w", err) } // Close our side of the pipe after start if err := w.Close(); err != nil { - return nil, errors.Wrap(err, "failed to close write pipe after start") + return nil, fmt.Errorf("failed to close write pipe after start: %w", err) } // Wait for the logging binary to be ready b := make([]byte, 1) if _, err := r.Read(b); err != nil && err != io.EOF { - return nil, errors.Wrap(err, "failed to read from logging binary") + return nil, fmt.Errorf("failed to read from logging binary: %w", err) } cwg.Wait() @@ -190,7 +192,7 @@ func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Consol } epollConsole, ok := cons.(*console.EpollConsole) if !ok { - return errors.Errorf("expected EpollConsole, got %#v", cons) + return fmt.Errorf("expected EpollConsole, got %#v", cons) } return 
epollConsole.Shutdown(p.epoller.CloseConsole) } diff --git a/runtime/v2/runc/task/plugin/plugin_linux.go b/runtime/v2/runc/task/plugin/plugin_linux.go new file mode 100644 index 0000000..603a274 --- /dev/null +++ b/runtime/v2/runc/task/plugin/plugin_linux.go @@ -0,0 +1,47 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +import ( + "github.com/containerd/containerd/pkg/shutdown" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/runtime/v2/runc/task" + "github.com/containerd/containerd/runtime/v2/shim" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.TTRPCPlugin, + ID: "task", + Requires: []plugin.Type{ + plugin.EventPlugin, + plugin.InternalPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + pp, err := ic.GetByID(plugin.EventPlugin, "publisher") + if err != nil { + return nil, err + } + ss, err := ic.GetByID(plugin.InternalPlugin, "shutdown") + if err != nil { + return nil, err + } + return task.NewTaskService(ic.Context, pp.(shim.Publisher), ss.(shutdown.Service)) + }, + }) + +} diff --git a/runtime/v2/runc/task/service.go b/runtime/v2/runc/task/service.go new file mode 100644 index 0000000..d6af20f --- /dev/null +++ b/runtime/v2/runc/task/service.go @@ -0,0 +1,721 @@ +//go:build linux +// +build linux + +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package task + +import ( + "context" + "fmt" + "os" + "sync" + + "github.com/containerd/cgroups" + cgroupsv2 "github.com/containerd/cgroups/v2" + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/api/types/task" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/oom" + oomv1 "github.com/containerd/containerd/pkg/oom/v1" + oomv2 "github.com/containerd/containerd/pkg/oom/v2" + "github.com/containerd/containerd/pkg/process" + "github.com/containerd/containerd/pkg/shutdown" + "github.com/containerd/containerd/pkg/stdio" + "github.com/containerd/containerd/pkg/userns" + "github.com/containerd/containerd/runtime/v2/runc" + "github.com/containerd/containerd/runtime/v2/runc/options" + "github.com/containerd/containerd/runtime/v2/shim" + shimapi "github.com/containerd/containerd/runtime/v2/task" + taskAPI "github.com/containerd/containerd/runtime/v2/task" + "github.com/containerd/containerd/sys/reaper" + runcC "github.com/containerd/go-runc" + "github.com/containerd/ttrpc" + "github.com/containerd/typeurl" + ptypes "github.com/gogo/protobuf/types" + "github.com/sirupsen/logrus" +) + +var ( + _ = (taskAPI.TaskService)(&service{}) + empty = &ptypes.Empty{} +) + +// NewTaskService creates a new instance of a task service +func NewTaskService(ctx context.Context, publisher shim.Publisher, sd shutdown.Service) 
(taskAPI.TaskService, error) { + var ( + ep oom.Watcher + err error + ) + if cgroups.Mode() == cgroups.Unified { + ep, err = oomv2.New(publisher) + } else { + ep, err = oomv1.New(publisher) + } + if err != nil { + return nil, err + } + go ep.Run(ctx) + s := &service{ + context: ctx, + events: make(chan interface{}, 128), + ec: reaper.Default.Subscribe(), + ep: ep, + shutdown: sd, + containers: make(map[string]*runc.Container), + running: make(map[int][]containerProcess), + exitSubscribers: make(map[*map[int][]runcC.Exit]struct{}), + } + go s.processExits() + runcC.Monitor = reaper.Default + if err := s.initPlatform(); err != nil { + return nil, fmt.Errorf("failed to initialized platform behavior: %w", err) + } + go s.forward(ctx, publisher) + sd.RegisterCallback(func(context.Context) error { + close(s.events) + return nil + }) + + if address, err := shim.ReadAddress("address"); err == nil { + sd.RegisterCallback(func(context.Context) error { + return shim.RemoveSocket(address) + }) + } + return s, nil +} + +// service is the shim implementation of a remote shim over GRPC +type service struct { + mu sync.Mutex + + context context.Context + events chan interface{} + platform stdio.Platform + ec chan runcC.Exit + ep oom.Watcher + + containers map[string]*runc.Container + + lifecycleMu sync.Mutex + running map[int][]containerProcess // pid -> running process, guarded by lifecycleMu + // Subscriptions to exits for PIDs. Adding/deleting subscriptions and + // dereferencing the subscription pointers must only be done while holding + // lifecycleMu. + exitSubscribers map[*map[int][]runcC.Exit]struct{} + + shutdown shutdown.Service +} + +type containerProcess struct { + Container *runc.Container + Process process.Process +} + +// preStart prepares for starting a container process and handling its exit. +// The container being started should be passed in as c when starting the +// container init process for an already-created container. 
c should be nil when +// creating a container or when starting an exec. +// +// The returned handleStarted closure records that the process has started so +// that its exit can be handled efficiently. If the process has already exited, +// it handles the exit immediately. handleStarted should be called after the +// event announcing the start of the process has been published. +// +// The returned cleanup closure releases resources used to handle early exits. +// It must be called before the caller of preStart returns, otherwise severe +// memory leaks will occur. +func (s *service) preStart(c *runc.Container) (handleStarted func(*runc.Container, process.Process), cleanup func()) { + exits := make(map[int][]runcC.Exit) + + s.lifecycleMu.Lock() + defer s.lifecycleMu.Unlock() + s.exitSubscribers[&exits] = struct{}{} + + if c != nil { + // Remove container init process from s.running so it will once again be + // treated as an early exit if it exits before handleStarted is called. + pid := c.Pid() + var newRunning []containerProcess + for _, cp := range s.running[pid] { + if cp.Container != c { + newRunning = append(newRunning, cp) + } + } + if len(newRunning) > 0 { + s.running[pid] = newRunning + } else { + delete(s.running, pid) + } + } + + handleStarted = func(c *runc.Container, p process.Process) { + var pid int + if p != nil { + pid = p.Pid() + } + + s.lifecycleMu.Lock() + ees, exited := exits[pid] + delete(s.exitSubscribers, &exits) + exits = nil + if pid == 0 { // no-op + s.lifecycleMu.Unlock() + } else if exited { + s.lifecycleMu.Unlock() + for _, ee := range ees { + s.handleProcessExit(ee, c, p) + } + } else { + s.running[pid] = append(s.running[pid], containerProcess{ + Container: c, + Process: p, + }) + s.lifecycleMu.Unlock() + } + } + + cleanup = func() { + if exits != nil { + s.lifecycleMu.Lock() + defer s.lifecycleMu.Unlock() + delete(s.exitSubscribers, &exits) + } + } + + return handleStarted, cleanup +} + +// Create a new initial process and container 
with the underlying OCI runtime +func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *taskAPI.CreateTaskResponse, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + handleStarted, cleanup := s.preStart(nil) + defer cleanup() + + container, err := runc.NewContainer(ctx, s.platform, r) + if err != nil { + return nil, err + } + + s.containers[r.ID] = container + + s.send(&eventstypes.TaskCreate{ + ContainerID: r.ID, + Bundle: r.Bundle, + Rootfs: r.Rootfs, + IO: &eventstypes.TaskIO{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, + }, + Checkpoint: r.Checkpoint, + Pid: uint32(container.Pid()), + }) + + // The following line cannot return an error as the only state in which that + // could happen would also cause the container.Pid() call above to + // nil-deference panic. + proc, _ := container.Process("") + handleStarted(container, proc) + + return &taskAPI.CreateTaskResponse{ + Pid: uint32(container.Pid()), + }, nil +} + +func (s *service) RegisterTTRPC(server *ttrpc.Server) error { + shimapi.RegisterTaskService(server, s) + return nil +} + +// Start a process +func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (*taskAPI.StartResponse, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + + var cinit *runc.Container + if r.ExecID == "" { + cinit = container + } + handleStarted, cleanup := s.preStart(cinit) + defer cleanup() + p, err := container.Start(ctx, r) + if err != nil { + handleStarted(container, p) + return nil, errdefs.ToGRPC(err) + } + + switch r.ExecID { + case "": + switch cg := container.Cgroup().(type) { + case cgroups.Cgroup: + if err := s.ep.Add(container.ID, cg); err != nil { + logrus.WithError(err).Error("add cg to OOM monitor") + } + case *cgroupsv2.Manager: + allControllers, err := cg.RootControllers() + if err != nil { + logrus.WithError(err).Error("failed to get root controllers") + } else { + if err := 
cg.ToggleControllers(allControllers, cgroupsv2.Enable); err != nil { + if userns.RunningInUserNS() { + logrus.WithError(err).Debugf("failed to enable controllers (%v)", allControllers) + } else { + logrus.WithError(err).Errorf("failed to enable controllers (%v)", allControllers) + } + } + } + if err := s.ep.Add(container.ID, cg); err != nil { + logrus.WithError(err).Error("add cg to OOM monitor") + } + } + + s.send(&eventstypes.TaskStart{ + ContainerID: container.ID, + Pid: uint32(p.Pid()), + }) + default: + s.send(&eventstypes.TaskExecStarted{ + ContainerID: container.ID, + ExecID: r.ExecID, + Pid: uint32(p.Pid()), + }) + } + handleStarted(container, p) + return &taskAPI.StartResponse{ + Pid: uint32(p.Pid()), + }, nil +} + +// Delete the initial process and container +func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (*taskAPI.DeleteResponse, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + p, err := container.Delete(ctx, r) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + // if we deleted an init task, send the task delete event + if r.ExecID == "" { + s.mu.Lock() + delete(s.containers, r.ID) + s.mu.Unlock() + s.send(&eventstypes.TaskDelete{ + ContainerID: container.ID, + Pid: uint32(p.Pid()), + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + }) + } + return &taskAPI.DeleteResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + Pid: uint32(p.Pid()), + }, nil +} + +// Exec an additional process inside the container +func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*ptypes.Empty, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + ok, cancel := container.ReserveProcess(r.ExecID) + if !ok { + return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, "id %s", r.ExecID) + } + process, err := container.Exec(ctx, r) + if err != nil { + cancel() + return nil, errdefs.ToGRPC(err) + } + + 
s.send(&eventstypes.TaskExecAdded{ + ContainerID: container.ID, + ExecID: process.ID(), + }) + return empty, nil +} + +// ResizePty of a process +func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (*ptypes.Empty, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + if err := container.ResizePty(ctx, r); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// State returns runtime state information for a process +func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (*taskAPI.StateResponse, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + p, err := container.Process(r.ExecID) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + st, err := p.Status(ctx) + if err != nil { + return nil, err + } + status := task.StatusUnknown + switch st { + case "created": + status = task.StatusCreated + case "running": + status = task.StatusRunning + case "stopped": + status = task.StatusStopped + case "paused": + status = task.StatusPaused + case "pausing": + status = task.StatusPausing + } + sio := p.Stdio() + return &taskAPI.StateResponse{ + ID: p.ID(), + Bundle: container.Bundle, + Pid: uint32(p.Pid()), + Status: status, + Stdin: sio.Stdin, + Stdout: sio.Stdout, + Stderr: sio.Stderr, + Terminal: sio.Terminal, + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + }, nil +} + +// Pause the container +func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (*ptypes.Empty, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + if err := container.Pause(ctx); err != nil { + return nil, errdefs.ToGRPC(err) + } + s.send(&eventstypes.TaskPaused{ + ContainerID: container.ID, + }) + return empty, nil +} + +// Resume the container +func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (*ptypes.Empty, error) { + container, err := s.getContainer(r.ID) + if err != 
nil { + return nil, err + } + if err := container.Resume(ctx); err != nil { + return nil, errdefs.ToGRPC(err) + } + s.send(&eventstypes.TaskResumed{ + ContainerID: container.ID, + }) + return empty, nil +} + +// Kill a process with the provided signal +func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (*ptypes.Empty, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + if err := container.Kill(ctx, r); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// Pids returns all pids inside the container +func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.PidsResponse, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + pids, err := s.getContainerPids(ctx, r.ID) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + var processes []*task.ProcessInfo + for _, pid := range pids { + pInfo := task.ProcessInfo{ + Pid: pid, + } + for _, p := range container.ExecdProcesses() { + if p.Pid() == int(pid) { + d := &options.ProcessDetails{ + ExecID: p.ID(), + } + a, err := typeurl.MarshalAny(d) + if err != nil { + return nil, fmt.Errorf("failed to marshal process %d info: %w", pid, err) + } + pInfo.Info = a + break + } + } + processes = append(processes, &pInfo) + } + return &taskAPI.PidsResponse{ + Processes: processes, + }, nil +} + +// CloseIO of a process +func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (*ptypes.Empty, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + if err := container.CloseIO(ctx, r); err != nil { + return nil, err + } + return empty, nil +} + +// Checkpoint the container +func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (*ptypes.Empty, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + if err := container.Checkpoint(ctx, r); err != nil { + return nil, 
errdefs.ToGRPC(err) + } + return empty, nil +} + +// Update a running container +func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (*ptypes.Empty, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + if err := container.Update(ctx, r); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// Wait for a process to exit +func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (*taskAPI.WaitResponse, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + p, err := container.Process(r.ExecID) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + p.Wait() + + return &taskAPI.WaitResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + }, nil +} + +// Connect returns shim information such as the shim's pid +func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (*taskAPI.ConnectResponse, error) { + var pid int + if container, err := s.getContainer(r.ID); err == nil { + pid = container.Pid() + } + return &taskAPI.ConnectResponse{ + ShimPid: uint32(os.Getpid()), + TaskPid: uint32(pid), + }, nil +} + +func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (*ptypes.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // return out if the shim is still servicing containers + if len(s.containers) > 0 { + return empty, nil + } + + // please make sure that temporary resource has been cleanup or registered + // for cleanup before calling shutdown + s.shutdown.Shutdown() + + return empty, nil +} + +func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI.StatsResponse, error) { + container, err := s.getContainer(r.ID) + if err != nil { + return nil, err + } + cgx := container.Cgroup() + if cgx == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "cgroup does not exist") + } + var statsx interface{} + switch cg := cgx.(type) { + case cgroups.Cgroup: + 
stats, err := cg.Stat(cgroups.IgnoreNotExist) + if err != nil { + return nil, err + } + statsx = stats + case *cgroupsv2.Manager: + stats, err := cg.Stat() + if err != nil { + return nil, err + } + statsx = stats + default: + return nil, errdefs.ToGRPCf(errdefs.ErrNotImplemented, "unsupported cgroup type %T", cg) + } + data, err := typeurl.MarshalAny(statsx) + if err != nil { + return nil, err + } + return &taskAPI.StatsResponse{ + Stats: data, + }, nil +} + +func (s *service) processExits() { + for e := range s.ec { + // While unlikely, it is not impossible for a container process to exit + // and have its PID be recycled for a new container process before we + // have a chance to process the first exit. As we have no way to tell + // for sure which of the processes the exit event corresponds to (until + // pidfd support is implemented) there is no way for us to handle the + // exit correctly in that case. + + s.lifecycleMu.Lock() + // Inform any concurrent s.Start() calls so they can handle the exit + // if the PID belongs to them. + for subscriber := range s.exitSubscribers { + (*subscriber)[e.Pid] = append((*subscriber)[e.Pid], e) + } + // Handle the exit for a created/started process. If there's more than + // one, assume they've all exited. One of them will be the correct + // process. + cps := s.running[e.Pid] + delete(s.running, e.Pid) + s.lifecycleMu.Unlock() + + for _, cp := range cps { + s.handleProcessExit(e, cp.Container, cp.Process) + } + } +} + +func (s *service) send(evt interface{}) { + s.events <- evt +} + +func (s *service) handleProcessExit(e runcC.Exit, c *runc.Container, p process.Process) { + s.mu.Lock() + defer s.mu.Unlock() + + if ip, ok := p.(*process.Init); ok { + // Ensure all children are killed + if runc.ShouldKillAllOnExit(s.context, c.Bundle) { + if err := ip.KillAll(s.context); err != nil { + logrus.WithError(err).WithField("id", ip.ID()). 
+ Error("failed to kill init's children") + } + } + } + + p.SetExited(e.Status) + s.send(&eventstypes.TaskExit{ + ContainerID: c.ID, + ID: p.ID(), + Pid: uint32(e.Pid), + ExitStatus: uint32(e.Status), + ExitedAt: p.ExitedAt(), + }) +} + +func (s *service) getContainerPids(ctx context.Context, id string) ([]uint32, error) { + container, err := s.getContainer(id) + if err != nil { + return nil, err + } + p, err := container.Process("") + if err != nil { + return nil, errdefs.ToGRPC(err) + } + ps, err := p.(*process.Init).Runtime().Ps(ctx, id) + if err != nil { + return nil, err + } + pids := make([]uint32, 0, len(ps)) + for _, pid := range ps { + pids = append(pids, uint32(pid)) + } + return pids, nil +} + +func (s *service) forward(ctx context.Context, publisher shim.Publisher) { + ns, _ := namespaces.Namespace(ctx) + ctx = namespaces.WithNamespace(context.Background(), ns) + for e := range s.events { + err := publisher.Publish(ctx, runc.GetTopic(e), e) + if err != nil { + logrus.WithError(err).Error("post event") + } + } + publisher.Close() +} + +func (s *service) getContainer(id string) (*runc.Container, error) { + s.mu.Lock() + container := s.containers[id] + s.mu.Unlock() + if container == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "container not created") + } + return container, nil +} + +// initialize a single epoll fd to manage our consoles. `initPlatform` should +// only be called once. 
+func (s *service) initPlatform() error { + if s.platform != nil { + return nil + } + p, err := runc.NewPlatform() + if err != nil { + return err + } + s.platform = p + s.shutdown.RegisterCallback(func(context.Context) error { return s.platform.Close() }) + return nil +} diff --git a/runtime/v2/runc/util.go b/runtime/v2/runc/util.go index 166597d..c965a22 100644 --- a/runtime/v2/runc/util.go +++ b/runtime/v2/runc/util.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -21,7 +22,7 @@ package runc import ( "context" "encoding/json" - "io/ioutil" + "os" "path/filepath" "github.com/containerd/containerd/api/events" @@ -65,7 +66,7 @@ func GetTopic(e interface{}) string { // there is an error reading the spec or if the container has a private PID namespace func ShouldKillAllOnExit(ctx context.Context, bundlePath string) bool { var bundleSpec specs.Spec - bundleConfigContents, err := ioutil.ReadFile(filepath.Join(bundlePath, "config.json")) + bundleConfigContents, err := os.ReadFile(filepath.Join(bundlePath, "config.json")) if err != nil { log.G(ctx).WithError(err).Error("shouldKillAllOnExit: failed to read config.json") return true diff --git a/runtime/v2/runc/v1/service.go b/runtime/v2/runc/v1/service.go index 71f2346..8c47343 100644 --- a/runtime/v2/runc/v1/service.go +++ b/runtime/v2/runc/v1/service.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,10 +21,11 @@ package v1 import ( "context" - "io/ioutil" + "fmt" + "io" "os" - "os/exec" "path/filepath" + goruntime "runtime" "sync" "syscall" "time" @@ -37,6 +39,7 @@ import ( "github.com/containerd/containerd/pkg/oom" oomv1 "github.com/containerd/containerd/pkg/oom/v1" "github.com/containerd/containerd/pkg/process" + "github.com/containerd/containerd/pkg/schedcore" "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/containerd/runtime/v2/runc" "github.com/containerd/containerd/runtime/v2/runc/options" @@ -47,8 +50,8 @@ import ( "github.com/containerd/typeurl" 
"github.com/gogo/protobuf/proto" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) @@ -76,7 +79,7 @@ func New(ctx context.Context, id string, publisher shim.Publisher, shutdown func runcC.Monitor = reaper.Default if err := s.initPlatform(); err != nil { shutdown() - return nil, errors.Wrap(err, "failed to initialized platform behavior") + return nil, fmt.Errorf("failed to initialized platform behavior: %w", err) } go s.forward(ctx, publisher) return s, nil @@ -141,7 +144,7 @@ func (s *service) StartShim(ctx context.Context, opts shim.StartOpts) (_ string, return "", err } if err := shim.RemoveSocket(address); err != nil { - return "", errors.Wrap(err, "remove already used socket") + return "", fmt.Errorf("remove already used socket: %w", err) } if socket, err = shim.NewSocket(address); err != nil { return "", err @@ -165,10 +168,19 @@ func (s *service) StartShim(ctx context.Context, opts shim.StartOpts) (_ string, cmd.ExtraFiles = append(cmd.ExtraFiles, f) + goruntime.LockOSThread() + if os.Getenv("SCHED_CORE") != "" { + if err := schedcore.Create(schedcore.ProcessGroup); err != nil { + return "", fmt.Errorf("enable sched core support: %w", err) + } + } + if err := cmd.Start(); err != nil { f.Close() return "", err } + goruntime.UnlockOSThread() + defer func() { if retErr != nil { cmd.Process.Kill() @@ -179,7 +191,7 @@ func (s *service) StartShim(ctx context.Context, opts shim.StartOpts) (_ string, if err := shim.WritePidFile("shim.pid", cmd.Process.Pid); err != nil { return "", err } - if data, err := ioutil.ReadAll(os.Stdin); err == nil { + if data, err := io.ReadAll(os.Stdin); err == nil { if len(data) > 0 { var any ptypes.Any if err := proto.Unmarshal(data, &any); err != nil { @@ -193,19 +205,19 @@ func (s *service) StartShim(ctx context.Context, opts shim.StartOpts) (_ string, if opts.ShimCgroup != "" { cg, err := cgroups.Load(cgroups.V1, 
cgroups.StaticPath(opts.ShimCgroup)) if err != nil { - return "", errors.Wrapf(err, "failed to load cgroup %s", opts.ShimCgroup) + return "", fmt.Errorf("failed to load cgroup %s: %w", opts.ShimCgroup, err) } if err := cg.Add(cgroups.Process{ Pid: cmd.Process.Pid, }); err != nil { - return "", errors.Wrapf(err, "failed to join cgroup %s", opts.ShimCgroup) + return "", fmt.Errorf("failed to join cgroup %s: %w", opts.ShimCgroup, err) } } } } } if err := shim.AdjustOOMScore(cmd.Process.Pid); err != nil { - return "", errors.Wrap(err, "failed to adjust OOM score for shim") + return "", fmt.Errorf("failed to adjust OOM score for shim: %w", err) } return address, nil } @@ -247,9 +259,16 @@ func (s *service) Cleanup(ctx context.Context) (*taskAPI.DeleteResponse, error) if err := mount.UnmountAll(filepath.Join(path, "rootfs"), 0); err != nil { logrus.WithError(err).Warn("failed to cleanup rootfs mount") } + + pid, err := runcC.ReadPidFile(filepath.Join(path, process.InitPidFile)) + if err != nil { + logrus.WithError(err).Warn("failed to read init pid file") + } + return &taskAPI.DeleteResponse{ ExitedAt: time.Now(), ExitStatus: 128 + uint32(unix.SIGKILL), + Pid: uint32(pid), }, nil } @@ -490,7 +509,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.Pi } a, err := typeurl.MarshalAny(d) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) + return nil, fmt.Errorf("failed to marshal process %d info: %w", pid, err) } pInfo.Info = a break @@ -578,7 +597,11 @@ func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (*pt } func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI.StatsResponse, error) { - cgx := s.container.Cgroup() + container, err := s.getContainer() + if err != nil { + return nil, err + } + cgx := container.Cgroup() if cgx == nil { return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "cgroup does not exist") } diff --git a/runtime/v2/runc/v2/service.go 
b/runtime/v2/runc/v2/service.go index 317c3f8..6dd8d6d 100644 --- a/runtime/v2/runc/v2/service.go +++ b/runtime/v2/runc/v2/service.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,815 +21,59 @@ package v2 import ( "context" - "encoding/json" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "sync" - "syscall" - "time" - "github.com/containerd/cgroups" - cgroupsv2 "github.com/containerd/cgroups/v2" - eventstypes "github.com/containerd/containerd/api/events" - "github.com/containerd/containerd/api/types/task" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/pkg/oom" - oomv1 "github.com/containerd/containerd/pkg/oom/v1" - oomv2 "github.com/containerd/containerd/pkg/oom/v2" - "github.com/containerd/containerd/pkg/process" - "github.com/containerd/containerd/pkg/stdio" - "github.com/containerd/containerd/pkg/userns" - "github.com/containerd/containerd/runtime/v2/runc" - "github.com/containerd/containerd/runtime/v2/runc/options" + "github.com/containerd/containerd/pkg/shutdown" + "github.com/containerd/containerd/runtime/v2/runc/manager" + "github.com/containerd/containerd/runtime/v2/runc/task" "github.com/containerd/containerd/runtime/v2/shim" - taskAPI "github.com/containerd/containerd/runtime/v2/task" - "github.com/containerd/containerd/sys/reaper" - runcC "github.com/containerd/go-runc" - "github.com/containerd/typeurl" - "github.com/gogo/protobuf/proto" - ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" + shimapi "github.com/containerd/containerd/runtime/v2/task" ) -var ( - _ = (taskAPI.TaskService)(&service{}) - empty = &ptypes.Empty{} -) +// TODO(2.0): Remove this package -// group labels specifies how the shim groups services. -// currently supports a runc.v2 specific .group label and the -// standard k8s pod label. 
Order matters in this list -var groupLabels = []string{ - "io.containerd.runc.v2.group", - "io.kubernetes.cri.sandbox-id", +type shimTaskManager struct { + shimapi.TaskService + id string + manager shim.Manager } -type spec struct { - Annotations map[string]string `json:"annotations,omitempty"` -} - -// New returns a new shim service that can be used via GRPC -func New(ctx context.Context, id string, publisher shim.Publisher, shutdown func()) (shim.Shim, error) { - var ( - ep oom.Watcher - err error - ) - if cgroups.Mode() == cgroups.Unified { - ep, err = oomv2.New(publisher) - } else { - ep, err = oomv1.New(publisher) - } +func (stm *shimTaskManager) Cleanup(ctx context.Context) (*shimapi.DeleteResponse, error) { + ss, err := stm.manager.Stop(ctx, stm.id) if err != nil { return nil, err } - go ep.Run(ctx) - s := &service{ - id: id, - context: ctx, - events: make(chan interface{}, 128), - ec: reaper.Default.Subscribe(), - ep: ep, - cancel: shutdown, - containers: make(map[string]*runc.Container), - } - go s.processExits() - runcC.Monitor = reaper.Default - if err := s.initPlatform(); err != nil { - shutdown() - return nil, errors.Wrap(err, "failed to initialized platform behavior") - } - go s.forward(ctx, publisher) - - if address, err := shim.ReadAddress("address"); err == nil { - s.shimAddress = address - } - return s, nil -} - -// service is the shim implementation of a remote shim over GRPC -type service struct { - mu sync.Mutex - eventSendMu sync.Mutex - - context context.Context - events chan interface{} - platform stdio.Platform - ec chan runcC.Exit - ep oom.Watcher - - // id only used in cleanup case - id string - - containers map[string]*runc.Container - - shimAddress string - cancel func() -} - -func newCommand(ctx context.Context, id, containerdBinary, containerdAddress, containerdTTRPCAddress string) (*exec.Cmd, error) { - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return nil, err - } - self, err := os.Executable() - if err != nil 
{ - return nil, err - } - cwd, err := os.Getwd() - if err != nil { - return nil, err - } - args := []string{ - "-namespace", ns, - "-id", id, - "-address", containerdAddress, - } - cmd := exec.Command(self, args...) - cmd.Dir = cwd - cmd.Env = append(os.Environ(), "GOMAXPROCS=4") - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, - } - return cmd, nil -} - -func readSpec() (*spec, error) { - f, err := os.Open("config.json") - if err != nil { - return nil, err - } - defer f.Close() - var s spec - if err := json.NewDecoder(f).Decode(&s); err != nil { - return nil, err - } - return &s, nil -} - -func (s *service) StartShim(ctx context.Context, opts shim.StartOpts) (_ string, retErr error) { - cmd, err := newCommand(ctx, opts.ID, opts.ContainerdBinary, opts.Address, opts.TTRPCAddress) - if err != nil { - return "", err - } - grouping := opts.ID - spec, err := readSpec() - if err != nil { - return "", err - } - for _, group := range groupLabels { - if groupID, ok := spec.Annotations[group]; ok { - grouping = groupID - break - } - } - address, err := shim.SocketAddress(ctx, opts.Address, grouping) - if err != nil { - return "", err - } - - socket, err := shim.NewSocket(address) - if err != nil { - // the only time where this would happen is if there is a bug and the socket - // was not cleaned up in the cleanup method of the shim or we are using the - // grouping functionality where the new process should be run with the same - // shim as an existing container - if !shim.SocketEaddrinuse(err) { - return "", errors.Wrap(err, "create new shim socket") - } - if shim.CanConnect(address) { - if err := shim.WriteAddress("address", address); err != nil { - return "", errors.Wrap(err, "write existing socket for shim") - } - return address, nil - } - if err := shim.RemoveSocket(address); err != nil { - return "", errors.Wrap(err, "remove pre-existing socket") - } - if socket, err = shim.NewSocket(address); err != nil { - return "", errors.Wrap(err, "try create new shim 
socket 2x") - } - } - defer func() { - if retErr != nil { - socket.Close() - _ = shim.RemoveSocket(address) - } - }() - - // make sure that reexec shim-v2 binary use the value if need - if err := shim.WriteAddress("address", address); err != nil { - return "", err - } - - f, err := socket.File() - if err != nil { - return "", err - } - - cmd.ExtraFiles = append(cmd.ExtraFiles, f) - - if err := cmd.Start(); err != nil { - f.Close() - return "", err - } - defer func() { - if retErr != nil { - cmd.Process.Kill() - } - }() - // make sure to wait after start - go cmd.Wait() - if data, err := ioutil.ReadAll(os.Stdin); err == nil { - if len(data) > 0 { - var any ptypes.Any - if err := proto.Unmarshal(data, &any); err != nil { - return "", err - } - v, err := typeurl.UnmarshalAny(&any) - if err != nil { - return "", err - } - if opts, ok := v.(*options.Options); ok { - if opts.ShimCgroup != "" { - if cgroups.Mode() == cgroups.Unified { - if err := cgroupsv2.VerifyGroupPath(opts.ShimCgroup); err != nil { - return "", errors.Wrapf(err, "failed to verify cgroup path %q", opts.ShimCgroup) - } - cg, err := cgroupsv2.LoadManager("/sys/fs/cgroup", opts.ShimCgroup) - if err != nil { - return "", errors.Wrapf(err, "failed to load cgroup %s", opts.ShimCgroup) - } - if err := cg.AddProc(uint64(cmd.Process.Pid)); err != nil { - return "", errors.Wrapf(err, "failed to join cgroup %s", opts.ShimCgroup) - } - } else { - cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(opts.ShimCgroup)) - if err != nil { - return "", errors.Wrapf(err, "failed to load cgroup %s", opts.ShimCgroup) - } - if err := cg.Add(cgroups.Process{ - Pid: cmd.Process.Pid, - }); err != nil { - return "", errors.Wrapf(err, "failed to join cgroup %s", opts.ShimCgroup) - } - } - } - } - } - } - if err := shim.AdjustOOMScore(cmd.Process.Pid); err != nil { - return "", errors.Wrap(err, "failed to adjust OOM score for shim") - } - return address, nil -} - -func (s *service) Cleanup(ctx context.Context) 
(*taskAPI.DeleteResponse, error) { - cwd, err := os.Getwd() - if err != nil { - return nil, err - } - - path := filepath.Join(filepath.Dir(cwd), s.id) - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return nil, err - } - runtime, err := runc.ReadRuntime(path) - if err != nil { - return nil, err - } - opts, err := runc.ReadOptions(path) - if err != nil { - return nil, err - } - root := process.RuncRoot - if opts != nil && opts.Root != "" { - root = opts.Root - } - - r := process.NewRunc(root, path, ns, runtime, "", false) - if err := r.Delete(ctx, s.id, &runcC.DeleteOpts{ - Force: true, - }); err != nil { - logrus.WithError(err).Warn("failed to remove runc container") - } - if err := mount.UnmountAll(filepath.Join(path, "rootfs"), 0); err != nil { - logrus.WithError(err).Warn("failed to cleanup rootfs mount") - } - return &taskAPI.DeleteResponse{ - ExitedAt: time.Now(), - ExitStatus: 128 + uint32(unix.SIGKILL), + return &shimapi.DeleteResponse{ + Pid: uint32(ss.Pid), + ExitStatus: uint32(ss.ExitStatus), + ExitedAt: ss.ExitedAt, }, nil } -// Create a new initial process and container with the underlying OCI runtime -func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *taskAPI.CreateTaskResponse, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - container, err := runc.NewContainer(ctx, s.platform, r) - if err != nil { - return nil, err - } - - s.containers[r.ID] = container - - s.send(&eventstypes.TaskCreate{ - ContainerID: r.ID, - Bundle: r.Bundle, - Rootfs: r.Rootfs, - IO: &eventstypes.TaskIO{ - Stdin: r.Stdin, - Stdout: r.Stdout, - Stderr: r.Stderr, - Terminal: r.Terminal, - }, - Checkpoint: r.Checkpoint, - Pid: uint32(container.Pid()), - }) - - return &taskAPI.CreateTaskResponse{ - Pid: uint32(container.Pid()), - }, nil +func (stm *shimTaskManager) StartShim(ctx context.Context, opts shim.StartOpts) (string, error) { + return stm.manager.Start(ctx, opts.ID, opts) } -// Start a process -func (s *service) Start(ctx 
context.Context, r *taskAPI.StartRequest) (*taskAPI.StartResponse, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - - // hold the send lock so that the start events are sent before any exit events in the error case - s.eventSendMu.Lock() - p, err := container.Start(ctx, r) - if err != nil { - s.eventSendMu.Unlock() - return nil, errdefs.ToGRPC(err) - } - - switch r.ExecID { - case "": - switch cg := container.Cgroup().(type) { - case cgroups.Cgroup: - if err := s.ep.Add(container.ID, cg); err != nil { - logrus.WithError(err).Error("add cg to OOM monitor") - } - case *cgroupsv2.Manager: - allControllers, err := cg.RootControllers() - if err != nil { - logrus.WithError(err).Error("failed to get root controllers") - } else { - if err := cg.ToggleControllers(allControllers, cgroupsv2.Enable); err != nil { - if userns.RunningInUserNS() { - logrus.WithError(err).Debugf("failed to enable controllers (%v)", allControllers) - } else { - logrus.WithError(err).Errorf("failed to enable controllers (%v)", allControllers) - } - } - } - if err := s.ep.Add(container.ID, cg); err != nil { - logrus.WithError(err).Error("add cg to OOM monitor") - } - } - - s.send(&eventstypes.TaskStart{ - ContainerID: container.ID, - Pid: uint32(p.Pid()), - }) - default: - s.send(&eventstypes.TaskExecStarted{ - ContainerID: container.ID, - ExecID: r.ExecID, - Pid: uint32(p.Pid()), - }) - } - s.eventSendMu.Unlock() - return &taskAPI.StartResponse{ - Pid: uint32(p.Pid()), - }, nil -} - -// Delete the initial process and container -func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (*taskAPI.DeleteResponse, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - p, err := container.Delete(ctx, r) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - // if we deleted an init task, send the task delete event - if r.ExecID == "" { - s.mu.Lock() - delete(s.containers, r.ID) - s.mu.Unlock() - 
s.send(&eventstypes.TaskDelete{ - ContainerID: container.ID, - Pid: uint32(p.Pid()), - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), - }) - } - return &taskAPI.DeleteResponse{ - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), - Pid: uint32(p.Pid()), - }, nil -} - -// Exec an additional process inside the container -func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*ptypes.Empty, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - ok, cancel := container.ReserveProcess(r.ExecID) +// New returns a new shim service that can be used for +// - serving the task service over grpc/ttrpc +// - shim management +// This function is deprecated in favor direct creation +// of shim manager and registering task service via plugins. +func New(ctx context.Context, id string, publisher shim.Publisher, fn func()) (shim.Shim, error) { + sd, ok := ctx.(shutdown.Service) if !ok { - return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, "id %s", r.ExecID) + ctx, sd = shutdown.WithShutdown(ctx) + sd.RegisterCallback(func(context.Context) error { + fn() + return nil + }) } - process, err := container.Exec(ctx, r) - if err != nil { - cancel() - return nil, errdefs.ToGRPC(err) - } - - s.send(&eventstypes.TaskExecAdded{ - ContainerID: container.ID, - ExecID: process.ID(), - }) - return empty, nil -} - -// ResizePty of a process -func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (*ptypes.Empty, error) { - container, err := s.getContainer(r.ID) + ts, err := task.NewTaskService(ctx, publisher, sd) if err != nil { return nil, err } - if err := container.ResizePty(ctx, r); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil -} - -// State returns runtime state information for a process -func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (*taskAPI.StateResponse, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return 
nil, err - } - p, err := container.Process(r.ExecID) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - st, err := p.Status(ctx) - if err != nil { - return nil, err - } - status := task.StatusUnknown - switch st { - case "created": - status = task.StatusCreated - case "running": - status = task.StatusRunning - case "stopped": - status = task.StatusStopped - case "paused": - status = task.StatusPaused - case "pausing": - status = task.StatusPausing - } - sio := p.Stdio() - return &taskAPI.StateResponse{ - ID: p.ID(), - Bundle: container.Bundle, - Pid: uint32(p.Pid()), - Status: status, - Stdin: sio.Stdin, - Stdout: sio.Stdout, - Stderr: sio.Stderr, - Terminal: sio.Terminal, - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), + return &shimTaskManager{ + TaskService: ts, + id: id, + manager: manager.NewShimManager("runc"), }, nil } - -// Pause the container -func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (*ptypes.Empty, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - if err := container.Pause(ctx); err != nil { - return nil, errdefs.ToGRPC(err) - } - s.send(&eventstypes.TaskPaused{ - ContainerID: container.ID, - }) - return empty, nil -} - -// Resume the container -func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (*ptypes.Empty, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - if err := container.Resume(ctx); err != nil { - return nil, errdefs.ToGRPC(err) - } - s.send(&eventstypes.TaskResumed{ - ContainerID: container.ID, - }) - return empty, nil -} - -// Kill a process with the provided signal -func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (*ptypes.Empty, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - if err := container.Kill(ctx, r); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil -} - -// Pids returns all pids inside the 
container -func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.PidsResponse, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - pids, err := s.getContainerPids(ctx, r.ID) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - var processes []*task.ProcessInfo - for _, pid := range pids { - pInfo := task.ProcessInfo{ - Pid: pid, - } - for _, p := range container.ExecdProcesses() { - if p.Pid() == int(pid) { - d := &options.ProcessDetails{ - ExecID: p.ID(), - } - a, err := typeurl.MarshalAny(d) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) - } - pInfo.Info = a - break - } - } - processes = append(processes, &pInfo) - } - return &taskAPI.PidsResponse{ - Processes: processes, - }, nil -} - -// CloseIO of a process -func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (*ptypes.Empty, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - if err := container.CloseIO(ctx, r); err != nil { - return nil, err - } - return empty, nil -} - -// Checkpoint the container -func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (*ptypes.Empty, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - if err := container.Checkpoint(ctx, r); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil -} - -// Update a running container -func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (*ptypes.Empty, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - if err := container.Update(ctx, r); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil -} - -// Wait for a process to exit -func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (*taskAPI.WaitResponse, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - 
p, err := container.Process(r.ExecID) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - p.Wait() - - return &taskAPI.WaitResponse{ - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), - }, nil -} - -// Connect returns shim information such as the shim's pid -func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (*taskAPI.ConnectResponse, error) { - var pid int - if container, err := s.getContainer(r.ID); err == nil { - pid = container.Pid() - } - return &taskAPI.ConnectResponse{ - ShimPid: uint32(os.Getpid()), - TaskPid: uint32(pid), - }, nil -} - -func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (*ptypes.Empty, error) { - s.mu.Lock() - defer s.mu.Unlock() - - // return out if the shim is still servicing containers - if len(s.containers) > 0 { - return empty, nil - } - - if s.platform != nil { - s.platform.Close() - } - - if s.shimAddress != "" { - _ = shim.RemoveSocket(s.shimAddress) - } - - // please make sure that temporary resource has been cleanup - // before shutdown service. 
- s.cancel() - close(s.events) - return empty, nil -} - -func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI.StatsResponse, error) { - container, err := s.getContainer(r.ID) - if err != nil { - return nil, err - } - cgx := container.Cgroup() - if cgx == nil { - return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "cgroup does not exist") - } - var statsx interface{} - switch cg := cgx.(type) { - case cgroups.Cgroup: - stats, err := cg.Stat(cgroups.IgnoreNotExist) - if err != nil { - return nil, err - } - statsx = stats - case *cgroupsv2.Manager: - stats, err := cg.Stat() - if err != nil { - return nil, err - } - statsx = stats - default: - return nil, errdefs.ToGRPCf(errdefs.ErrNotImplemented, "unsupported cgroup type %T", cg) - } - data, err := typeurl.MarshalAny(statsx) - if err != nil { - return nil, err - } - return &taskAPI.StatsResponse{ - Stats: data, - }, nil -} - -func (s *service) processExits() { - for e := range s.ec { - s.checkProcesses(e) - } -} - -func (s *service) send(evt interface{}) { - s.events <- evt -} - -func (s *service) sendL(evt interface{}) { - s.eventSendMu.Lock() - s.events <- evt - s.eventSendMu.Unlock() -} - -func (s *service) checkProcesses(e runcC.Exit) { - s.mu.Lock() - defer s.mu.Unlock() - - for _, container := range s.containers { - if !container.HasPid(e.Pid) { - continue - } - - for _, p := range container.All() { - if p.Pid() != e.Pid { - continue - } - - if ip, ok := p.(*process.Init); ok { - // Ensure all children are killed - if runc.ShouldKillAllOnExit(s.context, container.Bundle) { - if err := ip.KillAll(s.context); err != nil { - logrus.WithError(err).WithField("id", ip.ID()). 
- Error("failed to kill init's children") - } - } - } - - p.SetExited(e.Status) - s.sendL(&eventstypes.TaskExit{ - ContainerID: container.ID, - ID: p.ID(), - Pid: uint32(e.Pid), - ExitStatus: uint32(e.Status), - ExitedAt: p.ExitedAt(), - }) - return - } - return - } -} - -func (s *service) getContainerPids(ctx context.Context, id string) ([]uint32, error) { - container, err := s.getContainer(id) - if err != nil { - return nil, err - } - p, err := container.Process("") - if err != nil { - return nil, errdefs.ToGRPC(err) - } - ps, err := p.(*process.Init).Runtime().Ps(ctx, id) - if err != nil { - return nil, err - } - pids := make([]uint32, 0, len(ps)) - for _, pid := range ps { - pids = append(pids, uint32(pid)) - } - return pids, nil -} - -func (s *service) forward(ctx context.Context, publisher shim.Publisher) { - ns, _ := namespaces.Namespace(ctx) - ctx = namespaces.WithNamespace(context.Background(), ns) - for e := range s.events { - err := publisher.Publish(ctx, runc.GetTopic(e), e) - if err != nil { - logrus.WithError(err).Error("post event") - } - } - publisher.Close() -} - -func (s *service) getContainer(id string) (*runc.Container, error) { - s.mu.Lock() - container := s.containers[id] - s.mu.Unlock() - if container == nil { - return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "container not created") - } - return container, nil -} - -// initialize a single epoll fd to manage our consoles. `initPlatform` should -// only be called once. 
-func (s *service) initPlatform() error { - if s.platform != nil { - return nil - } - p, err := runc.NewPlatform() - if err != nil { - return err - } - s.platform = p - return nil -} diff --git a/runtime/v2/shim.go b/runtime/v2/shim.go index 893926c..456ffb4 100644 --- a/runtime/v2/shim.go +++ b/runtime/v2/shim.go @@ -18,8 +18,9 @@ package v2 import ( "context" + "errors" + "fmt" "io" - "io/ioutil" "os" "path/filepath" "time" @@ -38,7 +39,7 @@ import ( "github.com/containerd/containerd/runtime/v2/task" "github.com/containerd/ttrpc" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" + "github.com/hashicorp/go-multierror" "github.com/sirupsen/logrus" ) @@ -55,14 +56,14 @@ func init() { } func loadAddress(path string) (string, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return "", err } return string(data), nil } -func loadShim(ctx context.Context, bundle *Bundle, events *exchange.Exchange, rt *runtime.TaskList, onClose func()) (_ *shim, err error) { +func loadShim(ctx context.Context, bundle *Bundle, onClose func()) (_ *shimTask, err error) { address, err := loadAddress(filepath.Join(bundle.Path, "address")) if err != nil { return nil, err @@ -84,7 +85,7 @@ func loadShim(ctx context.Context, bundle *Bundle, events *exchange.Exchange, rt }() f, err := openShimLog(shimCtx, bundle, client.AnonReconnectDialer) if err != nil { - return nil, errors.Wrap(err, "open shim log pipe when reload") + return nil, fmt.Errorf("open shim log pipe when reload: %w", err) } defer func() { if err != nil { @@ -116,16 +117,18 @@ func loadShim(ctx context.Context, bundle *Bundle, events *exchange.Exchange, rt client.Close() } }() - s := &shim{ - client: client, - task: task.NewTaskClient(client), - bundle: bundle, - events: events, - rtTasks: rt, + s := &shimTask{ + shim: &shim{ + bundle: bundle, + client: client, + }, + task: task.NewTaskClient(client), } ctx, cancel := timeout.WithContext(ctx, loadTimeout) defer cancel() - 
if err := s.Connect(ctx); err != nil { + + // Check connectivity + if _, err := s.PID(ctx); err != nil { return nil, err } return s, nil @@ -183,40 +186,19 @@ func cleanupAfterDeadShim(ctx context.Context, id, ns string, rt *runtime.TaskLi }) } +// ShimProcess represents a shim instance managed by the shim service. +type ShimProcess interface { + runtime.Process + + // ID of the shim. + ID() string + // Namespace of this shim. + Namespace() string +} + type shim struct { - bundle *Bundle - client *ttrpc.Client - task task.TaskService - taskPid int - events *exchange.Exchange - rtTasks *runtime.TaskList -} - -func (s *shim) Connect(ctx context.Context) error { - response, err := s.task.Connect(ctx, &task.ConnectRequest{ - ID: s.ID(), - }) - if err != nil { - return err - } - s.taskPid = int(response.TaskPid) - return nil -} - -func (s *shim) Shutdown(ctx context.Context) error { - _, err := s.task.Shutdown(ctx, &task.ShutdownRequest{ - ID: s.ID(), - }) - if err != nil && !errors.Is(err, ttrpc.ErrClosed) { - return errdefs.FromGRPC(err) - } - return nil -} - -func (s *shim) waitShutdown(ctx context.Context) error { - ctx, cancel := timeout.WithContext(ctx, shutdownTimeout) - defer cancel() - return s.Shutdown(ctx) + bundle *Bundle + client *ttrpc.Client } // ID of the shim/task @@ -224,11 +206,6 @@ func (s *shim) ID() string { return s.bundle.ID } -// PID of the task -func (s *shim) PID() uint32 { - return uint32(s.taskPid) -} - func (s *shim) Namespace() string { return s.bundle.Namespace } @@ -237,7 +214,64 @@ func (s *shim) Close() error { return s.client.Close() } -func (s *shim) Delete(ctx context.Context) (*runtime.Exit, error) { +func (s *shim) delete(ctx context.Context) error { + var ( + result *multierror.Error + ) + + if err := s.Close(); err != nil { + result = multierror.Append(result, fmt.Errorf("failed to close ttrpc client: %w", err)) + } + + if err := s.client.UserOnCloseWait(ctx); err != nil { + result = multierror.Append(result, fmt.Errorf("close 
wait error: %w", err)) + } + + if err := s.bundle.Delete(); err != nil { + log.G(ctx).WithField("id", s.ID()).WithError(err).Error("failed to delete bundle") + result = multierror.Append(result, fmt.Errorf("failed to delete bundle: %w", err)) + } + + return result.ErrorOrNil() +} + +var _ runtime.Task = &shimTask{} + +// shimTask wraps shim process and adds task service client for compatibility with existing shim manager. +type shimTask struct { + *shim + task task.TaskService +} + +func (s *shimTask) Shutdown(ctx context.Context) error { + _, err := s.task.Shutdown(ctx, &task.ShutdownRequest{ + ID: s.ID(), + }) + if err != nil && !errors.Is(err, ttrpc.ErrClosed) { + return errdefs.FromGRPC(err) + } + return nil +} + +func (s *shimTask) waitShutdown(ctx context.Context) error { + ctx, cancel := timeout.WithContext(ctx, shutdownTimeout) + defer cancel() + return s.Shutdown(ctx) +} + +// PID of the task +func (s *shimTask) PID(ctx context.Context) (uint32, error) { + response, err := s.task.Connect(ctx, &task.ConnectRequest{ + ID: s.ID(), + }) + if err != nil { + return 0, errdefs.FromGRPC(err) + } + + return response.TaskPid, nil +} + +func (s *shimTask) delete(ctx context.Context, removeTask func(ctx context.Context, id string)) (*runtime.Exit, error) { response, shimErr := s.task.Delete(ctx, &task.DeleteRequest{ ID: s.ID(), }) @@ -262,24 +296,25 @@ func (s *shim) Delete(ctx context.Context) (*runtime.Exit, error) { // So we should remove the record and prevent duplicate events from // ttrpc-callback-on-close. 
if shimErr == nil { - s.rtTasks.Delete(ctx, s.ID()) + removeTask(ctx, s.ID()) } if err := s.waitShutdown(ctx); err != nil { - log.G(ctx).WithField("id", s.ID()).WithError(err).Error("failed to shutdown shim") + log.G(ctx).WithField("id", s.ID()).WithError(err).Error("failed to shutdown shim task") + } + + if err := s.shim.delete(ctx); err != nil { + log.G(ctx).WithField("id", s.ID()).WithError(err).Error("failed to delete shim") } - s.Close() - s.client.UserOnCloseWait(ctx) // remove self from the runtime task list // this seems dirty but it cleans up the API across runtimes, tasks, and the service - s.rtTasks.Delete(ctx, s.ID()) - if err := s.bundle.Delete(); err != nil { - log.G(ctx).WithField("id", s.ID()).WithError(err).Error("failed to delete bundle") - } + removeTask(ctx, s.ID()) + if shimErr != nil { return nil, shimErr } + return &runtime.Exit{ Status: response.ExitStatus, Timestamp: response.ExitedAt, @@ -287,7 +322,7 @@ func (s *shim) Delete(ctx context.Context) (*runtime.Exit, error) { }, nil } -func (s *shim) Create(ctx context.Context, opts runtime.CreateOpts) (runtime.Task, error) { +func (s *shimTask) Create(ctx context.Context, opts runtime.CreateOpts) (runtime.Task, error) { topts := opts.TaskOptions if topts == nil { topts = opts.RuntimeOptions @@ -309,15 +344,16 @@ func (s *shim) Create(ctx context.Context, opts runtime.CreateOpts) (runtime.Tas Options: m.Options, }) } - response, err := s.task.Create(ctx, request) + + _, err := s.task.Create(ctx, request) if err != nil { return nil, errdefs.FromGRPC(err) } - s.taskPid = int(response.Pid) + return s, nil } -func (s *shim) Pause(ctx context.Context) error { +func (s *shimTask) Pause(ctx context.Context) error { if _, err := s.task.Pause(ctx, &task.PauseRequest{ ID: s.ID(), }); err != nil { @@ -326,7 +362,7 @@ func (s *shim) Pause(ctx context.Context) error { return nil } -func (s *shim) Resume(ctx context.Context) error { +func (s *shimTask) Resume(ctx context.Context) error { if _, err := 
s.task.Resume(ctx, &task.ResumeRequest{ ID: s.ID(), }); err != nil { @@ -335,18 +371,17 @@ func (s *shim) Resume(ctx context.Context) error { return nil } -func (s *shim) Start(ctx context.Context) error { - response, err := s.task.Start(ctx, &task.StartRequest{ +func (s *shimTask) Start(ctx context.Context) error { + _, err := s.task.Start(ctx, &task.StartRequest{ ID: s.ID(), }) if err != nil { return errdefs.FromGRPC(err) } - s.taskPid = int(response.Pid) return nil } -func (s *shim) Kill(ctx context.Context, signal uint32, all bool) error { +func (s *shimTask) Kill(ctx context.Context, signal uint32, all bool) error { if _, err := s.task.Kill(ctx, &task.KillRequest{ ID: s.ID(), Signal: signal, @@ -357,9 +392,9 @@ func (s *shim) Kill(ctx context.Context, signal uint32, all bool) error { return nil } -func (s *shim) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.Process, error) { +func (s *shimTask) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.ExecProcess, error) { if err := identifiers.Validate(id); err != nil { - return nil, errors.Wrapf(err, "invalid exec id %s", id) + return nil, fmt.Errorf("invalid exec id %s: %w", id, err) } request := &task.ExecProcessRequest{ ID: s.ID(), @@ -379,7 +414,7 @@ func (s *shim) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runt }, nil } -func (s *shim) Pids(ctx context.Context) ([]runtime.ProcessInfo, error) { +func (s *shimTask) Pids(ctx context.Context) ([]runtime.ProcessInfo, error) { resp, err := s.task.Pids(ctx, &task.PidsRequest{ ID: s.ID(), }) @@ -396,7 +431,7 @@ func (s *shim) Pids(ctx context.Context) ([]runtime.ProcessInfo, error) { return processList, nil } -func (s *shim) ResizePty(ctx context.Context, size runtime.ConsoleSize) error { +func (s *shimTask) ResizePty(ctx context.Context, size runtime.ConsoleSize) error { _, err := s.task.ResizePty(ctx, &task.ResizePtyRequest{ ID: s.ID(), Width: size.Width, @@ -408,7 +443,7 @@ func (s *shim) ResizePty(ctx 
context.Context, size runtime.ConsoleSize) error { return nil } -func (s *shim) CloseIO(ctx context.Context) error { +func (s *shimTask) CloseIO(ctx context.Context) error { _, err := s.task.CloseIO(ctx, &task.CloseIORequest{ ID: s.ID(), Stdin: true, @@ -419,7 +454,11 @@ func (s *shim) CloseIO(ctx context.Context) error { return nil } -func (s *shim) Wait(ctx context.Context) (*runtime.Exit, error) { +func (s *shimTask) Wait(ctx context.Context) (*runtime.Exit, error) { + taskPid, err := s.PID(ctx) + if err != nil { + return nil, err + } response, err := s.task.Wait(ctx, &task.WaitRequest{ ID: s.ID(), }) @@ -427,13 +466,13 @@ func (s *shim) Wait(ctx context.Context) (*runtime.Exit, error) { return nil, errdefs.FromGRPC(err) } return &runtime.Exit{ - Pid: uint32(s.taskPid), + Pid: taskPid, Timestamp: response.ExitedAt, Status: response.ExitStatus, }, nil } -func (s *shim) Checkpoint(ctx context.Context, path string, options *ptypes.Any) error { +func (s *shimTask) Checkpoint(ctx context.Context, path string, options *ptypes.Any) error { request := &task.CheckpointTaskRequest{ ID: s.ID(), Path: path, @@ -445,7 +484,7 @@ func (s *shim) Checkpoint(ctx context.Context, path string, options *ptypes.Any) return nil } -func (s *shim) Update(ctx context.Context, resources *ptypes.Any, annotations map[string]string) error { +func (s *shimTask) Update(ctx context.Context, resources *ptypes.Any, annotations map[string]string) error { if _, err := s.task.Update(ctx, &task.UpdateTaskRequest{ ID: s.ID(), Resources: resources, @@ -456,7 +495,7 @@ func (s *shim) Update(ctx context.Context, resources *ptypes.Any, annotations ma return nil } -func (s *shim) Stats(ctx context.Context) (*ptypes.Any, error) { +func (s *shimTask) Stats(ctx context.Context) (*ptypes.Any, error) { response, err := s.task.Stats(ctx, &task.StatsRequest{ ID: s.ID(), }) @@ -466,7 +505,7 @@ func (s *shim) Stats(ctx context.Context) (*ptypes.Any, error) { return response.Stats, nil } -func (s *shim) Process(ctx 
context.Context, id string) (runtime.Process, error) { +func (s *shimTask) Process(ctx context.Context, id string) (runtime.ExecProcess, error) { p := &process{ id: id, shim: s, @@ -477,7 +516,7 @@ func (s *shim) Process(ctx context.Context, id string) (runtime.Process, error) return p, nil } -func (s *shim) State(ctx context.Context) (runtime.State, error) { +func (s *shimTask) State(ctx context.Context) (runtime.State, error) { response, err := s.task.State(ctx, &task.StateRequest{ ID: s.ID(), }) diff --git a/runtime/v2/shim/shim.go b/runtime/v2/shim/shim.go index c14aacc..e5822cd 100644 --- a/runtime/v2/shim/shim.go +++ b/runtime/v2/shim/shim.go @@ -18,6 +18,7 @@ package shim import ( "context" + "errors" "flag" "fmt" "io" @@ -30,21 +31,15 @@ import ( "github.com/containerd/containerd/events" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/shutdown" + "github.com/containerd/containerd/plugin" shimapi "github.com/containerd/containerd/runtime/v2/task" "github.com/containerd/containerd/version" "github.com/containerd/ttrpc" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// Client for a shim server -type Client struct { - service shimapi.TaskService - context context.Context - signals chan os.Signal -} - // Publisher for events type Publisher interface { events.Publisher @@ -53,22 +48,38 @@ type Publisher interface { // StartOpts describes shim start configuration received from containerd type StartOpts struct { - ID string + ID string // TODO(2.0): Remove ID, passed directly to start for call symmetry ContainerdBinary string Address string TTRPCAddress string + Debug bool +} + +type StopStatus struct { + Pid int + ExitStatus int + ExitedAt time.Time } // Init func for the creation of a shim server +// TODO(2.0): Remove init function type Init func(context.Context, string, Publisher, func()) (Shim, error) // Shim server interface +// TODO(2.0): 
Remove unified shim interface type Shim interface { shimapi.TaskService Cleanup(ctx context.Context) (*shimapi.DeleteResponse, error) StartShim(ctx context.Context, opts StartOpts) (string, error) } +// Manager is the interface which manages the shim process +type Manager interface { + Name() string + Start(ctx context.Context, id string, opts StartOpts) (string, error) + Stop(ctx context.Context, id string) (StopStatus, error) +} + // OptsKey is the context key for the Opts value. type OptsKey struct{} @@ -91,10 +102,29 @@ type Config struct { NoSetupLogger bool } +type ttrpcService interface { + RegisterTTRPC(*ttrpc.Server) error +} + +type ttrpcServerOptioner interface { + ttrpcService + + UnaryInterceptor() ttrpc.UnaryServerInterceptor +} + +type taskService struct { + shimapi.TaskService +} + +func (t taskService) RegisterTTRPC(server *ttrpc.Server) error { + shimapi.RegisterTaskService(server, t.TaskService) + return nil +} + var ( debugFlag bool versionFlag bool - idFlag string + id string namespaceFlag string socketFlag string bundlePath string @@ -111,7 +141,7 @@ func parseFlags() { flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs") flag.BoolVar(&versionFlag, "v", false, "show the shim version and exit") flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim") - flag.StringVar(&idFlag, "id", "", "id of the task") + flag.StringVar(&id, "id", "", "id of the task") flag.StringVar(&socketFlag, "socket", "", "socket path to serve") flag.StringVar(&bundlePath, "bundle", "", "path to the bundle if not workdir") @@ -136,35 +166,85 @@ func setRuntime() { } } -func setLogger(ctx context.Context, id string) error { - logrus.SetFormatter(&logrus.TextFormatter{ +func setLogger(ctx context.Context, id string) (context.Context, error) { + l := log.G(ctx) + l.Logger.SetFormatter(&logrus.TextFormatter{ TimestampFormat: log.RFC3339NanoFixed, FullTimestamp: true, }) if debugFlag { - logrus.SetLevel(logrus.DebugLevel) + 
l.Logger.SetLevel(logrus.DebugLevel) } f, err := openLog(ctx, id) - if err != nil { - return err + if err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error + return ctx, err } - logrus.SetOutput(f) - return nil + l.Logger.SetOutput(f) + return log.WithLogger(ctx, l), nil } // Run initializes and runs a shim server -func Run(id string, initFunc Init, opts ...BinaryOpts) { +// TODO(2.0): Remove function +func Run(name string, initFunc Init, opts ...BinaryOpts) { var config Config for _, o := range opts { o(&config) } - if err := run(id, initFunc, config); err != nil { - fmt.Fprintf(os.Stderr, "%s: %s\n", id, err) + + ctx := context.Background() + ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", name)) + + if err := run(ctx, nil, initFunc, name, config); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s", name, err) os.Exit(1) } } -func run(id string, initFunc Init, config Config) error { +// TODO(2.0): Remove this type +type shimToManager struct { + shim Shim + name string +} + +func (stm shimToManager) Name() string { + return stm.name +} + +func (stm shimToManager) Start(ctx context.Context, id string, opts StartOpts) (string, error) { + opts.ID = id + return stm.shim.StartShim(ctx, opts) +} + +func (stm shimToManager) Stop(ctx context.Context, id string) (StopStatus, error) { + // shim must already have id + dr, err := stm.shim.Cleanup(ctx) + if err != nil { + return StopStatus{}, err + } + return StopStatus{ + Pid: int(dr.Pid), + ExitStatus: int(dr.ExitStatus), + ExitedAt: dr.ExitedAt, + }, nil +} + +// RunManager initialzes and runs a shim server +// TODO(2.0): Rename to Run +func RunManager(ctx context.Context, manager Manager, opts ...BinaryOpts) { + var config Config + for _, o := range opts { + o(&config) + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", manager.Name())) + + if err := run(ctx, manager, nil, "", config); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s", manager.Name(), err) + 
os.Exit(1) + } +} + +func run(ctx context.Context, manager Manager, initFunc Init, name string, config Config) error { parseFlags() if versionFlag { fmt.Printf("%s:\n", os.Args[0]) @@ -182,12 +262,12 @@ func run(id string, initFunc Init, config Config) error { setRuntime() signals, err := setupSignals(config) - if err != nil { + if err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error return err } if !config.NoSubreaper { - if err := subreaper(); err != nil { + if err := subreaper(); err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error return err } } @@ -199,27 +279,49 @@ func run(id string, initFunc Init, config Config) error { } defer publisher.Close() - ctx := namespaces.WithNamespace(context.Background(), namespaceFlag) + ctx = namespaces.WithNamespace(ctx, namespaceFlag) ctx = context.WithValue(ctx, OptsKey{}, Opts{BundlePath: bundlePath, Debug: debugFlag}) - ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", id)) - ctx, cancel := context.WithCancel(ctx) - service, err := initFunc(ctx, idFlag, publisher, cancel) - if err != nil { - return err - } + ctx, sd := shutdown.WithShutdown(ctx) + defer sd.Shutdown() - switch action { - case "delete": - logger := logrus.WithFields(logrus.Fields{ - "pid": os.Getpid(), - "namespace": namespaceFlag, - }) - go handleSignals(ctx, logger, signals) - response, err := service.Cleanup(ctx) + if manager == nil { + service, err := initFunc(ctx, id, publisher, sd.Shutdown) if err != nil { return err } - data, err := proto.Marshal(response) + plugin.Register(&plugin.Registration{ + Type: plugin.TTRPCPlugin, + ID: "task", + Requires: []plugin.Type{ + plugin.EventPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return taskService{service}, nil + }, + }) + manager = shimToManager{ + shim: service, + name: name, + } + } + + // Handle explicit actions + switch action { + case "delete": + logger := 
log.G(ctx).WithFields(logrus.Fields{ + "pid": os.Getpid(), + "namespace": namespaceFlag, + }) + go reap(ctx, logger, signals) + ss, err := manager.Stop(ctx, id) + if err != nil { + return err + } + data, err := proto.Marshal(&shimapi.DeleteResponse{ + Pid: uint32(ss.Pid), + ExitStatus: uint32(ss.ExitStatus), + ExitedAt: ss.ExitedAt, + }) if err != nil { return err } @@ -229,12 +331,13 @@ func run(id string, initFunc Init, config Config) error { return nil case "start": opts := StartOpts{ - ID: idFlag, ContainerdBinary: containerdBinaryFlag, Address: addressFlag, TTRPCAddress: ttrpcAddress, + Debug: debugFlag, } - address, err := service.StartShim(ctx, opts) + + address, err := manager.Start(ctx, id, opts) if err != nil { return err } @@ -242,46 +345,131 @@ func run(id string, initFunc Init, config Config) error { return err } return nil - default: - if !config.NoSetupLogger { - if err := setLogger(ctx, idFlag); err != nil { - return err - } + } + + if !config.NoSetupLogger { + ctx, err = setLogger(ctx, id) + if err != nil { + return err } - client := NewShimClient(ctx, service, signals) - if err := client.Serve(); err != nil { - if err != context.Canceled { - return err - } + } + + plugin.Register(&plugin.Registration{ + Type: plugin.InternalPlugin, + ID: "shutdown", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return sd, nil + }, + }) + + // Register event plugin + plugin.Register(&plugin.Registration{ + Type: plugin.EventPlugin, + ID: "publisher", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return publisher, nil + }, + }) + + var ( + initialized = plugin.NewPluginSet() + ttrpcServices = []ttrpcService{} + + ttrpcUnaryInterceptors = []ttrpc.UnaryServerInterceptor{} + ) + plugins := plugin.Graph(func(*plugin.Registration) bool { return false }) + for _, p := range plugins { + id := p.URI() + log.G(ctx).WithField("type", p.Type).Infof("loading plugin %q...", id) + + initContext := plugin.NewContext( + ctx, + p, + initialized, + 
// NOTE: Root is empty since the shim does not support persistent storage, + // shim plugins should make use state directory for writing files to disk. + // The state directory will be destroyed when the shim if cleaned up or + // on reboot + "", + bundlePath, + ) + initContext.Address = addressFlag + initContext.TTRPCAddress = ttrpcAddress + + // load the plugin specific configuration if it is provided + // TODO: Read configuration passed into shim, or from state directory? + // if p.Config != nil { + // pc, err := config.Decode(p) + // if err != nil { + // return nil, err + // } + // initContext.Config = pc + // } + + result := p.Init(initContext) + if err := initialized.Add(result); err != nil { + return fmt.Errorf("could not add plugin result to plugin set: %w", err) } - // NOTE: If the shim server is down(like oom killer), the address - // socket might be leaking. - if address, err := ReadAddress("address"); err == nil { - _ = RemoveSocket(address) + instance, err := result.Instance() + if err != nil { + if plugin.IsSkipPlugin(err) { + log.G(ctx).WithError(err).WithField("type", p.Type).Infof("skip loading plugin %q...", id) + continue + } + return fmt.Errorf("failed to load plugin %s: %w", id, err) } - select { - case <-publisher.Done(): - return nil - case <-time.After(5 * time.Second): - return errors.New("publisher not closed") + if src, ok := instance.(ttrpcService); ok { + logrus.WithField("id", id).Debug("registering ttrpc service") + ttrpcServices = append(ttrpcServices, src) + } + + if src, ok := instance.(ttrpcServerOptioner); ok { + ttrpcUnaryInterceptors = append(ttrpcUnaryInterceptors, src.UnaryInterceptor()) + } + } + + if len(ttrpcServices) == 0 { + return fmt.Errorf("required that ttrpc service") + } + + unaryInterceptor := chainUnaryServerInterceptors(ttrpcUnaryInterceptors...) 
+ server, err := newServer(ttrpc.WithUnaryServerInterceptor(unaryInterceptor)) + if err != nil { + return fmt.Errorf("failed creating server: %w", err) + } + + for _, srv := range ttrpcServices { + if err := srv.RegisterTTRPC(server); err != nil { + return fmt.Errorf("failed to register service: %w", err) + } + } + + if err := serve(ctx, server, signals, sd.Shutdown); err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error + if err != shutdown.ErrShutdown { + return err + } + } + + // NOTE: If the shim server is down(like oom killer), the address + // socket might be leaking. + if address, err := ReadAddress("address"); err == nil { + _ = RemoveSocket(address) + } + + select { + case <-publisher.Done(): + return nil + case <-time.After(5 * time.Second): + return errors.New("publisher not closed") } } -// NewShimClient creates a new shim server client -func NewShimClient(ctx context.Context, svc shimapi.TaskService, signals chan os.Signal) *Client { - s := &Client{ - service: svc, - context: ctx, - signals: signals, - } - return s -} - -// Serve the shim server -func (s *Client) Serve() error { +// serve serves the ttrpc API over a unix socket in the current working directory +// and blocks until the context is canceled +func serve(ctx context.Context, server *ttrpc.Server, signals chan os.Signal, shutdown func()) error { dump := make(chan os.Signal, 32) setupDumpStacks(dump) @@ -289,18 +477,19 @@ func (s *Client) Serve() error { if err != nil { return err } - server, err := newServer() - if err != nil { - return errors.Wrap(err, "failed creating server") - } - logrus.Debug("registering ttrpc server") - shimapi.RegisterTaskService(server, s.service) - - if err := serve(s.context, server, socketFlag); err != nil { + l, err := serveListener(socketFlag) + if err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error return err } - logger := logrus.WithFields(logrus.Fields{ + go func() 
{ + defer l.Close() + if err := server.Serve(ctx, l); err != nil && + !strings.Contains(err.Error(), "use of closed network connection") { + log.G(ctx).WithError(err).Fatal("containerd-shim: ttrpc server failure") + } + }() + logger := log.G(ctx).WithFields(logrus.Fields{ "pid": os.Getpid(), "path": path, "namespace": namespaceFlag, @@ -310,24 +499,9 @@ func (s *Client) Serve() error { dumpStacks(logger) } }() - return handleSignals(s.context, logger, s.signals) -} -// serve serves the ttrpc API over a unix socket at the provided path -// this function does not block -func serve(ctx context.Context, server *ttrpc.Server, path string) error { - l, err := serveListener(path) - if err != nil { - return err - } - go func() { - defer l.Close() - if err := server.Serve(ctx, l); err != nil && - !strings.Contains(err.Error(), "use of closed network connection") { - logrus.WithError(err).Fatal("containerd-shim: ttrpc server failure") - } - }() - return nil + go handleExitSignals(ctx, logger, shutdown) + return reap(ctx, logger, signals) } func dumpStacks(logger *logrus.Entry) { diff --git a/runtime/v2/shim/shim_darwin.go b/runtime/v2/shim/shim_darwin.go index 314b45c..0bdf289 100644 --- a/runtime/v2/shim/shim_darwin.go +++ b/runtime/v2/shim/shim_darwin.go @@ -1,5 +1,3 @@ -// +build darwin - /* Copyright The containerd Authors. @@ -20,8 +18,8 @@ package shim import "github.com/containerd/ttrpc" -func newServer() (*ttrpc.Server, error) { - return ttrpc.NewServer() +func newServer(opts ...ttrpc.ServerOpt) (*ttrpc.Server, error) { + return ttrpc.NewServer(opts...) } func subreaper() error { diff --git a/runtime/v2/shim/shim_freebsd.go b/runtime/v2/shim/shim_freebsd.go index 4bc6362..0bdf289 100644 --- a/runtime/v2/shim/shim_freebsd.go +++ b/runtime/v2/shim/shim_freebsd.go @@ -1,5 +1,3 @@ -// +build freebsd - /* Copyright The containerd Authors. 
@@ -20,8 +18,8 @@ package shim import "github.com/containerd/ttrpc" -func newServer() (*ttrpc.Server, error) { - return ttrpc.NewServer() +func newServer(opts ...ttrpc.ServerOpt) (*ttrpc.Server, error) { + return ttrpc.NewServer(opts...) } func subreaper() error { diff --git a/runtime/v2/shim/shim_linux.go b/runtime/v2/shim/shim_linux.go index 06266a5..1c05c2c 100644 --- a/runtime/v2/shim/shim_linux.go +++ b/runtime/v2/shim/shim_linux.go @@ -21,8 +21,9 @@ import ( "github.com/containerd/ttrpc" ) -func newServer() (*ttrpc.Server, error) { - return ttrpc.NewServer(ttrpc.WithServerHandshaker(ttrpc.UnixSocketRequireSameUser())) +func newServer(opts ...ttrpc.ServerOpt) (*ttrpc.Server, error) { + opts = append(opts, ttrpc.WithServerHandshaker(ttrpc.UnixSocketRequireSameUser())) + return ttrpc.NewServer(opts...) } func subreaper() error { diff --git a/runtime/v2/shim/shim_test.go b/runtime/v2/shim/shim_test.go index 2ba6fbc..6077adb 100644 --- a/runtime/v2/shim/shim_test.go +++ b/runtime/v2/shim/shim_test.go @@ -18,7 +18,6 @@ package shim import ( "context" - "os" "runtime" "testing" ) @@ -27,7 +26,7 @@ func TestRuntimeWithEmptyMaxEnvProcs(t *testing.T) { var oldGoMaxProcs = runtime.GOMAXPROCS(0) defer runtime.GOMAXPROCS(oldGoMaxProcs) - os.Setenv("GOMAXPROCS", "") + t.Setenv("GOMAXPROCS", "") setRuntime() var currentGoMaxProcs = runtime.GOMAXPROCS(0) @@ -37,7 +36,7 @@ func TestRuntimeWithEmptyMaxEnvProcs(t *testing.T) { } func TestRuntimeWithNonEmptyMaxEnvProcs(t *testing.T) { - os.Setenv("GOMAXPROCS", "not_empty") + t.Setenv("GOMAXPROCS", "not_empty") setRuntime() var oldGoMaxProcs2 = runtime.GOMAXPROCS(0) if oldGoMaxProcs2 != runtime.NumCPU() { diff --git a/runtime/v2/shim/shim_unix.go b/runtime/v2/shim/shim_unix.go index a61b642..e2dab09 100644 --- a/runtime/v2/shim/shim_unix.go +++ b/runtime/v2/shim/shim_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,6 +21,7 @@ package shim import ( "context" + "fmt" "io" "net" "os" @@ -28,7 +30,6 @@ import 
( "github.com/containerd/containerd/sys/reaper" "github.com/containerd/fifo" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -59,7 +60,7 @@ func serveListener(path string) (net.Listener, error) { path = "[inherited from parent]" } else { if len(path) > socketPathLimit { - return nil, errors.Errorf("%q: unix socket path too long (> %d)", path, socketPathLimit) + return nil, fmt.Errorf("%q: unix socket path too long (> %d)", path, socketPathLimit) } l, err = net.Listen("unix", path) } @@ -70,7 +71,7 @@ func serveListener(path string) (net.Listener, error) { return l, nil } -func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { +func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { logger.Info("starting signal loop") for { @@ -78,6 +79,8 @@ func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Si case <-ctx.Done(): return ctx.Err() case s := <-signals: + // Exit signals are handled separately from this loop + // They get registered with this channel so that we can ignore such signals for short-running actions (e.g. 
`delete`) switch s { case unix.SIGCHLD: if err := reaper.Reap(); err != nil { @@ -89,6 +92,22 @@ func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Si } } +func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) { + ch := make(chan os.Signal, 32) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) + + for { + select { + case s := <-ch: + logger.WithField("signal", s).Debugf("Caught exit signal") + cancel() + return + case <-ctx.Done(): + return + } + } +} + func openLog(ctx context.Context, _ string) (io.Writer, error) { return fifo.OpenFifoDup2(ctx, "log", unix.O_WRONLY, 0700, int(os.Stderr.Fd())) } diff --git a/runtime/v2/shim/shim_windows.go b/runtime/v2/shim/shim_windows.go index 7339eb2..2add7ac 100644 --- a/runtime/v2/shim/shim_windows.go +++ b/runtime/v2/shim/shim_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -20,12 +18,12 @@ package shim import ( "context" + "errors" "io" "net" "os" "github.com/containerd/ttrpc" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -33,7 +31,7 @@ func setupSignals(config Config) (chan os.Signal, error) { return nil, errors.New("not supported") } -func newServer() (*ttrpc.Server, error) { +func newServer(opts ...ttrpc.ServerOpt) (*ttrpc.Server, error) { return nil, errors.New("not supported") } @@ -48,10 +46,13 @@ func serveListener(path string) (net.Listener, error) { return nil, errors.New("not supported") } -func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { +func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { return errors.New("not supported") } +func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) { +} + func openLog(ctx context.Context, _ string) (io.Writer, error) { return nil, errors.New("not supported") } diff --git a/runtime/v2/shim/util.go b/runtime/v2/shim/util.go index 52bfaa9..94c0f53 
100644 --- a/runtime/v2/shim/util.go +++ b/runtime/v2/shim/util.go @@ -19,26 +19,35 @@ package shim import ( "bytes" "context" + "errors" "fmt" - "io/ioutil" "net" "os" - "os/exec" "path/filepath" "strings" - "sync" "time" - "github.com/containerd/containerd/namespaces" + "github.com/containerd/ttrpc" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" + + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/atomicfile" ) -var runtimePaths sync.Map +type CommandConfig struct { + Runtime string + Address string + TTRPCAddress string + Path string + SchedCore bool + Args []string + Opts *types.Any +} // Command returns the shim command with the provided args and configuration -func Command(ctx context.Context, runtime, containerdAddress, containerdTTRPCAddress, path string, opts *types.Any, cmdArgs ...string) (*exec.Cmd, error) { +func Command(ctx context.Context, config *CommandConfig) (*exec.Cmd, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -49,67 +58,23 @@ func Command(ctx context.Context, runtime, containerdAddress, containerdTTRPCAdd } args := []string{ "-namespace", ns, - "-address", containerdAddress, + "-address", config.Address, "-publish-binary", self, } - args = append(args, cmdArgs...) 
- name := BinaryName(runtime) - if name == "" { - return nil, fmt.Errorf("invalid runtime name %s, correct runtime name should format like io.containerd.runc.v1", runtime) - } - - var cmdPath string - cmdPathI, cmdPathFound := runtimePaths.Load(name) - if cmdPathFound { - cmdPath = cmdPathI.(string) - } else { - var lerr error - binaryPath := BinaryPath(runtime) - if _, serr := os.Stat(binaryPath); serr == nil { - cmdPath = binaryPath - } - - if cmdPath == "" { - if cmdPath, lerr = exec.LookPath(name); lerr != nil { - if eerr, ok := lerr.(*exec.Error); ok { - if eerr.Err == exec.ErrNotFound { - // LookPath only finds current directory matches based on - // the callers current directory but the caller is not - // likely in the same directory as the containerd - // executables. Instead match the calling binaries path - // (containerd) and see if they are side by side. If so - // execute the shim found there. - testPath := filepath.Join(filepath.Dir(self), name) - if _, serr := os.Stat(testPath); serr == nil { - cmdPath = testPath - } - if cmdPath == "" { - return nil, errors.Wrapf(os.ErrNotExist, "runtime %q binary not installed %q", runtime, name) - } - } - } - } - } - cmdPath, err = filepath.Abs(cmdPath) - if err != nil { - return nil, err - } - if cmdPathI, cmdPathFound = runtimePaths.LoadOrStore(name, cmdPath); cmdPathFound { - // We didn't store cmdPath we loaded an already cached value. Use it. - cmdPath = cmdPathI.(string) - } - } - - cmd := exec.Command(cmdPath, args...) - cmd.Dir = path + args = append(args, config.Args...) + cmd := exec.CommandContext(ctx, config.Runtime, args...) 
+ cmd.Dir = config.Path cmd.Env = append( os.Environ(), "GOMAXPROCS=2", - fmt.Sprintf("%s=%s", ttrpcAddressEnv, containerdTTRPCAddress), + fmt.Sprintf("%s=%s", ttrpcAddressEnv, config.TTRPCAddress), ) + if config.SchedCore { + cmd.Env = append(cmd.Env, "SCHED_CORE=1") + } cmd.SysProcAttr = getSysProcAttr() - if opts != nil { - d, err := proto.Marshal(opts) + if config.Opts != nil { + d, err := proto.Marshal(config.Opts) if err != nil { return nil, err } @@ -155,17 +120,16 @@ func WritePidFile(path string, pid int) error { if err != nil { return err } - tempPath := filepath.Join(filepath.Dir(path), fmt.Sprintf(".%s", filepath.Base(path))) - f, err := os.OpenFile(tempPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666) + f, err := atomicfile.New(path, 0o666) if err != nil { return err } _, err = fmt.Fprintf(f, "%d", pid) - f.Close() if err != nil { + f.Cancel() return err } - return os.Rename(tempPath, path) + return f.Close() } // WriteAddress writes a address file atomically @@ -174,17 +138,16 @@ func WriteAddress(path, address string) error { if err != nil { return err } - tempPath := filepath.Join(filepath.Dir(path), fmt.Sprintf(".%s", filepath.Base(path))) - f, err := os.OpenFile(tempPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666) + f, err := atomicfile.New(path, 0o666) if err != nil { return err } - _, err = f.WriteString(address) - f.Close() + _, err = f.Write([]byte(address)) if err != nil { + f.Cancel() return err } - return os.Rename(tempPath, path) + return f.Close() } // ErrNoAddress is returned when the address file has no content @@ -196,7 +159,7 @@ func ReadAddress(path string) (string, error) { if err != nil { return "", err } - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return "", err } @@ -205,3 +168,28 @@ func ReadAddress(path string) (string, error) { } return string(data), nil } + +// chainUnaryServerInterceptors creates a single ttrpc server interceptor from +// a chain of many interceptors 
executed from first to last. +func chainUnaryServerInterceptors(interceptors ...ttrpc.UnaryServerInterceptor) ttrpc.UnaryServerInterceptor { + n := len(interceptors) + + // force to use default interceptor in ttrpc + if n == 0 { + return nil + } + + return func(ctx context.Context, unmarshal ttrpc.Unmarshaler, info *ttrpc.UnaryServerInfo, method ttrpc.Method) (interface{}, error) { + currentMethod := method + + for i := n - 1; i > 0; i-- { + interceptor := interceptors[i] + innerMethod := currentMethod + + currentMethod = func(currentCtx context.Context, currentUnmarshal func(interface{}) error) (interface{}, error) { + return interceptor(currentCtx, currentUnmarshal, info, innerMethod) + } + } + return interceptors[0](ctx, unmarshal, info, currentMethod) + } +} diff --git a/runtime/v2/shim/util_test.go b/runtime/v2/shim/util_test.go new file mode 100644 index 0000000..8341bcd --- /dev/null +++ b/runtime/v2/shim/util_test.go @@ -0,0 +1,118 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package shim + +import ( + "context" + "path/filepath" + "reflect" + "testing" + + "github.com/containerd/ttrpc" +) + +func TestChainUnaryServerInterceptors(t *testing.T) { + methodInfo := &ttrpc.UnaryServerInfo{ + FullMethod: filepath.Join("/", t.Name(), "foo"), + } + + type callKey struct{} + callValue := "init" + callCtx := context.WithValue(context.Background(), callKey{}, callValue) + + verifyCallCtxFn := func(ctx context.Context, key interface{}, expected interface{}) { + got := ctx.Value(key) + if !reflect.DeepEqual(expected, got) { + t.Fatalf("[context(key:%s) expected %v, but got %v", key, expected, got) + } + } + + verifyInfoFn := func(info *ttrpc.UnaryServerInfo) { + if !reflect.DeepEqual(methodInfo, info) { + t.Fatalf("[info] expected %+v, but got %+v", methodInfo, info) + } + } + + origUnmarshaler := func(obj interface{}) error { + v := obj.(*int64) + *v *= 2 + return nil + } + + type firstKey struct{} + firstValue := "from first" + var firstUnmarshaler ttrpc.Unmarshaler + first := func(ctx context.Context, unmarshal ttrpc.Unmarshaler, info *ttrpc.UnaryServerInfo, method ttrpc.Method) (interface{}, error) { + verifyCallCtxFn(ctx, callKey{}, callValue) + verifyInfoFn(info) + + ctx = context.WithValue(ctx, firstKey{}, firstValue) + + firstUnmarshaler = func(obj interface{}) error { + if err := unmarshal(obj); err != nil { + return err + } + + v := obj.(*int64) + *v *= 2 + return nil + } + + return method(ctx, firstUnmarshaler) + } + + type secondKey struct{} + secondValue := "from second" + second := func(ctx context.Context, unmarshal ttrpc.Unmarshaler, info *ttrpc.UnaryServerInfo, method ttrpc.Method) (interface{}, error) { + verifyCallCtxFn(ctx, callKey{}, callValue) + verifyCallCtxFn(ctx, firstKey{}, firstValue) + verifyInfoFn(info) + + v := int64(3) // should return 12 + if err := unmarshal(&v); err != nil { + t.Fatalf("unexpected error %v", err) + } + if expected := int64(12); v != expected { + t.Fatalf("expected int64(%v), but got %v", 
expected, v) + } + + ctx = context.WithValue(ctx, secondKey{}, secondValue) + return method(ctx, unmarshal) + } + + methodFn := func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + verifyCallCtxFn(ctx, callKey{}, callValue) + verifyCallCtxFn(ctx, firstKey{}, firstValue) + verifyCallCtxFn(ctx, secondKey{}, secondValue) + + v := int64(2) + if err := unmarshal(&v); err != nil { + return nil, err + } + return v, nil + } + + interceptor := chainUnaryServerInterceptors(first, second) + v, err := interceptor(callCtx, origUnmarshaler, methodInfo, methodFn) + if err != nil { + t.Fatalf("expected nil, but got %v", err) + } + + if expected := int64(8); v != expected { + t.Fatalf("expected result is int64(%v), but got %v", expected, v) + } +} diff --git a/runtime/v2/shim/util_unix.go b/runtime/v2/shim/util_unix.go index f956b09..4e2309a 100644 --- a/runtime/v2/shim/util_unix.go +++ b/runtime/v2/shim/util_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -29,10 +30,9 @@ import ( "syscall" "time" + "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/sys" - "github.com/pkg/errors" ) const ( @@ -53,16 +53,16 @@ func AdjustOOMScore(pid int) error { parent := os.Getppid() score, err := sys.GetOOMScoreAdj(parent) if err != nil { - return errors.Wrap(err, "get parent OOM score") + return fmt.Errorf("get parent OOM score: %w", err) } shimScore := score + 1 if err := sys.AdjustOOMScore(pid, shimScore); err != nil { - return errors.Wrap(err, "set shim OOM score") + return fmt.Errorf("set shim OOM score: %w", err) } return nil } -const socketRoot = "/run/containerd" +const socketRoot = defaults.DefaultStateDir // SocketAddress returns a socket address func SocketAddress(ctx context.Context, socketPath, id string) (string, error) { @@ -76,7 +76,7 @@ func SocketAddress(ctx context.Context, socketPath, id string) (string, 
error) { // AnonDialer returns a dialer for a socket func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { - return dialer.Dialer(socket(address).path(), timeout) + return net.DialTimeout("unix", socket(address).path(), timeout) } // AnonReconnectDialer returns a dialer for an existing socket on reconnection @@ -90,19 +90,25 @@ func NewSocket(address string) (*net.UnixListener, error) { sock = socket(address) path = sock.path() ) - if !sock.isAbstract() { + + isAbstract := sock.isAbstract() + + if !isAbstract { if err := os.MkdirAll(filepath.Dir(path), 0600); err != nil { - return nil, errors.Wrapf(err, "%s", path) + return nil, fmt.Errorf("%s: %w", path, err) } } l, err := net.Listen("unix", path) if err != nil { return nil, err } - if err := os.Chmod(path, 0600); err != nil { - os.Remove(sock.path()) - l.Close() - return nil, err + + if !isAbstract { + if err := os.Chmod(path, 0600); err != nil { + os.Remove(sock.path()) + l.Close() + return nil, err + } } return l.(*net.UnixListener), nil } diff --git a/runtime/v2/shim/util_windows.go b/runtime/v2/shim/util_windows.go index 325c290..b904284 100644 --- a/runtime/v2/shim/util_windows.go +++ b/runtime/v2/shim/util_windows.go @@ -18,13 +18,13 @@ package shim import ( "context" + "fmt" "net" "os" "syscall" "time" winio "github.com/Microsoft/go-winio" - "github.com/pkg/errors" ) const shimBinaryFormat = "containerd-shim-%s-%s.exe" @@ -40,9 +40,9 @@ func AnonReconnectDialer(address string, timeout time.Duration) (net.Conn, error c, err := winio.DialPipeContext(ctx, address) if os.IsNotExist(err) { - return nil, errors.Wrap(os.ErrNotExist, "npipe not found on reconnect") + return nil, fmt.Errorf("npipe not found on reconnect: %w", os.ErrNotExist) } else if err == context.DeadlineExceeded { - return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + return nil, fmt.Errorf("timed out waiting for npipe %s: %w", address, err) } else if err != nil { return nil, err } @@ -65,14 +65,14 @@ 
func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { if os.IsNotExist(err) { select { case <-serveTimer.C: - return nil, errors.Wrap(os.ErrNotExist, "pipe not found before timeout") + return nil, fmt.Errorf("pipe not found before timeout: %w", os.ErrNotExist) default: // Wait 10ms for the shim to serve and try again. time.Sleep(10 * time.Millisecond) continue } } else if err == context.DeadlineExceeded { - return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + return nil, fmt.Errorf("timed out waiting for npipe %s: %w", address, err) } return nil, err } diff --git a/runtime/v2/shim_load.go b/runtime/v2/shim_load.go new file mode 100644 index 0000000..afd2dfb --- /dev/null +++ b/runtime/v2/shim_load.go @@ -0,0 +1,168 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package v2 + +import ( + "context" + "os" + "path/filepath" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" +) + +func (m *ShimManager) loadExistingTasks(ctx context.Context) error { + nsDirs, err := os.ReadDir(m.state) + if err != nil { + return err + } + for _, nsd := range nsDirs { + if !nsd.IsDir() { + continue + } + ns := nsd.Name() + // skip hidden directories + if len(ns) > 0 && ns[0] == '.' 
{ + continue + } + log.G(ctx).WithField("namespace", ns).Debug("loading tasks in namespace") + if err := m.loadShims(namespaces.WithNamespace(ctx, ns)); err != nil { + log.G(ctx).WithField("namespace", ns).WithError(err).Error("loading tasks in namespace") + continue + } + if err := m.cleanupWorkDirs(namespaces.WithNamespace(ctx, ns)); err != nil { + log.G(ctx).WithField("namespace", ns).WithError(err).Error("cleanup working directory in namespace") + continue + } + } + return nil +} + +func (m *ShimManager) loadShims(ctx context.Context) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + shimDirs, err := os.ReadDir(filepath.Join(m.state, ns)) + if err != nil { + return err + } + for _, sd := range shimDirs { + if !sd.IsDir() { + continue + } + id := sd.Name() + // skip hidden directories + if len(id) > 0 && id[0] == '.' { + continue + } + bundle, err := LoadBundle(ctx, m.state, id) + if err != nil { + // fine to return error here, it is a programmer error if the context + // does not have a namespace + return err + } + // fast path + bf, err := os.ReadDir(bundle.Path) + if err != nil { + bundle.Delete() + log.G(ctx).WithError(err).Errorf("fast path read bundle path for %s", bundle.Path) + continue + } + if len(bf) == 0 { + bundle.Delete() + continue + } + + var ( + runtime string + ) + + // If we're on 1.6+ and specified custom path to the runtime binary, path will be saved in 'shim-binary-path' file. 
+ if data, err := os.ReadFile(filepath.Join(bundle.Path, "shim-binary-path")); err == nil { + runtime = string(data) + } else if err != nil && !os.IsNotExist(err) { + log.G(ctx).WithError(err).Error("failed to read `runtime` path from bundle") + } + + // Query runtime name from metadata store + if runtime == "" { + container, err := m.containers.Get(ctx, id) + if err != nil { + log.G(ctx).WithError(err).Errorf("loading container %s", id) + if err := mount.UnmountAll(filepath.Join(bundle.Path, "rootfs"), 0); err != nil { + log.G(ctx).WithError(err).Errorf("failed to unmount of rootfs %s", id) + } + bundle.Delete() + continue + } + runtime = container.Runtime.Name + } + + runtime, err = m.resolveRuntimePath(runtime) + if err != nil { + bundle.Delete() + log.G(ctx).WithError(err).Error("failed to resolve runtime path") + continue + } + + binaryCall := shimBinary(bundle, + shimBinaryConfig{ + runtime: runtime, + address: m.containerdAddress, + ttrpcAddress: m.containerdTTRPCAddress, + schedCore: m.schedCore, + }) + shim, err := loadShim(ctx, bundle, func() { + log.G(ctx).WithField("id", id).Info("shim disconnected") + + cleanupAfterDeadShim(context.Background(), id, ns, m.shims, m.events, binaryCall) + // Remove self from the runtime task list. 
+ m.shims.Delete(ctx, id) + }) + if err != nil { + cleanupAfterDeadShim(ctx, id, ns, m.shims, m.events, binaryCall) + continue + } + m.shims.Add(ctx, shim) + } + return nil +} + +func (m *ShimManager) cleanupWorkDirs(ctx context.Context) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + dirs, err := os.ReadDir(filepath.Join(m.root, ns)) + if err != nil { + return err + } + for _, d := range dirs { + // if the task was not loaded, cleanup and empty working directory + // this can happen on a reboot where /run for the bundle state is cleaned up + // but that persistent working dir is left + if _, err := m.shims.Get(ctx, d.Name()); err != nil { + path := filepath.Join(m.root, ns, d.Name()) + if err := os.RemoveAll(path); err != nil { + log.G(ctx).WithError(err).Errorf("cleanup working dir %s", path) + } + } + } + return nil +} diff --git a/runtime/v2/shim_unix.go b/runtime/v2/shim_unix.go index e5d8305..9ebfb27 100644 --- a/runtime/v2/shim_unix.go +++ b/runtime/v2/shim_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,6 +21,7 @@ package v2 import ( "context" + "errors" "io" "net" "os" @@ -27,7 +29,6 @@ import ( "time" "github.com/containerd/fifo" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) diff --git a/runtime/v2/shim_unix_test.go b/runtime/v2/shim_unix_test.go index a14357e..dba47c8 100644 --- a/runtime/v2/shim_unix_test.go +++ b/runtime/v2/shim_unix_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/runtime/v2/shim_windows.go b/runtime/v2/shim_windows.go index dc007c3..913e5b4 100644 --- a/runtime/v2/shim_windows.go +++ b/runtime/v2/shim_windows.go @@ -18,6 +18,7 @@ package v2 import ( "context" + "errors" "fmt" "io" "net" @@ -26,7 +27,6 @@ import ( "time" "github.com/containerd/containerd/namespaces" - "github.com/pkg/errors" ) type deferredPipeConnection struct { @@ -78,7 +78,7 @@ func openShimLog(ctx context.Context, bundle *Bundle, dialer func(string, time.D 
time.Second*10, ) if conerr != nil { - dpc.conerr = errors.Wrap(conerr, "failed to connect to shim log") + dpc.conerr = fmt.Errorf("failed to connect to shim log: %w", conerr) } dpc.c = c dpc.wg.Done() diff --git a/runtime/v2/shim_windows_test.go b/runtime/v2/shim_windows_test.go index d7660d5..b496c30 100644 --- a/runtime/v2/shim_windows_test.go +++ b/runtime/v2/shim_windows_test.go @@ -18,10 +18,9 @@ package v2 import ( "context" + "errors" "os" "testing" - - "github.com/pkg/errors" ) func TestCheckCopyShimLogError(t *testing.T) { diff --git a/script/setup/config-selinux b/script/setup/config-selinux index 16faf06..67a0d17 100755 --- a/script/setup/config-selinux +++ b/script/setup/config-selinux @@ -45,4 +45,4 @@ case "${SELINUX}" in ;; esac -echo SELinux is $(getenforce) +echo SELinux is "$(getenforce)" diff --git a/script/setup/critools-version b/script/setup/critools-version index eecca7c..df40772 100644 --- a/script/setup/critools-version +++ b/script/setup/critools-version @@ -1 +1 @@ -53ad8bb7f97e1b1d1c0c0634e43a3c2b8b07b718 +v1.27.0 diff --git a/script/setup/enable_docker_tls_on_windows.ps1 b/script/setup/enable_docker_tls_on_windows.ps1 new file mode 100644 index 0000000..07d3485 --- /dev/null +++ b/script/setup/enable_docker_tls_on_windows.ps1 @@ -0,0 +1,22 @@ +Param( + [parameter(Mandatory=$true)] + [string[]]$IPAddresses +) + +$ErrorActionPreference = "Stop" +$IPAddresses += "127.0.0.1" +$IPParams = $IPAddresses -join "," +mkdir $env:USERPROFILE\.docker + +docker run --isolation=hyperv --user=ContainerAdministrator --rm ` + -e SERVER_NAME=$(hostname) ` + -e IP_ADDRESSES=$IPParams ` + -v "c:\programdata\docker:c:\programdata\docker" ` + -v "$env:USERPROFILE\.docker:c:\users\containeradministrator\.docker" stefanscherer/dockertls-windows:2.5.5 + +if ($LASTEXITCODE) { + Throw "Failed to setup Docker TLS: $LASTEXITCODE" +} + +Stop-Service docker +Start-Service docker diff --git a/script/setup/enable_ssh_windows.ps1 b/script/setup/enable_ssh_windows.ps1 
new file mode 100644 index 0000000..f142ecb --- /dev/null +++ b/script/setup/enable_ssh_windows.ps1 @@ -0,0 +1,37 @@ +Param( + [parameter(Mandatory=$true)] + [string]$SSHPublicKey +) + +$ErrorActionPreference = "Stop" + + +function Set-SSHPublicKey { + if(!$SSHPublicKey) { + return + } + $authorizedKeysFile = Join-Path $env:ProgramData "ssh\administrators_authorized_keys" + Set-Content -Path $authorizedKeysFile -Value $SSHPublicKey -Encoding ascii + $acl = Get-Acl $authorizedKeysFile + $acl.SetAccessRuleProtection($true, $false) + $administratorsRule = New-Object system.security.accesscontrol.filesystemaccessrule("Administrators", "FullControl", "Allow") + $systemRule = New-Object system.security.accesscontrol.filesystemaccessrule("SYSTEM", "FullControl", "Allow") + $acl.SetAccessRule($administratorsRule) + $acl.SetAccessRule($systemRule) + $acl | Set-Acl +} + +# Install OpenSSH +$( + +Get-WindowsCapability -Online -Name OpenSSH* | Add-WindowsCapability -Online +Set-Service -Name sshd -StartupType Automatic +Start-Service sshd + +# Authorize SSH key +Set-SSHPublicKey + +# Set PowerShell as default shell +New-ItemProperty -Force -Path "HKLM:\SOFTWARE\OpenSSH" -PropertyType String ` + -Name DefaultShell -Value "C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe" +) *>$1 >> c:\output.txt \ No newline at end of file diff --git a/script/setup/install-cni b/script/setup/install-cni index 3446195..5e67890 100755 --- a/script/setup/install-cni +++ b/script/setup/install-cni @@ -21,21 +21,28 @@ # set -eu -o pipefail -CNI_COMMIT=$(grep containernetworking/plugins "$GOPATH"/src/github.com/containerd/containerd/go.mod | awk '{print $2}') +CNI_COMMIT=${1:-$(go list -f "{{.Version}}" -m github.com/containernetworking/plugins)} CNI_DIR=${DESTDIR:=''}/opt/cni CNI_CONFIG_DIR=${DESTDIR}/etc/cni/net.d +: "${CNI_REPO:=https://github.com/containernetworking/plugins.git}" + +# e2e and Cirrus will fail with "sudo: command not found" +SUDO='' +if (( $EUID != 0 )); then + 
SUDO='sudo' +fi TMPROOT=$(mktemp -d) -git clone https://github.com/containernetworking/plugins.git "${TMPROOT}"/plugins +git clone "${CNI_REPO}" "${TMPROOT}"/plugins pushd "${TMPROOT}"/plugins -git checkout $CNI_COMMIT +git checkout "$CNI_COMMIT" ./build_linux.sh mkdir -p $CNI_DIR cp -r ./bin $CNI_DIR -mkdir -p $CNI_CONFIG_DIR -cat << EOF | tee $CNI_CONFIG_DIR/10-containerd-net.conflist +$SUDO mkdir -p $CNI_CONFIG_DIR +$SUDO cat << EOF | $SUDO tee $CNI_CONFIG_DIR/10-containerd-net.conflist { - "cniVersion": "0.4.0", + "cniVersion": "1.0.0", "name": "containerd-net", "plugins": [ { diff --git a/script/setup/install-cni-windows b/script/setup/install-cni-windows index bd52c10..32534c0 100755 --- a/script/setup/install-cni-windows +++ b/script/setup/install-cni-windows @@ -17,7 +17,7 @@ set -eu -o pipefail DESTDIR="${DESTDIR:-"C:\\Program Files\\containerd"}" -WINCNI_BIN_DIR="${DESTDIR}/cni" +WINCNI_BIN_DIR="${DESTDIR}/cni/bin" WINCNI_PKG=github.com/Microsoft/windows-container-networking WINCNI_VERSION=aa10a0b31e9f72937063436454def1760b858ee2 @@ -37,7 +37,7 @@ split_ip() { local -r varname="$1" local -r ip="$2" for i in {0..3}; do - eval "$varname"[$i]=$( echo "$ip" | cut -d '.' -f $((i + 1)) ) + eval "$varname"[$i]="$( echo "$ip" | cut -d '.' -f $((i + 1)) )" done } @@ -65,10 +65,10 @@ calculate_subnet() { # nat already exists on the Windows VM, the subnet and gateway # we specify should match that. 
-gateway="$(powershell -c "(Get-NetIPAddress -InterfaceAlias 'vEthernet (nat)' -AddressFamily IPv4).IPAddress")" -prefix_len="$(powershell -c "(Get-NetIPAddress -InterfaceAlias 'vEthernet (nat)' -AddressFamily IPv4).PrefixLength")" +: ${GATEWAY:="$(powershell -c "(Get-NetIPAddress -InterfaceAlias 'vEthernet (nat)' -AddressFamily IPv4).IPAddress")"} +: ${PREFIX_LEN:="$(powershell -c "(Get-NetIPAddress -InterfaceAlias 'vEthernet (nat)' -AddressFamily IPv4).PrefixLength")"} -subnet="$(calculate_subnet "$gateway" "$prefix_len")" +subnet="$(calculate_subnet "$GATEWAY" "$PREFIX_LEN")" # The "name" field in the config is used as the underlying # network type right now (see @@ -85,7 +85,7 @@ bash -c 'cat >"'"${CNI_CONFIG_DIR}"'"/0-containerd-nat.conf < /dev/null 2>&1; pwd -P)" -cd "$GOPATH" -go get -u github.com/onsi/ginkgo/ginkgo +# e2e will fail with "sudo: command not found" +SUDO='' +if (( $EUID != 0 )); then + SUDO='sudo' +fi + +cd "$(go env GOPATH)" +go install github.com/onsi/ginkgo/v2/ginkgo@v2.9.2 : "${CRITEST_COMMIT:=$(cat "${script_dir}/critools-version")}" +: "${DESTDIR:=""}" +: "${CRI_TOOLS_REPO:=https://github.com/kubernetes-sigs/cri-tools.git}" TMPROOT=$(mktemp -d) -git clone https://github.com/kubernetes-sigs/cri-tools.git "${TMPROOT}"/cri-tools +git clone "${CRI_TOOLS_REPO}" "${TMPROOT}/cri-tools" pushd "${TMPROOT}"/cri-tools git checkout "$CRITEST_COMMIT" make -make install -e DESTDIR=${DESTDIR:=''} BINDIR=/usr/local/bin +$SUDO make install -e DESTDIR="${DESTDIR}" BINDIR=/usr/local/bin -cat << EOF | tee ${DESTDIR:=''}/etc/crictl.yaml +mkdir -p "${DESTDIR}/etc/" +$SUDO cat << EOF | $SUDO tee "${DESTDIR}/etc/crictl.yaml" runtime-endpoint: unix:///run/containerd/containerd.sock EOF diff --git a/script/setup/install-dev-tools b/script/setup/install-dev-tools index 8ec65cf..b94dc3e 100755 --- a/script/setup/install-dev-tools +++ b/script/setup/install-dev-tools @@ -14,24 +14,19 @@ # See the License for the specific language governing permissions and # 
limitations under the License. - # # Install developer tools to $GOBIN (or $GOPATH/bin if unset) # set -eu -o pipefail -# change to tmp dir, otherwise `go get` will change go.mod -cd "$GOPATH" - -# install the `protobuild` binary in $GOPATH/bin; requires module-aware install -# to pin dependencies -GO111MODULE=on go get github.com/stevvooe/protobuild +# install `protobuild` and other commands +go install github.com/stevvooe/protobuild@v0.1.0 +go install github.com/cpuguy83/go-md2man/v2@v2.0.1 +go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.50.1 # the following packages need to exist in $GOPATH so we can't use # go modules-aware mode of `go get` for these includes used during # proto building +cd "$GOPATH" GO111MODULE=off go get -d github.com/gogo/googleapis || true GO111MODULE=off go get -d github.com/gogo/protobuf || true - -GO111MODULE=on go get github.com/cpuguy83/go-md2man/v2@v2.0.0 -GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.38.0 diff --git a/vendor/github.com/containerd/btrfs/Makefile b/script/setup/install-failpoint-binaries old mode 100644 new mode 100755 similarity index 54% rename from vendor/github.com/containerd/btrfs/Makefile rename to script/setup/install-failpoint-binaries index e89dd46..533eb54 --- a/vendor/github.com/containerd/btrfs/Makefile +++ b/script/setup/install-failpoint-binaries @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # Copyright The containerd Authors. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,23 +14,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Build and install +# +# * cni-bridge-fp into /opt/cni/bin +# * containerd-shim-runc-fp-v1 into /usr/local/bin +# +set -euo pipefail -.PHONY: clean binaries generate lint vet test -all: vet lint test binaries +base_dir="$(dirname "${BASH_SOURCE[0]}")" +root_dir="$( cd "${base_dir}" && pwd )"/../.. 
-binaries: bin/btrfs-test +cd "${root_dir}" -vet: - go vet ./... +CNI_BIN_DIR=${CNI_BIN_DIR:-"/opt/cni/bin"} +make bin/cni-bridge-fp +sudo install bin/cni-bridge-fp "${CNI_BIN_DIR}" -lint: - golint ./... - -test: - go test -v ./... - -bin/%: ./cmd/% *.go - go build -o ./$@ ./$< - -clean: - rm -rf bin/* +SHIM_BIN_DIR=${SHIM_BIN_DIR:-"/usr/local/bin"} +make bin/containerd-shim-runc-fp-v1 +sudo install bin/containerd-shim-runc-fp-v1 "${SHIM_BIN_DIR}" diff --git a/script/setup/install-gotestsum b/script/setup/install-gotestsum index 90c4a3c..5aa5139 100755 --- a/script/setup/install-gotestsum +++ b/script/setup/install-gotestsum @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -GO111MODULE=on go install gotest.tools/gotestsum@v1.6.2 \ No newline at end of file +GO111MODULE=on go install gotest.tools/gotestsum@v1.7.0 diff --git a/script/setup/install-protobuf b/script/setup/install-protobuf index 88a9c98..ffb00fb 100755 --- a/script/setup/install-protobuf +++ b/script/setup/install-protobuf @@ -28,27 +28,27 @@ PROTOBUF_DIR=$(mktemp -d) case $GOARCH in arm64) - wget -O $PROTOBUF_DIR/protobuf "https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/protoc-$PROTOBUF_VERSION-linux-aarch64.zip" - unzip $PROTOBUF_DIR/protobuf -d /usr/local + wget -O "$PROTOBUF_DIR/protobuf" "https://github.com/protocolbuffers/protobuf/releases/download/v$PROTOBUF_VERSION/protoc-$PROTOBUF_VERSION-linux-aarch64.zip" + unzip "$PROTOBUF_DIR/protobuf" -d /usr/local ;; amd64|386) if [ "$GOOS" = windows ]; then - wget -O $PROTOBUF_DIR/protobuf "https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/protoc-$PROTOBUF_VERSION-win32.zip" + wget -O "$PROTOBUF_DIR/protobuf" "https://github.com/protocolbuffers/protobuf/releases/download/v$PROTOBUF_VERSION/protoc-$PROTOBUF_VERSION-win32.zip" elif [ "$GOOS" = linux ]; then - wget -O $PROTOBUF_DIR/protobuf 
"https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/protoc-$PROTOBUF_VERSION-linux-x86_64.zip" + wget -O "$PROTOBUF_DIR/protobuf" "https://github.com/protocolbuffers/protobuf/releases/download/v$PROTOBUF_VERSION/protoc-$PROTOBUF_VERSION-linux-x86_64.zip" fi - unzip $PROTOBUF_DIR/protobuf -d /usr/local + unzip "$PROTOBUF_DIR/protobuf" -d /usr/local ;; ppc64le) - wget -O $PROTOBUF_DIR/protobuf "https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/protoc-$PROTOBUF_VERSION-linux-ppcle_64.zip" - unzip $PROTOBUF_DIR/protobuf -d /usr/local + wget -O "$PROTOBUF_DIR/protobuf" "https://github.com/protocolbuffers/protobuf/releases/download/v$PROTOBUF_VERSION/protoc-$PROTOBUF_VERSION-linux-ppcle_64.zip" + unzip "$PROTOBUF_DIR/protobuf" -d /usr/local ;; *) - wget -O $PROTOBUF_DIR/protobuf "https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/protobuf-cpp-$PROTOBUF_VERSION.zip" - unzip $PROTOBUF_DIR/protobuf -d /usr/src/protobuf - cd /usr/src/protobuf/protobuf-$PROTOBUF_VERSION + wget -O "$PROTOBUF_DIR/protobuf" "https://github.com/protocolbuffers/protobuf/releases/download/v$PROTOBUF_VERSION/protobuf-cpp-$PROTOBUF_VERSION.zip" + unzip "$PROTOBUF_DIR/protobuf" -d /usr/src/protobuf + cd "/usr/src/protobuf/protobuf-$PROTOBUF_VERSION" ./autogen.sh ./configure --disable-shared make @@ -57,4 +57,4 @@ ppc64le) ldconfig ;; esac -rm -rf $PROTOBUF_DIR +rm -rf "$PROTOBUF_DIR" diff --git a/script/setup/install-runc b/script/setup/install-runc index 176ec9f..a03c395 100755 --- a/script/setup/install-runc +++ b/script/setup/install-runc @@ -20,26 +20,36 @@ # set -eu -o pipefail +script_dir="$(cd -- "$(dirname -- "$0")" > /dev/null 2>&1; pwd -P)" + +# e2e and Cirrus will fail with "sudo: command not found" +SUDO='' +if (( $EUID != 0 )); then + SUDO='sudo' +fi + function install_runc() { script_dir="$(cd -- "$(dirname -- "$0")" > /dev/null 2>&1; pwd -P)" # When updating runc-version, consider updating the runc module in go.mod as well 
: "${RUNC_VERSION:=$(cat "${script_dir}/runc-version")}" + : "${RUNC_REPO:=https://github.com/opencontainers/runc.git}" TMPROOT=$(mktemp -d) - git clone https://github.com/opencontainers/runc.git "${TMPROOT}"/runc + git clone "${RUNC_REPO}" "${TMPROOT}"/runc pushd "${TMPROOT}"/runc git checkout "${RUNC_VERSION}" make BUILDTAGS='seccomp' runc - make install + $SUDO make install popd rm -fR "${TMPROOT}" } function install_crun() { - CRUN_VERSION=0.19 - curl -o /usr/local/sbin/runc -L https://github.com/containers/crun/releases/download/"${CRUN_VERSION}"/crun-"${CRUN_VERSION}"-linux-"$(go env GOARCH)" - chmod +x /usr/local/sbin/runc + CRUN_VERSION=1.3 + : "${CRUN_REPO:=https://github.com/containers/crun}" + $SUDO curl -S -o /usr/local/sbin/runc -L "${CRUN_REPO}"/releases/download/"${CRUN_VERSION}"/crun-"${CRUN_VERSION}"-linux-"$(go env GOARCH)" + $SUDO chmod +x /usr/local/sbin/runc } : "${RUNC_FLAVOR:=runc}" diff --git a/script/setup/install-runhcs-shim b/script/setup/install-runhcs-shim new file mode 100755 index 0000000..674de52 --- /dev/null +++ b/script/setup/install-runhcs-shim @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +: ${RUNHCS_VERSION:="$(grep 'Microsoft/hcsshim ' go.mod | awk '{print $2}')"} +: ${RUNHCS_REPO:="https://github.com/Microsoft/hcsshim.git"} +: ${HCSSHIM_SRC:=''} +: ${DESTDIR:=''} +: ${GOOS:="windows"} + +tmpdir="$(mktemp -d)" + +cleanup() { + rm -rf "$tmpdir" +} + +trap 'cleanup' EXIT + +export GOOS +if [ "$HCSSHIM_SRC" == "" ] +then + set -e -x + cd "$tmpdir" + git init . + git remote add origin "$RUNHCS_REPO" + git fetch --tags --depth=1 origin ${RUNHCS_VERSION} +else + cd "${HCSSHIM_SRC}" +fi +git checkout "refs/tags/${RUNHCS_VERSION}" || git checkout "refs/heads/${RUNHCS_VERSION}" || git checkout "${RUNHCS_VERSION}" +GO111MODULE=on go build -mod=vendor -o "${DESTDIR}/containerd-shim-runhcs-v1.exe" ./cmd/containerd-shim-runhcs-v1 diff --git a/script/setup/install-seccomp b/script/setup/install-seccomp index 5122d84..8a9dfb4 100755 --- a/script/setup/install-seccomp +++ b/script/setup/install-seccomp @@ -23,14 +23,15 @@ set -eu -o pipefail set -x export SECCOMP_VERSION=2.5.1 -export SECCOMP_PATH=$(mktemp -d) +SECCOMP_PATH=$(mktemp -d) +export SECCOMP_PATH curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" | tar -xzC "$SECCOMP_PATH" --strip-components=1 ( cd "$SECCOMP_PATH" ./configure --prefix=/usr/local make - make install - ldconfig + sudo make install + sudo ldconfig ) rm -rf "$SECCOMP_PATH" diff --git a/script/setup/prepare_env_windows.ps1 b/script/setup/prepare_env_windows.ps1 new file mode 100644 index 0000000..5777f13 --- /dev/null +++ b/script/setup/prepare_env_windows.ps1 @@ -0,0 +1,51 @@ +# Prepare windows environment for building and running containerd tests + +# Disable Windows Defender real time monitoring. Real time monitoring consumes a lot of +# CPU and slows down tests as images are unarchived, and is not really needed in a short +# lived test environment. 
+Set-MpPreference -DisableRealtimeMonitoring:$true + +$PACKAGES= @{ mingw = "10.2.0"; git = ""; golang = "1.20.8"; make = ""; nssm = "" } + +Write-Host "Downloading chocolatey package" +curl.exe -L "https://packages.chocolatey.org/chocolatey.0.10.15.nupkg" -o 'c:\choco.zip' +Expand-Archive "c:\choco.zip" -DestinationPath "c:\choco" + +Write-Host "Installing choco" +& "c:\choco\tools\chocolateyInstall.ps1" + +Write-Host "Set choco.exe path." +$env:PATH+=";C:\ProgramData\chocolatey\bin" + +Write-Host "Install necessary packages" + +foreach ($package in $PACKAGES.Keys) { + $command = "choco.exe install $package --yes" + $version = $PACKAGES[$package] + if (-Not [string]::IsNullOrEmpty($version)) { + $command += " --version $version" + } + Invoke-Expression $command +} + +Write-Host "Set up environment." + +$userGoBin = "${env:HOME}\go\bin" +$path = ";c:\Program Files\Git\bin;c:\Program Files\Go\bin;${userGoBin};c:\containerd\bin" +$env:PATH+=$path + +Write-Host $env:PATH + +[Environment]::SetEnvironmentVariable("PATH", $env:PATH, 'User') + +# Prepare Log dir +mkdir c:\Logs + +# Pull junit conversion tool +go install github.com/jstemmer/go-junit-report@v0.9.1 + +# Get critctl tool. 
Used for cri-integration tests +$CRICTL_DOWNLOAD_URL="https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.21.0/crictl-v1.21.0-windows-amd64.tar.gz" +curl.exe -L $CRICTL_DOWNLOAD_URL -o c:\crictl.tar.gz +tar -xvf c:\crictl.tar.gz +mv crictl.exe "${userGoBin}\crictl.exe" # Move crictl somewhere in path diff --git a/script/setup/prepare_windows_docker_helper.ps1 b/script/setup/prepare_windows_docker_helper.ps1 new file mode 100644 index 0000000..dff9c06 --- /dev/null +++ b/script/setup/prepare_windows_docker_helper.ps1 @@ -0,0 +1,21 @@ +$ErrorActionPreference = "Stop" + +# Enable Hyper-V and management tools +Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V,Microsoft-Hyper-V-Management-Clients,Microsoft-Hyper-V-Management-PowerShell -All -NoRestart + +# Enable SSH (this can be skipped if you don't need it) +Add-WindowsCapability -Online -Name OpenSSH* + +# Install Docker +Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force -Confirm:$false +Install-Module -Name DockerMsftProvider -Repository PSGallery -Force -Confirm:$false +Install-Package -Name docker -ProviderName DockerMsftProvider -Force -Confirm:$false + +# Open SSH port +New-NetFirewallRule -Name 'OpenSSH-Server-In-TCP' -DisplayName 'OpenSSH Server (sshd)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22 + +# Open Docker port +New-NetFirewallRule -Name 'Docker-TLS-In-TCP' -DisplayName 'Docker (TLS)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 2376 + +# Restart +Restart-Computer -Force diff --git a/script/setup/runc-version b/script/setup/runc-version index e946d6b..1573078 100644 --- a/script/setup/runc-version +++ b/script/setup/runc-version @@ -1 +1 @@ -v1.0.3 +v1.1.9 diff --git a/script/test/cri-integration.sh b/script/test/cri-integration.sh index 795ff8e..76cd738 100755 --- a/script/test/cri-integration.sh +++ b/script/test/cri-integration.sh @@ -14,34 +14,42 @@ # See the License for the specific 
language governing permissions and # limitations under the License. +set -o errexit set -o nounset set -o pipefail -source $(dirname "${BASH_SOURCE[0]}")/utils.sh -cd ${ROOT} +basedir="$(dirname "${BASH_SOURCE[0]}")" +source "${basedir}/utils.sh" + +trap test_teardown EXIT + +ROOT="$( cd "${basedir}" && pwd )"/../.. +cd "${ROOT}" # FOCUS focuses the test to run. FOCUS=${FOCUS:-""} -# REPORT_DIR is the the directory to store test logs. -REPORT_DIR=${REPORT_DIR:-"/tmp/test-integration"} +# REPORT_DIR is the directory to store test logs. +if [ $IS_WINDOWS -eq 0 ]; then + REPORT_DIR=${REPORT_DIR:-"/tmp/test-integration"} +else + REPORT_DIR=${REPORT_DIR:-"C:/Windows/Temp/test-integration"} +fi # RUNTIME is the runtime handler to use in the test. RUNTIME=${RUNTIME:-""} CRI_ROOT="${CONTAINERD_ROOT}/io.containerd.grpc.v1.cri" - -mkdir -p ${REPORT_DIR} -test_setup ${REPORT_DIR} +mkdir -p "${REPORT_DIR}" +test_setup "${REPORT_DIR}" # Run integration test. -sudo PATH=${PATH} bin/cri-integration.test --test.run="${FOCUS}" --test.v \ - --cri-endpoint=${CONTAINERD_SOCK} \ - --cri-root=${CRI_ROOT} \ - --runtime-handler=${RUNTIME} \ - --containerd-bin=${CONTAINERD_BIN} \ - --image-list="${TEST_IMAGE_LIST:-}" +${sudo} bin/cri-integration.test --test.run="${FOCUS}" --test.v \ + --cri-endpoint="${CONTAINERD_SOCK}" \ + --cri-root="${CRI_ROOT}" \ + --runtime-handler="${RUNTIME}" \ + --containerd-bin="${CONTAINERD_BIN}" \ + --image-list="${TEST_IMAGE_LIST:-}" && test_exit_code=$? || test_exit_code=$? -test_exit_code=$? - -test_teardown +test $test_exit_code -ne 0 && \ + cat "$REPORT_DIR/containerd.log" exit ${test_exit_code} diff --git a/script/test/utils.sh b/script/test/utils.sh index 6093bf8..a470e15 100755 --- a/script/test/utils.sh +++ b/script/test/utils.sh @@ -14,23 +14,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/../.. 
+IS_WINDOWS=0 +if [ -v "OS" ] && [ "${OS}" == "Windows_NT" ]; then + IS_WINDOWS=1 +fi # RESTART_WAIT_PERIOD is the period to wait before restarting containerd. RESTART_WAIT_PERIOD=${RESTART_WAIT_PERIOD:-10} -# CONTAINERD_FLAGS contains all containerd flags. -CONTAINERD_FLAGS="--log-level=debug " + +if [ $IS_WINDOWS -eq 0 ]; then + CONTAINERD_CONFIG_DIR=${CONTAINERD_CONFIG_DIR:-"/tmp"} +else + CONTAINERD_CONFIG_DIR=${CONTAINERD_CONFIG_DIR:-"c:/Windows/Temp"} +fi # Use a configuration file for containerd. CONTAINERD_CONFIG_FILE=${CONTAINERD_CONFIG_FILE:-""} # The runtime to use (ignored when CONTAINERD_CONFIG_FILE is set) CONTAINERD_RUNTIME=${CONTAINERD_RUNTIME:-""} if [ -z "${CONTAINERD_CONFIG_FILE}" ]; then - config_file="/tmp/containerd-config-cri.toml" + config_file="${CONTAINERD_CONFIG_DIR}/containerd-config-cri.toml" truncate --size 0 "${config_file}" + echo "version=2" >> ${config_file} + if command -v sestatus >/dev/null 2>&1; then cat >>${config_file} < /dev/null; then + sudo="sudo PATH=${PATH}" +fi + + +# The run_containerd function is a wrapper that will run the appropriate +# containerd command based on the OS we're running the tests on. This wrapper +# is needed if we plan to run the containerd command as part of a retry cycle +# as is the case on Linux, where we use the keepalive function. Using a wrapper +# allows us to avoid the need for eval, while allowing us to quote the paths +# to the state and root folders. This allows us to use paths that have spaces +# in them without erring out. +run_containerd() { + # not used on linux + if [ $# -gt 0 ]; then + local report_dir=$1 + fi + CMD="" + if [ ! 
-z "${sudo}" ]; then + CMD+="${sudo} " + fi + CMD+="${PWD}/bin/containerd" + + if [ $IS_WINDOWS -eq 0 ]; then + $CMD --log-level=debug \ + --config "${CONTAINERD_CONFIG_FILE}" \ + --address "${TRIMMED_CONTAINERD_SOCK}" \ + --state "${CONTAINERD_STATE}" \ + --root "${CONTAINERD_ROOT}" + else + # Note(gsamfira): On Windows, we register a containerd-test service which will run under + # LocalSystem. This user is part of the local Administrators group and should have all + # required permissions to successfully start containers. + # The --register-service parameter will do this for us. + $CMD --log-level=debug \ + --config "${CONTAINERD_CONFIG_FILE}" \ + --address "${TRIMMED_CONTAINERD_SOCK}" \ + --state "${CONTAINERD_STATE}" \ + --root "${CONTAINERD_ROOT}" \ + --log-file "${report_dir}/containerd.log" \ + --service-name containerd-test \ + --register-service + fi +} # test_setup starts containerd. test_setup() { @@ -75,11 +203,31 @@ test_setup() { set -m # Create containerd in a different process group # so that we can easily clean them up. - keepalive "sudo PATH=${PATH} bin/containerd ${CONTAINERD_FLAGS}" \ - ${RESTART_WAIT_PERIOD} &> ${report_dir}/containerd.log & - pid=$! + if [ $IS_WINDOWS -eq 0 ]; then + keepalive run_containerd \ + "${RESTART_WAIT_PERIOD}" &> "${report_dir}/containerd.log" & + pid=$! + else + if [ ! -d "${CONTAINERD_ROOT}" ]; then + # Create the containerd ROOT dir and set full access to be inherited for "CREATOR OWNER" + # on all subfolders and files. + mkdir -p "${CONTAINERD_ROOT}" + cmd.exe /c 'icacls.exe "'$(cygpath -w "${CONTAINERD_ROOT}")'" /grant "CREATOR OWNER":(OI)(CI)(IO)F /T' + fi + + run_containerd "$report_dir" + + # Set failure flag on the test service. This will restart the service + # in case of failure. + sc.exe failure containerd-test reset=0 actions=restart/1000 + sc.exe failureflag containerd-test 1 + + # it might still result in SERVICE_START_PENDING, but we can ignore it. 
+ sc.exe start containerd-test || true + pid="1" # for teardown + fi set +m - containerd_groupid=$(ps -o pgid= -p ${pid}) + # Wait for containerd to be running by using the containerd client ctr to check the version # of the containerd server. Wait an increasing amount of time after each of five attempts local -r crictl_path=$(which crictl) @@ -87,26 +235,51 @@ test_setup() { echo "crictl is not in PATH" exit 1 fi - readiness_check "sudo bin/ctr --address ${CONTAINERD_SOCK#"unix://"} version" - readiness_check "sudo ${crictl_path} --runtime-endpoint=${CONTAINERD_SOCK} info" + readiness_check run_ctr + readiness_check run_crictl + # Show the config about cri plugin in log when it's ready + run_crictl } # test_teardown kills containerd. test_teardown() { - if [ -n "${containerd_groupid}" ]; then - sudo pkill -g ${containerd_groupid} + if [ -n "${pid}" ]; then + if [ $IS_WINDOWS -eq 1 ]; then + # Mark service for deletion. It will be deleted as soon as the service stops. + sc.exe delete containerd-test + # Stop the service + sc.exe stop containerd-test || true + else + pgid=$(ps -o pgid= -p "${pid}" || true) + if [ ! -z "${pgid}" ]; then + ${sudo} pkill -g ${pgid} + else + echo "pid(${pid}) not found, skipping pkill" + fi + fi fi } +run_ctr() { + ${sudo} ${PWD}/bin/ctr --address "${TRIMMED_CONTAINERD_SOCK}" version +} + +run_crictl() { + ${sudo} ${crictl_path} --runtime-endpoint="${CONTAINERD_SOCK}" info +} + # keepalive runs a command and keeps it alive. # keepalive process is eventually killed in test_teardown. keepalive() { + # The command may return non-zero and we want to continue this script. + # e.g. 
containerd receives SIGKILL + set +e local command=$1 - echo ${command} + echo "${command}" local wait_period=$2 while true; do ${command} - sleep ${wait_period} + sleep "${wait_period}" done } @@ -115,7 +288,7 @@ readiness_check() { local command=$1 local MAX_ATTEMPTS=5 local attempt_num=1 - until ${command} &> /dev/null || (( attempt_num == MAX_ATTEMPTS )) + until ${command} &>/dev/null || (( attempt_num == MAX_ATTEMPTS )) do echo "$attempt_num attempt \"$command\"! Trying again in $attempt_num seconds..." sleep $(( attempt_num++ )) diff --git a/script/verify-go-modules.sh b/script/verify-go-modules.sh new file mode 100755 index 0000000..62e8db5 --- /dev/null +++ b/script/verify-go-modules.sh @@ -0,0 +1,112 @@ +#!/usr/bin/env bash + +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# verifies if the require and replace directives for two go.mod files are in sync +# +set -eu -o pipefail + +ROOT=$(dirname "${BASH_SOURCE}")/.. + +if [ "$#" -ne 1 ]; then + echo "Usage: $0 dir-for-second-go-mod" + exit 1 +fi + +if ! 
command -v jq &> /dev/null ; then + echo Please install jq + exit 1 +fi + +# Load the requires and replaces section in the root go.mod file +declare -A map_requires_1 +declare -A map_replaces_1 +pushd "${ROOT}" > /dev/null +while IFS='#' read -r key value +do + map_requires_1[$key]="$value" +done<<<$(go mod edit -json | jq -r '.Require[] | .Path + " # " + .Version') +while IFS='#' read -r key value +do + map_replaces_1[$key]="$value" +done<<<$(go mod edit -json | jq -r '.Replace[] | .Old.Path + " # " + .New.Path + " : " + .New.Version') +popd > /dev/null + +# Load the requires and replaces section in the other go.mod file +declare -A map_requires_2 +declare -A map_replaces_2 +pushd "${ROOT}/$1" > /dev/null +while IFS='#' read -r key value +do + map_requires_2[$key]="$value" +done<<<$(go mod edit -json | jq -r '.Require[] | .Path + " # " + .Version') +while IFS='#' read -r key value +do + map_replaces_2[$key]="$value" +done<<<$(go mod edit -json | jq -r '.Replace[] | .Old.Path + " # " + .New.Path + " : " + .New.Version') +popd > /dev/null + +# signal for errors later +ERRORS=0 + +# iterate through the second go.mod's require section and ensure that all items +# have the same values in the root go.mod replace section +for k in "${!map_requires_2[@]}" +do + if [ -v "map_requires_1[$k]" ]; then + if [ "${map_requires_2[$k]}" != "${map_requires_1[$k]}" ]; then + echo "${k} has different values in the go.mod files require section:" \ + "${map_requires_1[$k]} in root go.mod ${map_requires_2[$k]} in $1/go.mod" + ERRORS=$(( ERRORS + 1 )) + fi + fi +done + +# iterate through the second go.mod's replace section and ensure that all items +# have the same values in the root go.mod's replace section. 
Except for the +# containerd/containerd which we know will be different +for k in "${!map_replaces_2[@]}" +do + if [[ "${k}" == "github.com/containerd/containerd"* ]]; then + continue + fi + if [ -v "map_replaces_1[$k]" ]; then + if [ "${map_replaces_2[$k]}" != "${map_replaces_1[$k]}" ]; then + echo "${k} has different values in the go.mod files replace section:" \ + "${map_replaces_1[$k]} in root go.mod ${map_replaces_2[$k]} in $1/go.mod" + ERRORS=$(( ERRORS + 1 )) + fi + fi +done + +# iterate through the root go.mod's replace section and ensure that all the +# same items are present in the second go.mod's replace section and nothing is missing +for k in "${!map_replaces_1[@]}" +do + if [[ "${k}" == "github.com/containerd/containerd"* ]]; then + continue + fi + if [ ! -v "map_replaces_2[$k]" ]; then + echo "${k} has an entry in root go.mod replace section, but is missing from" \ + " replace section in $1/go.mod" + ERRORS=$(( ERRORS + 1 )) + fi +done + +if [ "$ERRORS" -ne 0 ]; then + echo "Found $ERRORS error(s)." + exit 1 +fi diff --git a/services.go b/services.go index d8fced5..e780e6c 100644 --- a/services.go +++ b/services.go @@ -55,13 +55,20 @@ func WithContentStore(contentStore content.Store) ServicesOpt { } } -// WithImageService sets the image service. -func WithImageService(imageService imagesapi.ImagesClient) ServicesOpt { +// WithImageClient sets the image service to use using an images client. +func WithImageClient(imageService imagesapi.ImagesClient) ServicesOpt { return func(s *services) { s.imageStore = NewImageStoreFromClient(imageService) } } +// WithImageStore sets the image store. +func WithImageStore(imageStore images.Store) ServicesOpt { + return func(s *services) { + s.imageStore = imageStore + } +} + // WithSnapshotters sets the snapshotters. 
func WithSnapshotters(snapshotters map[string]snapshots.Snapshotter) ServicesOpt { return func(s *services) { @@ -72,27 +79,41 @@ func WithSnapshotters(snapshotters map[string]snapshots.Snapshotter) ServicesOpt } } -// WithContainerService sets the container service. -func WithContainerService(containerService containersapi.ContainersClient) ServicesOpt { +// WithContainerClient sets the container service to use using a containers client. +func WithContainerClient(containerService containersapi.ContainersClient) ServicesOpt { return func(s *services) { s.containerStore = NewRemoteContainerStore(containerService) } } -// WithTaskService sets the task service. -func WithTaskService(taskService tasks.TasksClient) ServicesOpt { +// WithContainerStore sets the container store. +func WithContainerStore(containerStore containers.Store) ServicesOpt { + return func(s *services) { + s.containerStore = containerStore + } +} + +// WithTaskClient sets the task service to use from a tasks client. +func WithTaskClient(taskService tasks.TasksClient) ServicesOpt { return func(s *services) { s.taskService = taskService } } -// WithDiffService sets the diff service. -func WithDiffService(diffService diff.DiffClient) ServicesOpt { +// WithDiffClient sets the diff service to use from a diff client. +func WithDiffClient(diffService diff.DiffClient) ServicesOpt { return func(s *services) { s.diffService = NewDiffServiceFromClient(diffService) } } +// WithDiffService sets the diff store. +func WithDiffService(diffService DiffService) ServicesOpt { + return func(s *services) { + s.diffService = diffService + } +} + // WithEventService sets the event service. func WithEventService(eventService EventService) ServicesOpt { return func(s *services) { @@ -100,13 +121,20 @@ func WithEventService(eventService EventService) ServicesOpt { } } -// WithNamespaceService sets the namespace service. 
-func WithNamespaceService(namespaceService namespacesapi.NamespacesClient) ServicesOpt { +// WithNamespaceClient sets the namespace service using a namespaces client. +func WithNamespaceClient(namespaceService namespacesapi.NamespacesClient) ServicesOpt { return func(s *services) { s.namespaceStore = NewNamespaceStoreFromClient(namespaceService) } } +// WithNamespaceService sets the namespace service. +func WithNamespaceService(namespaceService namespaces.Store) ServicesOpt { + return func(s *services) { + s.namespaceStore = namespaceService + } +} + // WithLeasesService sets the lease service. func WithLeasesService(leasesService leases.Manager) ServicesOpt { return func(s *services) { @@ -114,9 +142,16 @@ func WithLeasesService(leasesService leases.Manager) ServicesOpt { } } -// WithIntrospectionService sets the introspection service. -func WithIntrospectionService(in introspectionapi.IntrospectionClient) ServicesOpt { +// WithIntrospectionClient sets the introspection service using an introspection client. +func WithIntrospectionClient(in introspectionapi.IntrospectionClient) ServicesOpt { return func(s *services) { s.introspectionService = introspection.NewIntrospectionServiceFromClient(in) } } + +// WithIntrospectionService sets the introspection service. 
+func WithIntrospectionService(in introspection.Service) ServicesOpt { + return func(s *services) { + s.introspectionService = in + } +} diff --git a/services/containers/helpers.go b/services/containers/helpers.go index dde4cae..aece9ca 100644 --- a/services/containers/helpers.go +++ b/services/containers/helpers.go @@ -25,6 +25,7 @@ func containersToProto(containers []containers.Container) []api.Container { var containerspb []api.Container for _, image := range containers { + image := image containerspb = append(containerspb, containerToProto(&image)) } diff --git a/services/containers/local.go b/services/containers/local.go index b133649..843512e 100644 --- a/services/containers/local.go +++ b/services/containers/local.go @@ -41,6 +41,7 @@ func init() { Type: plugin.ServicePlugin, ID: services.ContainersService, Requires: []plugin.Type{ + plugin.EventPlugin, plugin.MetadataPlugin, }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { @@ -48,12 +49,16 @@ func init() { if err != nil { return nil, err } + ep, err := ic.Get(plugin.EventPlugin) + if err != nil { + return nil, err + } db := m.(*metadata.DB) return &local{ Store: metadata.NewContainerStore(db), db: db, - publisher: ic.Events, + publisher: ep.(events.Publisher), }, nil }, }) diff --git a/services/containers/service.go b/services/containers/service.go index 77e8449..a8bfd96 100644 --- a/services/containers/service.go +++ b/services/containers/service.go @@ -18,13 +18,13 @@ package containers import ( "context" + "errors" "io" api "github.com/containerd/containerd/api/services/containers/v1" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "google.golang.org/grpc" ) diff --git a/services/content/contentserver/contentserver.go b/services/content/contentserver/contentserver.go index 7b6efdb..eb5855a 100644 --- a/services/content/contentserver/contentserver.go +++ 
b/services/content/contentserver/contentserver.go @@ -18,6 +18,7 @@ package contentserver import ( "context" + "fmt" "io" "sync" @@ -28,7 +29,6 @@ import ( ptypes "github.com/gogo/protobuf/types" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -383,7 +383,7 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { if req.Offset == 0 && ws.Offset > 0 { if err := wr.Truncate(req.Offset); err != nil { - return errors.Wrapf(err, "truncate failed") + return fmt.Errorf("truncate failed: %w", err) } msg.Offset = req.Offset } @@ -423,6 +423,10 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { return err } + if req.Action == api.WriteActionCommit { + return nil + } + req, err = session.Recv() if err != nil { if err == io.EOF { diff --git a/services/content/service.go b/services/content/service.go index 43320d5..4717e62 100644 --- a/services/content/service.go +++ b/services/content/service.go @@ -17,11 +17,12 @@ package content import ( + "errors" + "github.com/containerd/containerd/content" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/services" "github.com/containerd/containerd/services/content/contentserver" - "github.com/pkg/errors" ) func init() { diff --git a/services/content/store.go b/services/content/store.go index 3de91d3..016cd06 100644 --- a/services/content/store.go +++ b/services/content/store.go @@ -39,6 +39,7 @@ func init() { Type: plugin.ServicePlugin, ID: services.ContentService, Requires: []plugin.Type{ + plugin.EventPlugin, plugin.MetadataPlugin, }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { @@ -46,8 +47,12 @@ func init() { if err != nil { return nil, err } + ep, err := ic.Get(plugin.EventPlugin) + if err != nil { + return nil, err + } - s, err := newContentStore(m.(*metadata.DB).ContentStore(), 
ic.Events) + s, err := newContentStore(m.(*metadata.DB).ContentStore(), ep.(events.Publisher)) return s, err }, }) diff --git a/services/diff/local.go b/services/diff/local.go index f05b222..7d0f071 100644 --- a/services/diff/local.go +++ b/services/diff/local.go @@ -18,6 +18,7 @@ package diff import ( "context" + "fmt" diffapi "github.com/containerd/containerd/api/services/diff/v1" "github.com/containerd/containerd/api/types" @@ -27,7 +28,6 @@ import ( "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/services" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "google.golang.org/grpc" ) @@ -65,16 +65,16 @@ func init() { for i, n := range orderedNames { differp, ok := differs[n] if !ok { - return nil, errors.Errorf("needed differ not loaded: %s", n) + return nil, fmt.Errorf("needed differ not loaded: %s", n) } d, err := differp.Instance() if err != nil { - return nil, errors.Wrapf(err, "could not load required differ due plugin init error: %s", n) + return nil, fmt.Errorf("could not load required differ due plugin init error: %s: %w", n, err) } ordered[i], ok = d.(differ) if !ok { - return nil, errors.Errorf("differ does not implement Comparer and Applier interface: %s", n) + return nil, fmt.Errorf("differ does not implement Comparer and Applier interface: %s", n) } } diff --git a/services/diff/service.go b/services/diff/service.go index 369e8f8..4ac7809 100644 --- a/services/diff/service.go +++ b/services/diff/service.go @@ -18,11 +18,11 @@ package diff import ( "context" + "errors" diffapi "github.com/containerd/containerd/api/services/diff/v1" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/services" - "github.com/pkg/errors" "google.golang.org/grpc" ) diff --git a/services/diff/service_unix.go b/services/diff/service_unix.go index 04a85f7..defea4b 100644 --- a/services/diff/service_unix.go +++ b/services/diff/service_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build 
!windows /* diff --git a/services/diff/service_windows.go b/services/diff/service_windows.go index 00584ec..2d19de0 100644 --- a/services/diff/service_windows.go +++ b/services/diff/service_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. diff --git a/services/events/service.go b/services/events/service.go index fc16848..3b5f811 100644 --- a/services/events/service.go +++ b/services/events/service.go @@ -18,6 +18,7 @@ package events import ( "context" + "fmt" api "github.com/containerd/containerd/api/services/events/v1" apittrpc "github.com/containerd/containerd/api/services/ttrpc/events/v1" @@ -27,7 +28,6 @@ import ( "github.com/containerd/containerd/plugin" "github.com/containerd/ttrpc" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "google.golang.org/grpc" ) @@ -35,8 +35,15 @@ func init() { plugin.Register(&plugin.Registration{ Type: plugin.GRPCPlugin, ID: "events", + Requires: []plugin.Type{ + plugin.EventPlugin, + }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { - return NewService(ic.Events), nil + ep, err := ic.GetByID(plugin.EventPlugin, "exchange") + if err != nil { + return nil, err + } + return NewService(ep.(*exchange.Exchange)), nil }, }) } @@ -91,11 +98,11 @@ func (s *service) Subscribe(req *api.SubscribeRequest, srv api.Events_SubscribeS select { case ev := <-eventq: if err := srv.Send(toProto(ev)); err != nil { - return errors.Wrapf(err, "failed sending event to subscriber") + return fmt.Errorf("failed sending event to subscriber: %w", err) } case err := <-errq: if err != nil { - return errors.Wrapf(err, "subscription error") + return fmt.Errorf("subscription error: %w", err) } return nil diff --git a/services/images/helpers.go b/services/images/helpers.go index 2d4ec76..6b7b008 100644 --- a/services/images/helpers.go +++ b/services/images/helpers.go @@ -27,6 +27,7 @@ func imagesToProto(images []images.Image) []imagesapi.Image { var imagespb []imagesapi.Image for _, image := range 
images { + image := image imagespb = append(imagespb, imageToProto(&image)) } diff --git a/services/images/service.go b/services/images/service.go index 83d8021..fbfa749 100644 --- a/services/images/service.go +++ b/services/images/service.go @@ -18,12 +18,12 @@ package images import ( "context" + "errors" imagesapi "github.com/containerd/containerd/api/services/images/v1" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "google.golang.org/grpc" ) diff --git a/services/introspection/local.go b/services/introspection/local.go index 988f1e8..47388e4 100644 --- a/services/introspection/local.go +++ b/services/introspection/local.go @@ -18,7 +18,6 @@ package introspection import ( context "context" - "io/ioutil" "os" "path/filepath" "sync" @@ -42,12 +41,9 @@ func init() { ID: services.IntrospectionService, Requires: []plugin.Type{}, InitFn: func(ic *plugin.InitContext) (interface{}, error) { - // this service works by using the plugin context up till the point - // this service is initialized. Since we require this service last, - // it should provide the full set of plugins. 
- pluginsPB := pluginsToPB(ic.GetAll()) + // this service fetches all plugins through the plugin set of the plugin context return &Local{ - plugins: pluginsPB, + plugins: ic.Plugins(), root: ic.Root, }, nil }, @@ -56,19 +52,19 @@ func init() { // Local is a local implementation of the introspection service type Local struct { - mu sync.Mutex - plugins []api.Plugin - root string + mu sync.Mutex + root string + plugins *plugin.Set + pluginCache []api.Plugin } var _ = (api.IntrospectionClient)(&Local{}) // UpdateLocal updates the local introspection service -func (l *Local) UpdateLocal(root string, plugins []api.Plugin) { +func (l *Local) UpdateLocal(root string) { l.mu.Lock() defer l.mu.Unlock() l.root = root - l.plugins = plugins } // Plugins returns the locally defined plugins @@ -96,7 +92,11 @@ func (l *Local) Plugins(ctx context.Context, req *api.PluginsRequest, _ ...grpc. func (l *Local) getPlugins() []api.Plugin { l.mu.Lock() defer l.mu.Unlock() - return l.plugins + plugins := l.plugins.GetAll() + if l.pluginCache == nil || len(plugins) != len(l.pluginCache) { + l.pluginCache = pluginsToPB(plugins) + } + return l.pluginCache } // Server returns the local server information @@ -114,7 +114,7 @@ func (l *Local) getUUID() (string, error) { l.mu.Lock() defer l.mu.Unlock() - data, err := ioutil.ReadFile(l.uuidPath()) + data, err := os.ReadFile(l.uuidPath()) if err != nil { if os.IsNotExist(err) { return l.generateUUID() @@ -138,7 +138,7 @@ func (l *Local) generateUUID() (string, error) { return "", err } uu := u.String() - if err := ioutil.WriteFile(path, []byte(uu), 0666); err != nil { + if err := os.WriteFile(path, []byte(uu), 0666); err != nil { return "", err } return uu, nil diff --git a/services/introspection/service.go b/services/introspection/service.go index b8c54af..c11b8dc 100644 --- a/services/introspection/service.go +++ b/services/introspection/service.go @@ -18,12 +18,12 @@ package introspection import ( context "context" + "errors" api 
"github.com/containerd/containerd/api/services/introspection/v1" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "google.golang.org/grpc" ) @@ -31,11 +31,8 @@ func init() { plugin.Register(&plugin.Registration{ Type: plugin.GRPCPlugin, ID: "introspection", - Requires: []plugin.Type{"*"}, + Requires: []plugin.Type{plugin.ServicePlugin}, InitFn: func(ic *plugin.InitContext) (interface{}, error) { - // this service works by using the plugin context up till the point - // this service is initialized. Since we require this service last, - // it should provide the full set of plugins. plugins, err := ic.GetByType(plugin.ServicePlugin) if err != nil { return nil, err @@ -50,13 +47,11 @@ func init() { return nil, err } - allPluginsPB := pluginsToPB(ic.GetAll()) - localClient, ok := i.(*Local) if !ok { - return nil, errors.Errorf("Could not create a local client for introspection service") + return nil, errors.New("Could not create a local client for introspection service") } - localClient.UpdateLocal(ic.Root, allPluginsPB) + localClient.UpdateLocal(ic.Root) return &server{ local: localClient, diff --git a/services/leases/service.go b/services/leases/service.go index 8dcc9f7..75afa4f 100644 --- a/services/leases/service.go +++ b/services/leases/service.go @@ -18,8 +18,7 @@ package leases import ( "context" - - "google.golang.org/grpc" + "errors" api "github.com/containerd/containerd/api/services/leases/v1" "github.com/containerd/containerd/errdefs" @@ -27,7 +26,7 @@ import ( "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" + "google.golang.org/grpc" ) func init() { diff --git a/services/namespaces/local.go b/services/namespaces/local.go index f50b653..c45ae80 100644 --- a/services/namespaces/local.go +++ b/services/namespaces/local.go @@ -40,6 +40,7 @@ func init() { 
Type: plugin.ServicePlugin, ID: services.NamespacesService, Requires: []plugin.Type{ + plugin.EventPlugin, plugin.MetadataPlugin, }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { @@ -47,9 +48,13 @@ func init() { if err != nil { return nil, err } + ep, err := ic.Get(plugin.EventPlugin) + if err != nil { + return nil, err + } return &local{ db: m.(*metadata.DB), - publisher: ic.Events, + publisher: ep.(events.Publisher), }, nil }, }) diff --git a/services/namespaces/service.go b/services/namespaces/service.go index d3c74a2..27d9775 100644 --- a/services/namespaces/service.go +++ b/services/namespaces/service.go @@ -18,12 +18,12 @@ package namespaces import ( "context" + "errors" api "github.com/containerd/containerd/api/services/namespaces/v1" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "google.golang.org/grpc" ) diff --git a/services/opt/path_unix.go b/services/opt/path_unix.go index b4d996c..7d21e0d 100644 --- a/services/opt/path_unix.go +++ b/services/opt/path_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/services/opt/service.go b/services/opt/service.go index 5ac7665..76280ba 100644 --- a/services/opt/service.go +++ b/services/opt/service.go @@ -22,7 +22,6 @@ import ( "path/filepath" "github.com/containerd/containerd/plugin" - "github.com/pkg/errors" ) // Config for the opt manager @@ -46,7 +45,7 @@ func init() { return nil, err } if err := os.Setenv("PATH", fmt.Sprintf("%s%c%s", bin, os.PathListSeparator, os.Getenv("PATH"))); err != nil { - return nil, errors.Wrapf(err, "set binary image directory in path %s", bin) + return nil, fmt.Errorf("set binary image directory in path %s: %w", bin, err) } lib := filepath.Join(path, "lib") @@ -54,7 +53,7 @@ func init() { return nil, err } if err := os.Setenv("LD_LIBRARY_PATH", fmt.Sprintf("%s%c%s", lib, os.PathListSeparator, os.Getenv("LD_LIBRARY_PATH"))); err != nil { 
- return nil, errors.Wrapf(err, "set binary lib directory in path %s", lib) + return nil, fmt.Errorf("set binary lib directory in path %s: %w", lib, err) } return &manager{}, nil }, diff --git a/services/server/config/config.go b/services/server/config/config.go index 6aafe36..4c475d4 100644 --- a/services/server/config/config.go +++ b/services/server/config/config.go @@ -17,12 +17,12 @@ package config import ( + "fmt" "path/filepath" "strings" "github.com/imdario/mergo" "github.com/pelletier/go-toml" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/containerd/containerd/errdefs" @@ -39,6 +39,8 @@ type Config struct { Root string `toml:"root"` // State is the path to a directory where containerd will store transient data State string `toml:"state"` + // TempDir is the path to a directory where to place containerd temporary files + TempDir string `toml:"temp"` // PluginDir is the directory for dynamic plugins to be stored PluginDir string `toml:"plugin_dir"` // GRPC configuration settings @@ -67,7 +69,7 @@ type Config struct { Timeouts map[string]string `toml:"timeouts"` // Imports are additional file path list to config files that can overwrite main config file fields Imports []string `toml:"imports"` - + // StreamProcessors configuration StreamProcessors map[string]StreamProcessor `toml:"stream_processors"` } @@ -97,22 +99,23 @@ func (c *Config) GetVersion() int { func (c *Config) ValidateV2() error { version := c.GetVersion() if version < 2 { - logrus.Warnf("deprecated version : `%d`, please switch to version `2`", version) + logrus.Warnf("containerd config version `%d` has been deprecated and will be removed in containerd v2.0, please switch to version `2`, "+ + "see https://github.com/containerd/containerd/blob/main/docs/PLUGINS.md#version-header", version) return nil } for _, p := range c.DisabledPlugins { if len(strings.Split(p, ".")) < 4 { - return errors.Errorf("invalid disabled plugin URI %q expect io.containerd.x.vx", p) + return 
fmt.Errorf("invalid disabled plugin URI %q expect io.containerd.x.vx", p) } } for _, p := range c.RequiredPlugins { if len(strings.Split(p, ".")) < 4 { - return errors.Errorf("invalid required plugin URI %q expect io.containerd.x.vx", p) + return fmt.Errorf("invalid required plugin URI %q expect io.containerd.x.vx", p) } } for p := range c.Plugins { if len(strings.Split(p, ".")) < 4 { - return errors.Errorf("invalid plugin key URI %q expect io.containerd.x.vx", p) + return fmt.Errorf("invalid plugin key URI %q expect io.containerd.x.vx", p) } } return nil @@ -122,6 +125,7 @@ func (c *Config) ValidateV2() error { type GRPCConfig struct { Address string `toml:"address"` TCPAddress string `toml:"tcp_address"` + TCPTLSCA string `toml:"tcp_tls_ca"` TCPTLSCert string `toml:"tcp_tls_cert"` TCPTLSKey string `toml:"tcp_tls_key"` UID int `toml:"uid"` @@ -198,7 +202,7 @@ func (bc *BoltConfig) Validate() error { case SharingPolicyShared, SharingPolicyIsolated: return nil default: - return errors.Wrapf(errdefs.ErrInvalidArgument, "unknown policy: %s", bc.ContentSharingPolicy) + return fmt.Errorf("unknown policy: %s: %w", bc.ContentSharingPolicy, errdefs.ErrInvalidArgument) } } @@ -221,7 +225,7 @@ func (c *Config) Decode(p *plugin.Registration) (interface{}, error) { // LoadConfig loads the containerd server config from the provided path func LoadConfig(path string, out *Config) error { if out == nil { - return errors.Wrapf(errdefs.ErrInvalidArgument, "argument out must not be nil") + return fmt.Errorf("argument out must not be nil: %w", errdefs.ErrInvalidArgument) } var ( @@ -263,7 +267,7 @@ func LoadConfig(path string, out *Config) error { err := out.ValidateV2() if err != nil { - return errors.Wrapf(err, "failed to load TOML from %s", path) + return fmt.Errorf("failed to load TOML from %s: %w", path, err) } return nil } @@ -274,11 +278,11 @@ func loadConfigFile(path string) (*Config, error) { file, err := toml.LoadFile(path) if err != nil { - return nil, errors.Wrapf(err, 
"failed to load TOML: %s", path) + return nil, fmt.Errorf("failed to load TOML: %s: %w", path, err) } if err := file.Unmarshal(config); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal TOML") + return nil, fmt.Errorf("failed to unmarshal TOML: %w", err) } return config, nil diff --git a/services/server/config/config_test.go b/services/server/config/config_test.go index 6775e84..49a10d9 100644 --- a/services/server/config/config_test.go +++ b/services/server/config/config_test.go @@ -17,7 +17,6 @@ package config import ( - "io/ioutil" "os" "path/filepath" "sort" @@ -62,12 +61,10 @@ func TestMergeConfigs(t *testing.T) { } func TestResolveImports(t *testing.T) { - tempDir, err := ioutil.TempDir("", "containerd_") - assert.NilError(t, err) - defer os.RemoveAll(tempDir) + tempDir := t.TempDir() for _, filename := range []string{"config_1.toml", "config_2.toml", "test.toml"} { - err = ioutil.WriteFile(filepath.Join(tempDir, filename), []byte(""), 0600) + err := os.WriteFile(filepath.Join(tempDir, filename), []byte(""), 0600) assert.NilError(t, err) } @@ -96,12 +93,10 @@ root = "/var/lib/containerd" accepts = ["application/vnd.docker.image.rootfs.diff.tar.gzip"] path = "unpigz" ` - tempDir, err := ioutil.TempDir("", "containerd_") - assert.NilError(t, err) - defer os.RemoveAll(tempDir) + tempDir := t.TempDir() path := filepath.Join(tempDir, "config.toml") - err = ioutil.WriteFile(path, []byte(data), 0600) + err := os.WriteFile(path, []byte(data), 0600) assert.NilError(t, err) var out Config @@ -128,14 +123,12 @@ imports = ["data2.toml"] disabled_plugins = ["io.containerd.v1.xyz"] ` - tempDir, err := ioutil.TempDir("", "containerd_") - assert.NilError(t, err) - defer os.RemoveAll(tempDir) + tempDir := t.TempDir() - err = ioutil.WriteFile(filepath.Join(tempDir, "data1.toml"), []byte(data1), 0600) + err := os.WriteFile(filepath.Join(tempDir, "data1.toml"), []byte(data1), 0600) assert.NilError(t, err) - err = ioutil.WriteFile(filepath.Join(tempDir, 
"data2.toml"), []byte(data2), 0600) + err = os.WriteFile(filepath.Join(tempDir, "data2.toml"), []byte(data2), 0600) assert.NilError(t, err) var out Config @@ -158,14 +151,12 @@ imports = ["data2.toml", "data1.toml"] disabled_plugins = ["io.containerd.v1.xyz"] imports = ["data1.toml", "data2.toml"] ` - tempDir, err := ioutil.TempDir("", "containerd_") - assert.NilError(t, err) - defer os.RemoveAll(tempDir) + tempDir := t.TempDir() - err = ioutil.WriteFile(filepath.Join(tempDir, "data1.toml"), []byte(data1), 0600) + err := os.WriteFile(filepath.Join(tempDir, "data1.toml"), []byte(data1), 0600) assert.NilError(t, err) - err = ioutil.WriteFile(filepath.Join(tempDir, "data2.toml"), []byte(data2), 0600) + err = os.WriteFile(filepath.Join(tempDir, "data2.toml"), []byte(data2), 0600) assert.NilError(t, err) var out Config @@ -190,12 +181,10 @@ version = 2 shim_debug = true ` - tempDir, err := ioutil.TempDir("", "containerd_") - assert.NilError(t, err) - defer os.RemoveAll(tempDir) + tempDir := t.TempDir() path := filepath.Join(tempDir, "config.toml") - err = ioutil.WriteFile(path, []byte(data), 0600) + err := os.WriteFile(path, []byte(data), 0600) assert.NilError(t, err) var out Config @@ -216,12 +205,8 @@ func TestDecodePluginInV1Config(t *testing.T) { shim_debug = true ` - tempDir, err := ioutil.TempDir("", "containerd_") - assert.NilError(t, err) - defer os.RemoveAll(tempDir) - - path := filepath.Join(tempDir, "config.toml") - err = ioutil.WriteFile(path, []byte(data), 0600) + path := filepath.Join(t.TempDir(), "config.toml") + err := os.WriteFile(path, []byte(data), 0600) assert.NilError(t, err) var out Config diff --git a/services/server/namespace.go b/services/server/namespace.go new file mode 100644 index 0000000..99f5e33 --- /dev/null +++ b/services/server/namespace.go @@ -0,0 +1,52 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "context" + + "github.com/containerd/containerd/namespaces" + "google.golang.org/grpc" +) + +func unaryNamespaceInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + if ns, ok := namespaces.Namespace(ctx); ok { + // The above call checks the *incoming* metadata, this makes sure the outgoing metadata is also set + ctx = namespaces.WithNamespace(ctx, ns) + } + return handler(ctx, req) +} + +func streamNamespaceInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + ctx := ss.Context() + if ns, ok := namespaces.Namespace(ctx); ok { + // The above call checks the *incoming* metadata, this makes sure the outgoing metadata is also set + ctx = namespaces.WithNamespace(ctx, ns) + ss = &wrappedSSWithContext{ctx: ctx, ServerStream: ss} + } + + return handler(srv, ss) +} + +type wrappedSSWithContext struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedSSWithContext) Context() context.Context { + return w.ctx +} diff --git a/services/server/server.go b/services/server/server.go index b6171f4..28ce79a 100644 --- a/services/server/server.go +++ b/services/server/server.go @@ -18,13 +18,18 @@ package server import ( "context" + "crypto/tls" + "crypto/x509" + "errors" "expvar" + "fmt" "io" "net" "net/http" "net/http/pprof" "os" "path/filepath" + "runtime" "strings" "sync" "time" @@ -48,14 +53,24 @@ import ( "github.com/containerd/containerd/sys" "github.com/containerd/ttrpc" metrics 
"github.com/docker/go-metrics" + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) +const ( + boltOpenTimeout = "io.containerd.timeout.bolt.open" +) + +func init() { + timeout.Set(boltOpenTimeout, 0) // set to 0 means to wait indefinitely for bolt.Open +} + // CreateTopLevelDirectories creates the top-level root and state directories. func CreateTopLevelDirectories(config *srvconfig.Config) error { switch { @@ -71,7 +86,28 @@ func CreateTopLevelDirectories(config *srvconfig.Config) error { return err } - return sys.MkdirAllWithACL(config.State, 0711) + if err := sys.MkdirAllWithACL(config.State, 0711); err != nil { + return err + } + + if config.TempDir != "" { + if err := sys.MkdirAllWithACL(config.TempDir, 0711); err != nil { + return err + } + if runtime.GOOS == "windows" { + // On Windows, the Host Compute Service (vmcompute) will read the + // TEMP/TMP setting from the calling process when creating the + // tempdir to extract an image layer to. This allows the + // administrator to align the tempdir location with the same volume + // as the snapshot dir to avoid a copy operation when moving the + // extracted layer to the snapshot dir location. 
+ os.Setenv("TEMP", config.TempDir) + os.Setenv("TMP", config.TempDir) + } else { + os.Setenv("TMPDIR", config.TempDir) + } + } + return nil } // New creates and initializes a new containerd server @@ -82,7 +118,7 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { for key, sec := range config.Timeouts { d, err := time.ParseDuration(sec) if err != nil { - return nil, errors.Errorf("unable to parse %s into a time duration", sec) + return nil, fmt.Errorf("unable to parse %s into a time duration", sec) } timeout.Set(key, d) } @@ -95,8 +131,16 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { } serverOpts := []grpc.ServerOption{ - grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), - grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), + grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( + otelgrpc.StreamServerInterceptor(), + grpc.StreamServerInterceptor(grpc_prometheus.StreamServerInterceptor), + streamNamespaceInterceptor, + )), + grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( + otelgrpc.UnaryServerInterceptor(), + grpc.UnaryServerInterceptor(grpc_prometheus.UnaryServerInterceptor), + unaryNamespaceInterceptor, + )), } if config.GRPC.MaxRecvMsgSize > 0 { serverOpts = append(serverOpts, grpc.MaxRecvMsgSize(config.GRPC.MaxRecvMsgSize)) @@ -111,27 +155,58 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { tcpServerOpts := serverOpts if config.GRPC.TCPTLSCert != "" { log.G(ctx).Info("setting up tls on tcp GRPC services...") - creds, err := credentials.NewServerTLSFromFile(config.GRPC.TCPTLSCert, config.GRPC.TCPTLSKey) + + tlsCert, err := tls.LoadX509KeyPair(config.GRPC.TCPTLSCert, config.GRPC.TCPTLSKey) if err != nil { return nil, err } - tcpServerOpts = append(tcpServerOpts, grpc.Creds(creds)) + tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}} + + if config.GRPC.TCPTLSCA != "" { + caCertPool := x509.NewCertPool() + caCert, err := 
os.ReadFile(config.GRPC.TCPTLSCA) + if err != nil { + return nil, fmt.Errorf("failed to load CA file: %w", err) + } + caCertPool.AppendCertsFromPEM(caCert) + tlsConfig.ClientCAs = caCertPool + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + } + + tcpServerOpts = append(tcpServerOpts, grpc.Creds(credentials.NewTLS(tlsConfig))) } + + // grpcService allows GRPC services to be registered with the underlying server + type grpcService interface { + Register(*grpc.Server) error + } + + // tcpService allows GRPC services to be registered with the underlying tcp server + type tcpService interface { + RegisterTCP(*grpc.Server) error + } + + // ttrpcService allows TTRPC services to be registered with the underlying server + type ttrpcService interface { + RegisterTTRPC(*ttrpc.Server) error + } + var ( grpcServer = grpc.NewServer(serverOpts...) tcpServer = grpc.NewServer(tcpServerOpts...) - grpcServices []plugin.Service - tcpServices []plugin.TCPService - ttrpcServices []plugin.TTRPCService + grpcServices []grpcService + tcpServices []tcpService + ttrpcServices []ttrpcService s = &Server{ grpcServer: grpcServer, tcpServer: tcpServer, ttrpcServer: ttrpcServer, - events: exchange.NewExchange(), config: config, } + // TODO: Remove this in 2.0 and let event plugin crease it + events = exchange.NewExchange() initialized = plugin.NewPluginSet() required = make(map[string]struct{}) ) @@ -153,9 +228,10 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { config.Root, config.State, ) - initContext.Events = s.events + initContext.Events = events initContext.Address = config.GRPC.Address initContext.TTRPCAddress = config.TTRPC.Address + initContext.RegisterReadiness = s.RegisterReadiness // load the plugin specific configuration if it is provided if p.Config != nil { @@ -167,7 +243,7 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { } result := p.Init(initContext) if err := initialized.Add(result); err != nil { - return nil, 
errors.Wrapf(err, "could not add plugin result to plugin set") + return nil, fmt.Errorf("could not add plugin result to plugin set: %w", err) } instance, err := result.Instance() @@ -178,20 +254,20 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { log.G(ctx).WithError(err).Warnf("failed to load plugin %s", id) } if _, ok := required[reqID]; ok { - return nil, errors.Wrapf(err, "load required plugin %s", id) + return nil, fmt.Errorf("load required plugin %s: %w", id, err) } continue } delete(required, reqID) // check for grpc services that should be registered with the server - if src, ok := instance.(plugin.Service); ok { + if src, ok := instance.(grpcService); ok { grpcServices = append(grpcServices, src) } - if src, ok := instance.(plugin.TTRPCService); ok { + if src, ok := instance.(ttrpcService); ok { ttrpcServices = append(ttrpcServices, src) } - if service, ok := instance.(plugin.TCPService); ok { + if service, ok := instance.(tcpService); ok { tcpServices = append(tcpServices, service) } @@ -202,7 +278,7 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { for id := range required { missing = append(missing, id) } - return nil, errors.Errorf("required plugin %s not included", missing) + return nil, fmt.Errorf("required plugin %s not included", missing) } // register services after all plugins have been initialized @@ -229,9 +305,9 @@ type Server struct { grpcServer *grpc.Server ttrpcServer *ttrpc.Server tcpServer *grpc.Server - events *exchange.Exchange config *srvconfig.Config plugins []*plugin.Plugin + ready sync.WaitGroup } // ServeGRPC provides the containerd grpc APIs on the provided listener @@ -256,7 +332,11 @@ func (s *Server) ServeTTRPC(l net.Listener) error { func (s *Server) ServeMetrics(l net.Listener) error { m := http.NewServeMux() m.Handle("/v1/metrics", metrics.Handler()) - return trapClosedConnErr(http.Serve(l, m)) + srv := &http.Server{ + Handler: m, + ReadHeaderTimeout: 5 * time.Minute, 
// "G112: Potential Slowloris Attack (gosec)"; not a real concern for our use, so setting a long timeout. + } + return trapClosedConnErr(srv.Serve(l)) } // ServeTCP allows services to serve over tcp @@ -276,7 +356,11 @@ func (s *Server) ServeDebug(l net.Listener) error { m.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile)) m.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol)) m.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace)) - return trapClosedConnErr(http.Serve(l, m)) + srv := &http.Server{ + Handler: m, + ReadHeaderTimeout: 5 * time.Minute, // "G112: Potential Slowloris Attack (gosec)"; not a real concern for our use, so setting a long timeout. + } + return trapClosedConnErr(srv.Serve(l)) } // Stop the containerd server canceling any open connections @@ -287,7 +371,7 @@ func (s *Server) Stop() { instance, err := p.Instance() if err != nil { log.L.WithError(err).WithField("id", p.Registration.URI()). - Errorf("could not get plugin instance") + Error("could not get plugin instance") continue } closer, ok := instance.(io.Closer) @@ -296,11 +380,22 @@ func (s *Server) Stop() { } if err := closer.Close(); err != nil { log.L.WithError(err).WithField("id", p.Registration.URI()). - Errorf("failed to close plugin") + Error("failed to close plugin") } } } +func (s *Server) RegisterReadiness() func() { + s.ready.Add(1) + return func() { + s.ready.Done() + } +} + +func (s *Server) Wait() { + s.ready.Wait() +} + // LoadPlugins loads all plugins into containerd and generates an ordered graph // of all plugins. 
func LoadPlugins(ctx context.Context, config *srvconfig.Config) ([]*plugin.Registration, error) { @@ -376,8 +471,21 @@ func LoadPlugins(ctx context.Context, config *srvconfig.Config) ([]*plugin.Regis path := filepath.Join(ic.Root, "meta.db") ic.Meta.Exports["path"] = path - - db, err := bolt.Open(path, 0644, nil) + options := *bolt.DefaultOptions + options.Timeout = timeout.Get(boltOpenTimeout) + doneCh := make(chan struct{}) + go func() { + t := time.NewTimer(10 * time.Second) + defer t.Stop() + select { + case <-t.C: + log.G(ctx).WithField("plugin", "bolt").Warn("waiting for response from boltdb open") + case <-doneCh: + return + } + }() + db, err := bolt.Open(path, 0644, &options) + close(doneCh) if err != nil { return nil, err } @@ -463,7 +571,7 @@ func (pc *proxyClients) getClient(address string) (*grpc.ClientConn, error) { Backoff: backoffConfig, } gopts := []grpc.DialOption{ - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithConnectParams(connParams), grpc.WithContextDialer(dialer.ContextDialer), @@ -474,7 +582,7 @@ func (pc *proxyClients) getClient(address string) (*grpc.ClientConn, error) { conn, err := grpc.Dial(dialer.DialAddress(address), gopts...) 
if err != nil { - return nil, errors.Wrapf(err, "failed to dial %q", address) + return nil, fmt.Errorf("failed to dial %q: %w", address, err) } pc.clients[address] = conn diff --git a/services/server/server_linux.go b/services/server/server_linux.go index cbb2b0f..48d2684 100644 --- a/services/server/server_linux.go +++ b/services/server/server_linux.go @@ -41,12 +41,7 @@ func apply(ctx context.Context, config *srvconfig.Config) error { if cgroups.Mode() == cgroups.Unified { cg, err := cgroupsv2.LoadManager("/sys/fs/cgroup", config.Cgroup.Path) if err != nil { - if err != cgroupsv2.ErrCgroupDeleted { - return err - } - if cg, err = cgroupsv2.NewManager("/sys/fs/cgroup", config.Cgroup.Path, nil); err != nil { - return err - } + return err } if err := cg.AddProc(uint64(os.Getpid())); err != nil { return err diff --git a/services/server/server_unsupported.go b/services/server/server_unsupported.go index 80674e6..c3eb53b 100644 --- a/services/server/server_unsupported.go +++ b/services/server/server_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !windows && !solaris // +build !linux,!windows,!solaris /* diff --git a/services/server/server_windows.go b/services/server/server_windows.go index 8b569eb..4d7c060 100644 --- a/services/server/server_windows.go +++ b/services/server/server_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
diff --git a/services/snapshots/service.go b/services/snapshots/service.go index 1b58ac0..c6d711c 100644 --- a/services/snapshots/service.go +++ b/services/snapshots/service.go @@ -18,6 +18,7 @@ package snapshots import ( "context" + "errors" snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1" "github.com/containerd/containerd/api/types" @@ -28,7 +29,6 @@ import ( "github.com/containerd/containerd/services" "github.com/containerd/containerd/snapshots" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "google.golang.org/grpc" ) diff --git a/services/snapshots/snapshotters.go b/services/snapshots/snapshotters.go index 5da3651..6405cb6 100644 --- a/services/snapshots/snapshotters.go +++ b/services/snapshots/snapshotters.go @@ -39,6 +39,7 @@ func init() { Type: plugin.ServicePlugin, ID: services.SnapshotsService, Requires: []plugin.Type{ + plugin.EventPlugin, plugin.MetadataPlugin, }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { @@ -46,11 +47,15 @@ func init() { if err != nil { return nil, err } + ep, err := ic.Get(plugin.EventPlugin) + if err != nil { + return nil, err + } db := m.(*metadata.DB) ss := make(map[string]snapshots.Snapshotter) for n, sn := range db.Snapshotters() { - ss[n] = newSnapshotter(sn, ic.Events) + ss[n] = newSnapshotter(sn, ep.(events.Publisher)) } return ss, nil }, diff --git a/services/tasks/local.go b/services/tasks/local.go index a6014ba..96ed36c 100644 --- a/services/tasks/local.go +++ b/services/tasks/local.go @@ -21,7 +21,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -44,13 +43,11 @@ import ( "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/linux/runctypes" - v2 "github.com/containerd/containerd/runtime/v2" "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/containerd/services" "github.com/containerd/typeurl" ptypes 
"github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -65,11 +62,18 @@ const ( stateTimeout = "io.containerd.timeout.task.state" ) +// Config for the tasks service plugin +type Config struct { + // RdtConfigFile specifies the path to RDT configuration file + RdtConfigFile string `toml:"rdt_config_file" json:"rdtConfigFile"` +} + func init() { plugin.Register(&plugin.Registration{ Type: plugin.ServicePlugin, ID: services.TasksService, Requires: tasksServiceRequires, + Config: &Config{}, InitFn: initFunc, }) @@ -77,12 +81,13 @@ func init() { } func initFunc(ic *plugin.InitContext) (interface{}, error) { + config := ic.Config.(*Config) runtimes, err := loadV1Runtimes(ic) if err != nil { return nil, err } - v2r, err := ic.Get(plugin.RuntimePluginV2) + v2r, err := ic.GetByID(plugin.RuntimePluginV2, "task") if err != nil { return nil, err } @@ -92,6 +97,11 @@ func initFunc(ic *plugin.InitContext) (interface{}, error) { return nil, err } + ep, err := ic.Get(plugin.EventPlugin) + if err != nil { + return nil, err + } + monitor, err := ic.Get(plugin.TaskMonitorPlugin) if err != nil { if !errdefs.IsNotFound(err) { @@ -105,9 +115,9 @@ func initFunc(ic *plugin.InitContext) (interface{}, error) { runtimes: runtimes, containers: metadata.NewContainerStore(db), store: db.ContentStore(), - publisher: ic.Events, + publisher: ep.(events.Publisher), monitor: monitor.(runtime.TaskMonitor), - v2Runtime: v2r.(*v2.TaskManager), + v2Runtime: v2r.(runtime.PlatformRuntime), } for _, r := range runtimes { tasks, err := r.Tasks(ic.Context, true) @@ -115,7 +125,7 @@ func initFunc(ic *plugin.InitContext) (interface{}, error) { return nil, err } for _, t := range tasks { - l.monitor.Monitor(t) + l.monitor.Monitor(t, nil) } } v2Tasks, err := l.v2Runtime.Tasks(ic.Context, true) @@ -123,8 +133,13 @@ func initFunc(ic *plugin.InitContext) 
(interface{}, error) { return nil, err } for _, t := range v2Tasks { - l.monitor.Monitor(t) + l.monitor.Monitor(t, nil) } + + if err := initRdt(config.RdtConfigFile); err != nil { + log.G(ic.Context).WithError(err).Errorf("RDT initialization failed") + } + return l, nil } @@ -135,7 +150,7 @@ type local struct { publisher events.Publisher monitor runtime.TaskMonitor - v2Runtime *v2.TaskManager + v2Runtime runtime.PlatformRuntime } func (l *local) Create(ctx context.Context, r *api.CreateTaskRequest, _ ...grpc.CallOption) (*api.CreateTaskResponse, error) { @@ -149,7 +164,7 @@ func (l *local) Create(ctx context.Context, r *api.CreateTaskRequest, _ ...grpc. } // jump get checkpointPath from checkpoint image if checkpointPath == "" && r.Checkpoint != nil { - checkpointPath, err = ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "ctrd-checkpoint") + checkpointPath, err = os.MkdirTemp(os.Getenv("XDG_RUNTIME_DIR"), "ctrd-checkpoint") if err != nil { return nil, err } @@ -184,6 +199,9 @@ func (l *local) Create(ctx context.Context, r *api.CreateTaskRequest, _ ...grpc. RuntimeOptions: container.Runtime.Options, TaskOptions: r.Options, } + if r.RuntimePath != "" { + opts.Runtime = r.RuntimePath + } for _, m := range r.Rootfs { opts.Rootfs = append(opts.Rootfs, mount.Mount{ Type: m.Type, @@ -205,18 +223,23 @@ func (l *local) Create(ctx context.Context, r *api.CreateTaskRequest, _ ...grpc. 
return nil, errdefs.ToGRPC(err) } if err == nil { - return nil, errdefs.ToGRPC(fmt.Errorf("task %s already exists", r.ContainerID)) + return nil, errdefs.ToGRPC(fmt.Errorf("task %s: %w", r.ContainerID, errdefs.ErrAlreadyExists)) } c, err := rtime.Create(ctx, r.ContainerID, opts) if err != nil { return nil, errdefs.ToGRPC(err) } - if err := l.monitor.Monitor(c); err != nil { - return nil, errors.Wrap(err, "monitor task") + labels := map[string]string{"runtime": container.Runtime.Name} + if err := l.monitor.Monitor(c, labels); err != nil { + return nil, fmt.Errorf("monitor task: %w", err) + } + pid, err := c.PID(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get task pid: %w", err) } return &api.CreateTaskResponse{ ContainerID: r.ContainerID, - Pid: c.PID(), + Pid: pid, }, nil } @@ -244,17 +267,32 @@ func (l *local) Start(ctx context.Context, r *api.StartRequest, _ ...grpc.CallOp } func (l *local) Delete(ctx context.Context, r *api.DeleteTaskRequest, _ ...grpc.CallOption) (*api.DeleteResponse, error) { - t, err := l.getTask(ctx, r.ContainerID) + container, err := l.getContainer(ctx, r.ContainerID) if err != nil { return nil, err } + + // Find runtime manager + rtime, err := l.getRuntime(container.Runtime.Name) + if err != nil { + return nil, err + } + + // Get task object + t, err := rtime.Get(ctx, container.ID) + if err != nil { + return nil, status.Errorf(codes.NotFound, "task %v not found", container.ID) + } + if err := l.monitor.Stop(t); err != nil { return nil, err } - exit, err := t.Delete(ctx) + + exit, err := rtime.Delete(ctx, r.ContainerID) if err != nil { return nil, errdefs.ToGRPC(err) } + return &api.DeleteResponse{ ExitStatus: exit.Status, ExitedAt: exit.Timestamp, @@ -425,7 +463,7 @@ func (l *local) ListPids(ctx context.Context, r *api.ListPidsRequest, _ ...grpc. 
if p.Info != nil { a, err := typeurl.MarshalAny(p.Info) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal process %d info", p.Pid) + return nil, fmt.Errorf("failed to marshal process %d info: %w", p.Pid, err) } pInfo.Info = a } @@ -513,7 +551,7 @@ func (l *local) Checkpoint(ctx context.Context, r *api.CheckpointTaskRequest, _ checkpointImageExists := false if image == "" { checkpointImageExists = true - image, err = ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "ctd-checkpoint") + image, err = os.MkdirTemp(os.Getenv("XDG_RUNTIME_DIR"), "ctd-checkpoint") if err != nil { return nil, errdefs.ToGRPC(err) } diff --git a/vendor/github.com/containerd/cgroups/v2/rdma.go b/services/tasks/local_darwin.go similarity index 56% rename from vendor/github.com/containerd/cgroups/v2/rdma.go rename to services/tasks/local_darwin.go index 44caa4f..d3a3e80 100644 --- a/vendor/github.com/containerd/cgroups/v2/rdma.go +++ b/services/tasks/local_darwin.go @@ -1,3 +1,6 @@ +//go:build darwin +// +build darwin + /* Copyright The containerd Authors. @@ -14,33 +17,20 @@ limitations under the License. */ -package v2 +package tasks import ( - "fmt" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/runtime" ) -type RDMA struct { - Limit []RDMAEntry +var tasksServiceRequires = []plugin.Type{ + plugin.RuntimePluginV2, + plugin.MetadataPlugin, + plugin.TaskMonitorPlugin, } -type RDMAEntry struct { - Device string - HcaHandles uint32 - HcaObjects uint32 -} - -func (r RDMAEntry) String() string { - return fmt.Sprintf("%s hca_handle=%d hca_object=%d", r.Device, r.HcaHandles, r.HcaObjects) -} - -func (r *RDMA) Values() (o []Value) { - for _, e := range r.Limit { - o = append(o, Value{ - filename: "rdma.max", - value: e.String(), - }) - } - - return o +// loadV1Runtimes on darwin returns an empty map. 
There are no v1 runtimes +func loadV1Runtimes(ic *plugin.InitContext) (map[string]runtime.PlatformRuntime, error) { + return make(map[string]runtime.PlatformRuntime), nil } diff --git a/services/tasks/local_freebsd.go b/services/tasks/local_freebsd.go index d206b8f..c0b57ed 100644 --- a/services/tasks/local_freebsd.go +++ b/services/tasks/local_freebsd.go @@ -1,5 +1,3 @@ -// +build freebsd - /* Copyright The containerd Authors. @@ -24,6 +22,7 @@ import ( ) var tasksServiceRequires = []plugin.Type{ + plugin.EventPlugin, plugin.RuntimePluginV2, plugin.MetadataPlugin, plugin.TaskMonitorPlugin, diff --git a/services/tasks/local_unix.go b/services/tasks/local_unix.go index 2879df0..df381a8 100644 --- a/services/tasks/local_unix.go +++ b/services/tasks/local_unix.go @@ -1,4 +1,5 @@ -// +build !windows,!freebsd +//go:build !windows && !freebsd && !darwin +// +build !windows,!freebsd,!darwin /* Copyright The containerd Authors. @@ -19,13 +20,15 @@ package tasks import ( + "errors" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime" - "github.com/pkg/errors" ) var tasksServiceRequires = []plugin.Type{ + plugin.EventPlugin, plugin.RuntimePlugin, plugin.RuntimePluginV2, plugin.MetadataPlugin, diff --git a/services/tasks/local_windows.go b/services/tasks/local_windows.go index d12f9e4..90b1ed9 100644 --- a/services/tasks/local_windows.go +++ b/services/tasks/local_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -24,6 +22,7 @@ import ( ) var tasksServiceRequires = []plugin.Type{ + plugin.EventPlugin, plugin.RuntimePluginV2, plugin.MetadataPlugin, plugin.TaskMonitorPlugin, diff --git a/vendor/github.com/containerd/continuity/testutil/helpers.go b/services/tasks/rdt_default.go similarity index 78% rename from vendor/github.com/containerd/continuity/testutil/helpers.go rename to services/tasks/rdt_default.go index 603d7a6..05007dc 100644 --- a/vendor/github.com/containerd/continuity/testutil/helpers.go +++ b/services/tasks/rdt_default.go @@ -1,3 +1,6 @@ +//go:build !linux || no_rdt +// +build !linux no_rdt + /* Copyright The containerd Authors. @@ -14,14 +17,8 @@ limitations under the License. */ -package testutil +package tasks -import ( - "flag" -) +func RdtEnabled() bool { return false } -var rootEnabled bool - -func init() { - flag.BoolVar(&rootEnabled, "test.root", false, "enable tests that require root") -} +func initRdt(configFilePath string) error { return nil } diff --git a/services/tasks/rdt_linux.go b/services/tasks/rdt_linux.go new file mode 100644 index 0000000..78f0d69 --- /dev/null +++ b/services/tasks/rdt_linux.go @@ -0,0 +1,59 @@ +//go:build linux && !no_rdt +// +build linux,!no_rdt + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package tasks + +import ( + "fmt" + + "github.com/containerd/containerd/log" + + "github.com/intel/goresctrl/pkg/rdt" +) + +const ( + // ResctrlPrefix is the prefix used for class/closid directories under the resctrl filesystem + ResctrlPrefix = "" +) + +var rdtEnabled bool + +func RdtEnabled() bool { return rdtEnabled } + +func initRdt(configFilePath string) error { + rdtEnabled = false + + if configFilePath == "" { + log.L.Debug("No RDT config file specified, RDT not configured") + return nil + } + + if err := rdt.Initialize(ResctrlPrefix); err != nil { + return fmt.Errorf("RDT not enabled: %w", err) + } + + if err := rdt.SetConfigFromFile(configFilePath, true); err != nil { + return err + } + + rdtEnabled = true + + return nil + +} diff --git a/services/tasks/service.go b/services/tasks/service.go index a92a9a0..f73ffb4 100644 --- a/services/tasks/service.go +++ b/services/tasks/service.go @@ -18,12 +18,12 @@ package tasks import ( "context" + "errors" api "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "google.golang.org/grpc" ) diff --git a/signals.go b/signals.go index ca64ecd..9d81eed 100644 --- a/signals.go +++ b/signals.go @@ -24,6 +24,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" + "github.com/moby/sys/signal" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -40,7 +41,7 @@ func GetStopSignal(ctx context.Context, container Container, defaultSignal sysca } if stopSignal, ok := labels[StopSignalLabel]; ok { - return ParseSignal(stopSignal) + return signal.ParseSignal(stopSignal) } return defaultSignal, nil @@ -48,7 +49,7 @@ func GetStopSignal(ctx context.Context, container Container, defaultSignal sysca // GetOCIStopSignal retrieves the stop signal specified in the OCI image config func GetOCIStopSignal(ctx context.Context, image 
Image, defaultSignal string) (string, error) { - _, err := ParseSignal(defaultSignal) + _, err := signal.ParseSignal(defaultSignal) if err != nil { return "", err } @@ -81,3 +82,12 @@ func GetOCIStopSignal(ctx context.Context, image Image, defaultSignal string) (s return config.StopSignal, nil } + +// ParseSignal parses a given string into a syscall.Signal +// the rawSignal can be a string with "SIG" prefix, +// or a signal number in string format. +// +// Deprecated: Use github.com/moby/sys/signal instead. +func ParseSignal(rawSignal string) (syscall.Signal, error) { + return signal.ParseSignal(rawSignal) +} diff --git a/signals_unix.go b/signals_unix.go deleted file mode 100644 index 14916a9..0000000 --- a/signals_unix.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package containerd - -import ( - "fmt" - "strconv" - "strings" - "syscall" - - "golang.org/x/sys/unix" -) - -// ParseSignal parses a given string into a syscall.Signal -// the rawSignal can be a string with "SIG" prefix, -// or a signal number in string format. 
-func ParseSignal(rawSignal string) (syscall.Signal, error) { - s, err := strconv.Atoi(rawSignal) - if err == nil { - return syscall.Signal(s), nil - } - signal := unix.SignalNum(strings.ToUpper(rawSignal)) - if signal == 0 { - return -1, fmt.Errorf("unknown signal %q", rawSignal) - } - return signal, nil -} diff --git a/signals_windows.go b/signals_windows.go deleted file mode 100644 index 0018e19..0000000 --- a/signals_windows.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package containerd - -import ( - "fmt" - "strconv" - "strings" - "syscall" - - "golang.org/x/sys/windows" -) - -var signalMap = map[string]syscall.Signal{ - "HUP": syscall.Signal(windows.SIGHUP), - "INT": syscall.Signal(windows.SIGINT), - "QUIT": syscall.Signal(windows.SIGQUIT), - "SIGILL": syscall.Signal(windows.SIGILL), - "TRAP": syscall.Signal(windows.SIGTRAP), - "ABRT": syscall.Signal(windows.SIGABRT), - "BUS": syscall.Signal(windows.SIGBUS), - "FPE": syscall.Signal(windows.SIGFPE), - "KILL": syscall.Signal(windows.SIGKILL), - "SEGV": syscall.Signal(windows.SIGSEGV), - "PIPE": syscall.Signal(windows.SIGPIPE), - "ALRM": syscall.Signal(windows.SIGALRM), - "TERM": syscall.Signal(windows.SIGTERM), -} - -// ParseSignal parses a given string into a syscall.Signal -// the rawSignal can be a string with "SIG" prefix, -// or a signal number in string format. 
-func ParseSignal(rawSignal string) (syscall.Signal, error) { - s, err := strconv.Atoi(rawSignal) - if err == nil { - sig := syscall.Signal(s) - for _, msig := range signalMap { - if sig == msig { - return sig, nil - } - } - return -1, fmt.Errorf("unknown signal %q", rawSignal) - } - signal, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] - if !ok { - return -1, fmt.Errorf("unknown signal %q", rawSignal) - } - return signal, nil -} diff --git a/snapshots/benchsuite/benchmark.go b/snapshots/benchsuite/benchmark.go index be17d5e..6b37bda 100644 --- a/snapshots/benchsuite/benchmark.go +++ b/snapshots/benchsuite/benchmark.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/snapshots/benchsuite/benchmark_test.go b/snapshots/benchsuite/benchmark_test.go index 40514d5..1bc4cc6 100644 --- a/snapshots/benchsuite/benchmark_test.go +++ b/snapshots/benchsuite/benchmark_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -30,7 +31,6 @@ import ( "time" "github.com/containerd/continuity/fs/fstest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "gotest.tools/v3/assert" @@ -286,7 +286,7 @@ func updateFile(name string) applierFn { path := filepath.Join(root, name) file, err := os.OpenFile(path, os.O_WRONLY, 0600) if err != nil { - return errors.Wrapf(err, "failed to open %q", path) + return fmt.Errorf("failed to open %q: %w", path, err) } info, err := file.Stat() @@ -304,7 +304,7 @@ func updateFile(name string) applierFn { } if _, err := file.WriteAt(buf, offset); err != nil { - return errors.Wrapf(err, "failed to write %q at offset %d", path, offset) + return fmt.Errorf("failed to write %q at offset %d: %w", path, offset, err) } return file.Close() diff --git a/snapshots/btrfs/btrfs.go b/snapshots/btrfs/btrfs.go index dc274ee..0c4ba59 100644 --- a/snapshots/btrfs/btrfs.go +++ b/snapshots/btrfs/btrfs.go @@ -1,3 +1,4 @@ +//go:build linux && !no_btrfs && cgo // +build linux,!no_btrfs,cgo /* @@ -34,7 +35,6 @@ import ( 
"github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots/storage" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -68,7 +68,7 @@ func NewSnapshotter(root string) (snapshots.Snapshotter, error) { return nil, err } if mnt.FSType != "btrfs" { - return nil, errors.Wrapf(plugin.ErrSkipPlugin, "path %s (%s) must be a btrfs filesystem to be used with the btrfs snapshotter", root, mnt.FSType) + return nil, fmt.Errorf("path %s (%s) must be a btrfs filesystem to be used with the btrfs snapshotter: %w", root, mnt.FSType, plugin.ErrSkipPlugin) } var ( active = filepath.Join(root, "active") @@ -274,7 +274,7 @@ func (b *snapshotter) mounts(dir string, s storage.Snapshot) ([]mount.Mount, err func (b *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) (err error) { usage, err := b.usage(ctx, key) if err != nil { - return errors.Wrap(err, "failed to compute usage") + return fmt.Errorf("failed to compute usage: %w", err) } ctx, t, err := b.ms.TransactionContext(ctx, true) @@ -291,7 +291,7 @@ func (b *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap id, err := storage.CommitActive(ctx, key, name, usage, opts...) 
// TODO(stevvooe): Resolve a usage value for btrfs if err != nil { - return errors.Wrap(err, "failed to commit") + return fmt.Errorf("failed to commit: %w", err) } source := filepath.Join(b.root, "active", id) @@ -330,7 +330,7 @@ func (b *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er s, err := storage.GetSnapshot(ctx, key) t.Rollback() if err != nil { - return nil, errors.Wrap(err, "failed to get active snapshot") + return nil, fmt.Errorf("failed to get active snapshot: %w", err) } dir := filepath.Join(b.root, strings.ToLower(s.Kind.String()), s.ID) @@ -365,7 +365,7 @@ func (b *snapshotter) Remove(ctx context.Context, key string) (err error) { id, k, err := storage.Remove(ctx, key) if err != nil { - return errors.Wrap(err, "failed to remove snapshot") + return fmt.Errorf("failed to remove snapshot: %w", err) } switch k { @@ -388,7 +388,7 @@ func (b *snapshotter) Remove(ctx context.Context, key string) (err error) { } if err := btrfs.SubvolDelete(source); err != nil { - return errors.Wrapf(err, "failed to remove snapshot %v", source) + return fmt.Errorf("failed to remove snapshot %v: %w", source, err) } err = t.Commit() diff --git a/snapshots/btrfs/btrfs_test.go b/snapshots/btrfs/btrfs_test.go index 0378940..6e5b246 100644 --- a/snapshots/btrfs/btrfs_test.go +++ b/snapshots/btrfs/btrfs_test.go @@ -1,3 +1,4 @@ +//go:build linux && !no_btrfs && cgo // +build linux,!no_btrfs,cgo /* @@ -19,10 +20,11 @@ package btrfs import ( + "bytes" "context" - "io/ioutil" + "errors" + "fmt" "os" - "os/exec" "path/filepath" "strings" "testing" @@ -34,7 +36,7 @@ import ( "github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots/testsuite" "github.com/containerd/continuity/testutil/loopback" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) @@ -44,7 +46,10 @@ func boltSnapshotter(t *testing.T) func(context.Context, string) (snapshots.Snap t.Skipf("could not find mkfs.btrfs: %v", err) } - // 
TODO: Check for btrfs in /proc/module and skip if not loaded + procModules, err := os.ReadFile("/proc/modules") + if err == nil && !bytes.Contains(procModules, []byte("btrfs")) { + t.Skip("check for btrfs kernel module failed, skipping test") + } return func(ctx context.Context, root string) (snapshots.Snapshotter, func() error, error) { @@ -62,7 +67,7 @@ func boltSnapshotter(t *testing.T) func(context.Context, string) (snapshots.Snap if out, err := exec.Command(mkbtrfs, loop.Device).CombinedOutput(); err != nil { loop.Close() - return nil, nil, errors.Wrapf(err, "failed to make btrfs filesystem (out: %q)", out) + return nil, nil, fmt.Errorf("failed to make btrfs filesystem (out: %q): %w", out, err) } // sync after a mkfs on the loopback before trying to mount the device unix.Sync() @@ -71,7 +76,7 @@ func boltSnapshotter(t *testing.T) func(context.Context, string) (snapshots.Snap for i := 0; i < 5; i++ { if out, err := exec.Command("mount", loop.Device, root).CombinedOutput(); err != nil { loop.Close() - return nil, nil, errors.Wrapf(err, "failed to mount device %s (out: %q)", loop.Device, out) + return nil, nil, fmt.Errorf("failed to mount device %s (out: %q): %w", loop.Device, out, err) } if i > 0 { @@ -91,7 +96,7 @@ func boltSnapshotter(t *testing.T) func(context.Context, string) (snapshots.Snap unix.Unmount(root, 0) } if snapshotter == nil { - return nil, nil, errors.Wrap(err, "failed to successfully create snapshotter after 5 attempts") + return nil, nil, fmt.Errorf("failed to successfully create snapshotter after 5 attempts: %w", err) } return snapshotter, func() error { @@ -100,7 +105,7 @@ func boltSnapshotter(t *testing.T) func(context.Context, string) (snapshots.Snap } err := mount.UnmountAll(root, unix.MNT_DETACH) if cerr := loop.Close(); cerr != nil { - err = errors.Wrap(cerr, "device cleanup failed") + err = fmt.Errorf("device cleanup failed: %w", cerr) } return err }, nil @@ -116,19 +121,7 @@ func TestBtrfsMounts(t *testing.T) { 
testutil.RequiresRoot(t) ctx := context.Background() - // create temporary directory for mount point - mountPoint, err := ioutil.TempDir("", "containerd-btrfs-test") - if err != nil { - t.Fatal("could not create mount point for btrfs test", err) - } - defer os.RemoveAll(mountPoint) - t.Log("temporary mount point created", mountPoint) - - root, err := ioutil.TempDir(mountPoint, "TestBtrfsPrepare-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) + root := t.TempDir() b, c, err := boltSnapshotter(t)(ctx, root) if err != nil { @@ -163,7 +156,7 @@ func TestBtrfsMounts(t *testing.T) { defer testutil.Unmount(t, target) // write in some data - if err := ioutil.WriteFile(filepath.Join(target, "foo"), []byte("content"), 0777); err != nil { + if err := os.WriteFile(filepath.Join(target, "foo"), []byte("content"), 0777); err != nil { t.Fatal(err) } @@ -192,8 +185,15 @@ func TestBtrfsMounts(t *testing.T) { } defer testutil.Unmount(t, target) - // TODO(stevvooe): Verify contents of "foo" - if err := ioutil.WriteFile(filepath.Join(target, "bar"), []byte("content"), 0777); err != nil { + bs, err := os.ReadFile(filepath.Join(target, "foo")) + if err != nil { + t.Fatal(err) + } + if string(bs) != "content" { + t.Fatalf("wrong content in foo want: content, got: %s", bs) + } + + if err := os.WriteFile(filepath.Join(target, "bar"), []byte("content"), 0777); err != nil { t.Fatal(err) } diff --git a/snapshots/btrfs/plugin/plugin.go b/snapshots/btrfs/plugin/plugin.go index a70af58..7e39dc1 100644 --- a/snapshots/btrfs/plugin/plugin.go +++ b/snapshots/btrfs/plugin/plugin.go @@ -1,3 +1,4 @@ +//go:build linux && !no_btrfs && cgo // +build linux,!no_btrfs,cgo /* diff --git a/snapshots/devmapper/README.md b/snapshots/devmapper/README.md index aa578cd..ecdd8ce 100644 --- a/snapshots/devmapper/README.md +++ b/snapshots/devmapper/README.md @@ -10,10 +10,12 @@ This file is typically located at `/etc/containerd/config.toml`. 
Here's minimal sample entry that can be made in the configuration file: -``` +```toml +version = 2 + [plugins] ... - [plugins.devmapper] + [plugins."io.containerd.snapshotter.v1.devmapper"] pool_name = "containerd-pool" base_image_size = "8192MB" ... @@ -26,6 +28,9 @@ The following configuration flags are supported: should be the same as in `/dev/mapper/` directory * `base_image_size` - defines how much space to allocate when creating the base device * `async_remove` - flag to async remove device using snapshot GC's cleanup callback +* `discard_blocks` - whether to discard blocks when removing a device. This is especially useful for returning disk space to the filesystem when using loopback devices. +* `fs_type` - defines the file system to use for snapshot device mount. Valid values are `ext4` and `xfs`. Defaults to `ext4` if unspecified. +* `fs_options` - optionally defines the file system options. This is currently only applicable to `ext4` file system. Pool name and base image size are required snapshotter parameters. @@ -93,6 +98,7 @@ cat << EOF pool_name = "${POOL_NAME}" root_path = "${DATA_DIR}" base_image_size = "10GB" + discard_blocks = true EOF ``` diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/snapshots/devmapper/blkdiscard/blkdiscard.go similarity index 50% rename from vendor/github.com/containerd/console/tc_darwin.go rename to snapshots/devmapper/blkdiscard/blkdiscard.go index 7871545..384184a 100644 --- a/vendor/github.com/containerd/console/tc_darwin.go +++ b/snapshots/devmapper/blkdiscard/blkdiscard.go @@ -1,3 +1,6 @@ +//go:build linux +// +build linux + /* Copyright The containerd Authors. @@ -14,31 +17,26 @@ limitations under the License. 
*/ -package console +package blkdiscard -import ( - "fmt" - "os" +import exec "golang.org/x/sys/execabs" - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - return unix.IoctlSetPointerInt(int(f.Fd()), unix.TIOCPTYUNLK, 0) +// Version returns the output of "blkdiscard --version" +func Version() (string, error) { + return blkdiscard("--version") } -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME) +// BlkDiscard discards all blocks of a device. +// devicePath is expected to be a fully qualified path. +// BlkDiscard expects the caller to verify that the device is not in use. +func BlkDiscard(devicePath string) (string, error) { + return blkdiscard(devicePath) +} + +func blkdiscard(args ...string) (string, error) { + output, err := exec.Command("blkdiscard", args...).CombinedOutput() if err != nil { return "", err } - return fmt.Sprintf("/dev/pts/%d", n), nil + return string(output), nil } diff --git a/snapshots/devmapper/config.go b/snapshots/devmapper/config.go index 24e36b8..3358329 100644 --- a/snapshots/devmapper/config.go +++ b/snapshots/devmapper/config.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -25,7 +26,6 @@ import ( "github.com/docker/go-units" "github.com/hashicorp/go-multierror" "github.com/pelletier/go-toml" - "github.com/pkg/errors" ) // Config represents device mapper configuration loaded from file. @@ -43,6 +43,15 @@ type Config struct { // Flag to async remove device using Cleanup() callback in snapshots GC AsyncRemove bool `toml:"async_remove"` + + // Whether to discard blocks when removing a thin device. 
+ DiscardBlocks bool `toml:"discard_blocks"` + + // Defines file system to use for snapshout device mount. Defaults to "ext4" + FileSystemType fsType `toml:"fs_type"` + + // Defines optional file system options passed through config file + FsOptions string `toml:"fs_options"` } // LoadConfig reads devmapper configuration file from disk in TOML format @@ -58,11 +67,11 @@ func LoadConfig(path string) (*Config, error) { config := Config{} file, err := toml.LoadFile(path) if err != nil { - return nil, errors.Wrapf(err, "failed to open devmapepr TOML: %s", path) + return nil, fmt.Errorf("failed to open devmapepr TOML: %s: %w", path, err) } if err := file.Unmarshal(&config); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal devmapper TOML") + return nil, fmt.Errorf("failed to unmarshal devmapper TOML: %w", err) } if err := config.parse(); err != nil { @@ -79,7 +88,11 @@ func LoadConfig(path string) (*Config, error) { func (c *Config) parse() error { baseImageSize, err := units.RAMInBytes(c.BaseImageSize) if err != nil { - return errors.Wrapf(err, "failed to parse base image size: '%s'", c.BaseImageSize) + return fmt.Errorf("failed to parse base image size: '%s': %w", c.BaseImageSize, err) + } + + if c.FileSystemType == "" { + c.FileSystemType = fsTypeExt4 } c.BaseImageSizeBytes = uint64(baseImageSize) @@ -102,5 +115,15 @@ func (c *Config) Validate() error { result = multierror.Append(result, fmt.Errorf("base_image_size is required")) } + if c.FileSystemType != "" { + switch c.FileSystemType { + case fsTypeExt4, fsTypeXFS: + default: + result = multierror.Append(result, fmt.Errorf("unsupported Filesystem Type: %q", c.FileSystemType)) + } + } else { + result = multierror.Append(result, fmt.Errorf("filesystem type cannot be empty")) + } + return result.ErrorOrNil() } diff --git a/snapshots/devmapper/config_test.go b/snapshots/devmapper/config_test.go index 02ad836..d156a89 100644 --- a/snapshots/devmapper/config_test.go +++ 
b/snapshots/devmapper/config_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -19,7 +20,6 @@ package devmapper import ( - "io/ioutil" "os" "testing" @@ -36,7 +36,7 @@ func TestLoadConfig(t *testing.T) { BaseImageSize: "128Mb", } - file, err := ioutil.TempFile("", "devmapper-config-") + file, err := os.CreateTemp("", "devmapper-config-") assert.NilError(t, err) encoder := toml.NewEncoder(file) @@ -84,18 +84,20 @@ func TestFieldValidation(t *testing.T) { assert.Assert(t, err != nil) multErr := (err).(*multierror.Error) - assert.Assert(t, is.Len(multErr.Errors, 3)) + assert.Assert(t, is.Len(multErr.Errors, 4)) assert.Assert(t, multErr.Errors[0] != nil, "pool_name is empty") assert.Assert(t, multErr.Errors[1] != nil, "root_path is empty") assert.Assert(t, multErr.Errors[2] != nil, "base_image_size is empty") + assert.Assert(t, multErr.Errors[3] != nil, "filesystem type cannot be empty") } func TestExistingPoolFieldValidation(t *testing.T) { config := &Config{ - PoolName: "test", - RootPath: "test", - BaseImageSize: "10mb", + PoolName: "test", + RootPath: "test", + BaseImageSize: "10mb", + FileSystemType: "ext4", } err := config.Validate() diff --git a/snapshots/devmapper/device_info.go b/snapshots/devmapper/device_info.go index e9dea34..37e0dbf 100644 --- a/snapshots/devmapper/device_info.go +++ b/snapshots/devmapper/device_info.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/snapshots/devmapper/dmsetup/dmsetup.go b/snapshots/devmapper/dmsetup/dmsetup.go index c3e3c78..b190efa 100644 --- a/snapshots/devmapper/dmsetup/dmsetup.go +++ b/snapshots/devmapper/dmsetup/dmsetup.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -16,17 +17,20 @@ limitations under the License. */ +// Copyright 2012-2017 Docker, Inc. 
+ package dmsetup import ( + "errors" "fmt" "io" "os" - "os/exec" "strconv" "strings" - "github.com/pkg/errors" + blkdiscard "github.com/containerd/containerd/snapshots/devmapper/blkdiscard" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) @@ -37,6 +41,9 @@ const ( SectorSize = 512 ) +// ErrInUse represents an error mutating a device because it is in use elsewhere +var ErrInUse = errors.New("device is in use") + // DeviceInfo represents device info returned by "dmsetup info". // dmsetup(8) provides more information on each of these fields. type DeviceInfo struct { @@ -94,7 +101,7 @@ const ( func makeThinPoolMapping(dataFile, metaFile string, blockSizeSectors uint32) (string, error) { dataDeviceSizeBytes, err := BlockDeviceSize(dataFile) if err != nil { - return "", errors.Wrapf(err, "failed to get block device size: %s", dataFile) + return "", fmt.Errorf("failed to get block device size: %s: %w", dataFile, err) } // Thin-pool mapping target has the following format: @@ -252,7 +259,7 @@ func Info(deviceName string) ([]*DeviceInfo, error) { &info.EventNumber) if err != nil { - return nil, errors.Wrapf(err, "failed to parse line %q", line) + return nil, fmt.Errorf("failed to parse line %q: %w", line, err) } // Parse attributes (see "man 8 dmsetup" for details) @@ -302,17 +309,17 @@ func Status(deviceName string) (*DeviceStatus, error) { const MinParseCount = 4 parts := strings.Split(output, " ") if len(parts) < MinParseCount { - return nil, errors.Errorf("failed to parse output: %q", output) + return nil, fmt.Errorf("failed to parse output: %q", output) } status.Offset, err = strconv.ParseInt(parts[0], 10, 64) if err != nil { - return nil, errors.Wrapf(err, "failed to parse offset: %q", parts[0]) + return nil, fmt.Errorf("failed to parse offset: %q: %w", parts[0], err) } status.Length, err = strconv.ParseInt(parts[1], 10, 64) if err != nil { - return nil, errors.Wrapf(err, "failed to parse length: %q", parts[1]) + return nil, fmt.Errorf("failed to parse 
length: %q: %w", parts[1], err) } status.Target = parts[2] @@ -340,11 +347,30 @@ func BlockDeviceSize(path string) (int64, error) { size, err := f.Seek(0, io.SeekEnd) if err != nil { - return 0, errors.Wrapf(err, "failed to seek on %q", path) + return 0, fmt.Errorf("failed to seek on %q: %w", path, err) } return size, nil } +// DiscardBlocks discards all blocks for the given thin device +// +// ported from https://github.com/moby/moby/blob/7b9275c0da707b030e62c96b679a976f31f929d3/pkg/devicemapper/devmapper.go#L416 +func DiscardBlocks(deviceName string) error { + inUse, err := isInUse(deviceName) + if err != nil { + return err + } + if inUse { + return ErrInUse + } + path := GetFullDevicePath(deviceName) + _, err = blkdiscard.BlkDiscard(path) + if err != nil { + return err + } + return nil +} + func dmsetup(args ...string) (string, error) { data, err := exec.Command("dmsetup", args...).CombinedOutput() output := string(data) @@ -354,7 +380,7 @@ func dmsetup(args ...string) (string, error) { return "", errno } - return "", errors.Wrapf(err, "dmsetup %s\nerror: %s\n", strings.Join(args, " "), output) + return "", fmt.Errorf("dmsetup %s\nerror: %s\n: %w", strings.Join(args, " "), output, err) } output = strings.TrimSuffix(output, "\n") @@ -377,8 +403,10 @@ func tryGetUnixError(output string) (unix.Errno, bool) { } // dmsetup returns error messages in format: -// device-mapper: message ioctl on failed: File exists\n -// Command failed\n +// +// device-mapper: message ioctl on failed: File exists\n +// Command failed\n +// // parseDmsetupError extracts text between "failed: " and "\n" func parseDmsetupError(output string) string { lines := strings.SplitN(output, "\n", 2) @@ -406,3 +434,14 @@ func parseDmsetupError(output string) string { str = strings.ToLower(str) return str } + +func isInUse(deviceName string) (bool, error) { + info, err := Info(deviceName) + if err != nil { + return true, err + } + if len(info) != 1 { + return true, errors.New("could not get device 
info") + } + return info[0].OpenCount != 0, nil +} diff --git a/snapshots/devmapper/dmsetup/dmsetup_test.go b/snapshots/devmapper/dmsetup/dmsetup_test.go index e8b9533..876bd1a 100644 --- a/snapshots/devmapper/dmsetup/dmsetup_test.go +++ b/snapshots/devmapper/dmsetup/dmsetup_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -19,7 +20,6 @@ package dmsetup import ( - "io/ioutil" "os" "strings" "testing" @@ -43,19 +43,13 @@ const ( func TestDMSetup(t *testing.T) { testutil.RequiresRoot(t) - tempDir, err := ioutil.TempDir("", "dmsetup-tests-") - assert.NilError(t, err, "failed to make temp dir for tests") - - defer func() { - err := os.RemoveAll(tempDir) - assert.NilError(t, err) - }() + tempDir := t.TempDir() dataImage, loopDataDevice := createLoopbackDevice(t, tempDir) metaImage, loopMetaDevice := createLoopbackDevice(t, tempDir) defer func() { - err = mount.DetachLoopDevice(loopDataDevice, loopMetaDevice) + err := mount.DetachLoopDevice(loopDataDevice, loopMetaDevice) assert.NilError(t, err, "failed to detach loop devices for data image: %s and meta image: %s", dataImage, metaImage) }() @@ -83,10 +77,11 @@ func TestDMSetup(t *testing.T) { t.Run("ActivateDevice", testActivateDevice) t.Run("DeviceStatus", testDeviceStatus) t.Run("SuspendResumeDevice", testSuspendResumeDevice) + t.Run("DiscardBlocks", testDiscardBlocks) t.Run("RemoveDevice", testRemoveDevice) t.Run("RemovePool", func(t *testing.T) { - err = RemoveDevice(testPoolName, RemoveWithForce, RemoveWithRetries) + err := RemoveDevice(testPoolName, RemoveWithForce, RemoveWithRetries) assert.NilError(t, err, "failed to remove thin-pool") }) @@ -169,6 +164,11 @@ func testSuspendResumeDevice(t *testing.T) { assert.NilError(t, err) } +func testDiscardBlocks(t *testing.T) { + err := DiscardBlocks(testDeviceName) + assert.NilError(t, err, "failed to discard blocks") +} + func testRemoveDevice(t *testing.T) { err := RemoveDevice(testPoolName) assert.Assert(t, err == unix.EBUSY, "removing thin-pool with 
dependencies shouldn't be allowed") @@ -184,7 +184,7 @@ func testVersion(t *testing.T) { } func createLoopbackDevice(t *testing.T, dir string) (string, string) { - file, err := ioutil.TempFile(dir, "dmsetup-tests-") + file, err := os.CreateTemp(dir, "dmsetup-tests-") assert.NilError(t, err) size, err := units.RAMInBytes("16Mb") diff --git a/snapshots/devmapper/metadata.go b/snapshots/devmapper/metadata.go index 01cdb79..6aab53a 100644 --- a/snapshots/devmapper/metadata.go +++ b/snapshots/devmapper/metadata.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -21,10 +22,10 @@ package devmapper import ( "context" "encoding/json" + "errors" "fmt" "strconv" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -69,7 +70,7 @@ func NewPoolMetadata(dbfile string) (*PoolMetadata, error) { metadata := &PoolMetadata{db: db} if err := metadata.ensureDatabaseInitialized(); err != nil { - return nil, errors.Wrap(err, "failed to initialize database") + return nil, fmt.Errorf("failed to initialize database: %w", err) } return metadata, nil @@ -101,7 +102,7 @@ func (m *PoolMetadata) AddDevice(ctx context.Context, info *DeviceInfo) error { // See https://github.com/containerd/containerd/pull/3436 for more context. 
var existing DeviceInfo if err := getObject(devicesBucket, info.Name, &existing); err == nil && existing.State != Faulty { - return errors.Wrapf(ErrAlreadyExists, "device %q is already there %+v", info.Name, existing) + return fmt.Errorf("device %q is already there %+v: %w", info.Name, existing, ErrAlreadyExists) } // Find next available device ID @@ -116,7 +117,7 @@ func (m *PoolMetadata) AddDevice(ctx context.Context, info *DeviceInfo) error { }) if err != nil { - return errors.Wrapf(err, "failed to save metadata for device %q (parent: %q)", info.Name, info.ParentName) + return fmt.Errorf("failed to save metadata for device %q (parent: %q): %w", info.Name, info.ParentName, err) } return nil @@ -192,7 +193,7 @@ func getNextDeviceID(tx *bolt.Tx) (uint32, error) { } if seq >= maxDeviceID { - return 0, errors.Errorf("dm-meta: couldn't find free device key") + return 0, errors.New("dm-meta: couldn't find free device key") } id := uint32(seq) @@ -212,7 +213,7 @@ func markDeviceID(tx *bolt.Tx, deviceID uint32, state deviceIDState) error { ) if err := bucket.Put([]byte(key), value); err != nil { - return errors.Wrapf(err, "failed to free device id %q", key) + return fmt.Errorf("failed to free device id %q: %w", key, err) } return nil @@ -281,7 +282,7 @@ func (m *PoolMetadata) RemoveDevice(ctx context.Context, name string) error { } if err := bucket.Delete([]byte(name)); err != nil { - return errors.Wrapf(err, "failed to delete device info for %q", name) + return fmt.Errorf("failed to delete device info for %q: %w", name, err) } return markDeviceID(tx, device.DeviceID, deviceFree) @@ -296,7 +297,7 @@ func (m *PoolMetadata) WalkDevices(ctx context.Context, cb func(info *DeviceInfo return bucket.ForEach(func(key, value []byte) error { device := &DeviceInfo{} if err := json.Unmarshal(value, device); err != nil { - return errors.Wrapf(err, "failed to unmarshal %s", key) + return fmt.Errorf("failed to unmarshal %s: %w", key, err) } return cb(device) @@ -339,16 +340,16 @@ func 
putObject(bucket *bolt.Bucket, key string, obj interface{}, overwrite bool) keyBytes := []byte(key) if !overwrite && bucket.Get(keyBytes) != nil { - return errors.Errorf("object with key %q already exists", key) + return fmt.Errorf("object with key %q already exists", key) } data, err := json.Marshal(obj) if err != nil { - return errors.Wrapf(err, "failed to marshal object with key %q", key) + return fmt.Errorf("failed to marshal object with key %q: %w", key, err) } if err := bucket.Put(keyBytes, data); err != nil { - return errors.Wrapf(err, "failed to insert object with key %q", key) + return fmt.Errorf("failed to insert object with key %q: %w", key, err) } return nil @@ -362,7 +363,7 @@ func getObject(bucket *bolt.Bucket, key string, obj interface{}) error { if obj != nil { if err := json.Unmarshal(data, obj); err != nil { - return errors.Wrapf(err, "failed to unmarshal object with key %q", key) + return fmt.Errorf("failed to unmarshal object with key %q: %w", key, err) } } diff --git a/snapshots/devmapper/metadata_test.go b/snapshots/devmapper/metadata_test.go index 0489a68..7069ee1 100644 --- a/snapshots/devmapper/metadata_test.go +++ b/snapshots/devmapper/metadata_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,13 +21,11 @@ package devmapper import ( "context" - "io/ioutil" - "os" + "errors" "path/filepath" "strconv" "testing" - "github.com/pkg/errors" "go.etcd.io/bbolt" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" @@ -37,8 +36,8 @@ var ( ) func TestPoolMetadata_AddDevice(t *testing.T) { - tempDir, store := createStore(t) - defer cleanupStore(t, tempDir, store) + store := createStore(t) + defer cleanupStore(t, store) expected := &DeviceInfo{ Name: "test2", @@ -62,8 +61,8 @@ func TestPoolMetadata_AddDevice(t *testing.T) { } func TestPoolMetadata_AddDeviceRollback(t *testing.T) { - tempDir, store := createStore(t) - defer cleanupStore(t, tempDir, store) + store := createStore(t) + defer cleanupStore(t, store) err := 
store.AddDevice(testCtx, &DeviceInfo{Name: ""}) assert.Assert(t, err != nil) @@ -73,8 +72,8 @@ func TestPoolMetadata_AddDeviceRollback(t *testing.T) { } func TestPoolMetadata_AddDeviceDuplicate(t *testing.T) { - tempDir, store := createStore(t) - defer cleanupStore(t, tempDir, store) + store := createStore(t) + defer cleanupStore(t, store) err := store.AddDevice(testCtx, &DeviceInfo{Name: "test"}) assert.NilError(t, err) @@ -84,8 +83,8 @@ func TestPoolMetadata_AddDeviceDuplicate(t *testing.T) { } func TestPoolMetadata_ReuseDeviceID(t *testing.T) { - tempDir, store := createStore(t) - defer cleanupStore(t, tempDir, store) + store := createStore(t) + defer cleanupStore(t, store) info1 := &DeviceInfo{Name: "test1"} err := store.AddDevice(testCtx, info1) @@ -109,8 +108,8 @@ func TestPoolMetadata_ReuseDeviceID(t *testing.T) { } func TestPoolMetadata_RemoveDevice(t *testing.T) { - tempDir, store := createStore(t) - defer cleanupStore(t, tempDir, store) + store := createStore(t) + defer cleanupStore(t, store) err := store.AddDevice(testCtx, &DeviceInfo{Name: "test"}) assert.NilError(t, err) @@ -123,8 +122,8 @@ func TestPoolMetadata_RemoveDevice(t *testing.T) { } func TestPoolMetadata_UpdateDevice(t *testing.T) { - tempDir, store := createStore(t) - defer cleanupStore(t, tempDir, store) + store := createStore(t) + defer cleanupStore(t, store) oldInfo := &DeviceInfo{ Name: "test1", @@ -155,8 +154,8 @@ func TestPoolMetadata_UpdateDevice(t *testing.T) { } func TestPoolMetadata_MarkFaulty(t *testing.T) { - tempDir, store := createStore(t) - defer cleanupStore(t, tempDir, store) + store := createStore(t) + defer cleanupStore(t, store) info := &DeviceInfo{Name: "test"} err := store.AddDevice(testCtx, info) @@ -182,8 +181,8 @@ func TestPoolMetadata_MarkFaulty(t *testing.T) { } func TestPoolMetadata_WalkDevices(t *testing.T) { - tempDir, store := createStore(t) - defer cleanupStore(t, tempDir, store) + store := createStore(t) + defer cleanupStore(t, store) err := 
store.AddDevice(testCtx, &DeviceInfo{Name: "device1", DeviceID: 1, State: Created}) assert.NilError(t, err) @@ -214,8 +213,8 @@ func TestPoolMetadata_WalkDevices(t *testing.T) { } func TestPoolMetadata_GetDeviceNames(t *testing.T) { - tempDir, store := createStore(t) - defer cleanupStore(t, tempDir, store) + store := createStore(t) + defer cleanupStore(t, store) err := store.AddDevice(testCtx, &DeviceInfo{Name: "test1"}) assert.NilError(t, err) @@ -231,21 +230,15 @@ func TestPoolMetadata_GetDeviceNames(t *testing.T) { assert.Equal(t, "test2", names[1]) } -func createStore(t *testing.T) (tempDir string, store *PoolMetadata) { - tempDir, err := ioutil.TempDir("", "pool-metadata-") - assert.NilError(t, err, "couldn't create temp directory for metadata tests") - - path := filepath.Join(tempDir, "test.db") +func createStore(t *testing.T) (store *PoolMetadata) { + path := filepath.Join(t.TempDir(), "test.db") metadata, err := NewPoolMetadata(path) assert.NilError(t, err) - return tempDir, metadata + return metadata } -func cleanupStore(t *testing.T, tempDir string, store *PoolMetadata) { +func cleanupStore(t *testing.T, store *PoolMetadata) { err := store.Close() assert.NilError(t, err, "failed to close metadata store") - - err = os.RemoveAll(tempDir) - assert.NilError(t, err, "failed to cleanup temp directory") } diff --git a/snapshots/devmapper/plugin/plugin.go b/snapshots/devmapper/plugin/plugin.go index ea8d305..403fd69 100644 --- a/snapshots/devmapper/plugin/plugin.go +++ b/snapshots/devmapper/plugin/plugin.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -19,10 +20,11 @@ package plugin import ( + "errors" + "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/snapshots/devmapper" - "github.com/pkg/errors" ) func init() { diff --git a/snapshots/devmapper/pool_device.go b/snapshots/devmapper/pool_device.go index 77a90bd..dc8120b 100644 --- a/snapshots/devmapper/pool_device.go +++ 
b/snapshots/devmapper/pool_device.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,22 +21,25 @@ package devmapper import ( "context" + "errors" + "fmt" "path/filepath" "strconv" "time" "github.com/hashicorp/go-multierror" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/containerd/containerd/log" + blkdiscard "github.com/containerd/containerd/snapshots/devmapper/blkdiscard" "github.com/containerd/containerd/snapshots/devmapper/dmsetup" ) // PoolDevice ties together data and metadata volumes, represents thin-pool and manages volumes, snapshots and device ids. type PoolDevice struct { - poolName string - metadata *PoolMetadata + poolName string + metadata *PoolMetadata + discardBlocks bool } // NewPoolDevice creates new thin-pool from existing data and metadata volumes. @@ -45,12 +49,21 @@ func NewPoolDevice(ctx context.Context, config *Config) (*PoolDevice, error) { version, err := dmsetup.Version() if err != nil { - log.G(ctx).Errorf("dmsetup not available") + log.G(ctx).Error("dmsetup not available") return nil, err } log.G(ctx).Infof("using dmsetup:\n%s", version) + if config.DiscardBlocks { + blkdiscardVersion, err := blkdiscard.Version() + if err != nil { + log.G(ctx).Error("blkdiscard is not available") + return nil, err + } + log.G(ctx).Infof("using blkdiscard:\n%s", blkdiscardVersion) + } + dbpath := filepath.Join(config.RootPath, config.PoolName+".db") poolMetaStore, err := NewPoolMetadata(dbpath) if err != nil { @@ -60,21 +73,31 @@ func NewPoolDevice(ctx context.Context, config *Config) (*PoolDevice, error) { // Make sure pool exists and available poolPath := dmsetup.GetFullDevicePath(config.PoolName) if _, err := dmsetup.Info(poolPath); err != nil { - return nil, errors.Wrapf(err, "failed to query pool %q", poolPath) + return nil, fmt.Errorf("failed to query pool %q: %w", poolPath, err) } poolDevice := &PoolDevice{ - poolName: config.PoolName, - metadata: poolMetaStore, + poolName: config.PoolName, + metadata: poolMetaStore, + 
discardBlocks: config.DiscardBlocks, } if err := poolDevice.ensureDeviceStates(ctx); err != nil { - return nil, errors.Wrap(err, "failed to check devices state") + return nil, fmt.Errorf("failed to check devices state: %w", err) } return poolDevice, nil } +func skipRetry(err error) bool { + if err == nil { + return true // skip retry if no error + } else if !errors.Is(err, unix.EBUSY) { + return true // skip retry if error is not due to device or resource busy + } + return false +} + func retry(ctx context.Context, f func() error) error { var ( maxRetries = 100 @@ -84,9 +107,8 @@ func retry(ctx context.Context, f func() error) error { for attempt := 1; attempt <= maxRetries; attempt++ { retryErr = f() - if retryErr == nil { - return nil - } else if retryErr != unix.EBUSY { + + if skipRetry(retryErr) { return retryErr } @@ -121,7 +143,7 @@ func (p *PoolDevice) ensureDeviceStates(ctx context.Context) error { } return nil }); err != nil { - return errors.Wrap(err, "failed to query devices from metastore") + return fmt.Errorf("failed to query devices from metastore: %w", err) } var result *multierror.Error @@ -162,7 +184,7 @@ func (p *PoolDevice) transition(ctx context.Context, deviceName string, tryingSt }) if uerr != nil { - return errors.Wrapf(uerr, "failed to set device %q state to %q", deviceName, tryingState) + return fmt.Errorf("failed to set device %q state to %q: %w", deviceName, tryingState, uerr) } var result *multierror.Error @@ -280,7 +302,7 @@ func (p *PoolDevice) createDevice(ctx context.Context, info *DeviceInfo) error { if err := p.transition(ctx, info.Name, Creating, Created, func() error { return dmsetup.CreateDevice(p.poolName, info.DeviceID) }); err != nil { - return errors.Wrapf(err, "failed to create new thin device %q (dev: %d)", info.Name, info.DeviceID) + return fmt.Errorf("failed to create new thin device %q (dev: %d): %w", info.Name, info.DeviceID, err) } return nil @@ -291,7 +313,7 @@ func (p *PoolDevice) activateDevice(ctx context.Context, 
info *DeviceInfo) error if err := p.transition(ctx, info.Name, Activating, Activated, func() error { return dmsetup.ActivateDevice(p.poolName, info.Name, info.DeviceID, info.Size, "") }); err != nil { - return errors.Wrapf(err, "failed to activate new thin device %q (dev: %d)", info.Name, info.DeviceID) + return fmt.Errorf("failed to activate new thin device %q (dev: %d): %w", info.Name, info.DeviceID, err) } return nil @@ -301,7 +323,7 @@ func (p *PoolDevice) activateDevice(ctx context.Context, info *DeviceInfo) error func (p *PoolDevice) CreateSnapshotDevice(ctx context.Context, deviceName string, snapshotName string, virtualSizeBytes uint64) (retErr error) { baseInfo, err := p.metadata.GetDevice(ctx, deviceName) if err != nil { - return errors.Wrapf(err, "failed to query device metadata for %q", deviceName) + return fmt.Errorf("failed to query device metadata for %q: %w", deviceName, err) } snapInfo := &DeviceInfo{ @@ -373,12 +395,13 @@ func (p *PoolDevice) createSnapshot(ctx context.Context, baseInfo, snapInfo *Dev if err := p.transition(ctx, snapInfo.Name, Creating, Created, func() error { return dmsetup.CreateSnapshot(p.poolName, snapInfo.DeviceID, baseInfo.DeviceID) }); err != nil { - return errors.Wrapf(err, - "failed to create snapshot %q (dev: %d) from %q (dev: %d)", + return fmt.Errorf( + "failed to create snapshot %q (dev: %d) from %q (dev: %d): %w", snapInfo.Name, snapInfo.DeviceID, baseInfo.Name, - baseInfo.DeviceID) + baseInfo.DeviceID, err, + ) } return nil @@ -389,7 +412,7 @@ func (p *PoolDevice) SuspendDevice(ctx context.Context, deviceName string) error if err := p.transition(ctx, deviceName, Suspending, Suspended, func() error { return dmsetup.SuspendDevice(deviceName) }); err != nil { - return errors.Wrapf(err, "failed to suspend device %q", deviceName) + return fmt.Errorf("failed to suspend device %q: %w", deviceName, err) } return nil @@ -400,7 +423,7 @@ func (p *PoolDevice) ResumeDevice(ctx context.Context, deviceName string) error if err := 
p.transition(ctx, deviceName, Resuming, Resumed, func() error { return dmsetup.ResumeDevice(deviceName) }); err != nil { - return errors.Wrapf(err, "failed to resume device %q", deviceName) + return fmt.Errorf("failed to resume device %q: %w", deviceName, err) } return nil @@ -422,14 +445,24 @@ func (p *PoolDevice) DeactivateDevice(ctx context.Context, deviceName string, de if err := p.transition(ctx, deviceName, Deactivating, Deactivated, func() error { return retry(ctx, func() error { + if !deferred && p.discardBlocks { + err := dmsetup.DiscardBlocks(deviceName) + if err != nil { + if err == dmsetup.ErrInUse { + log.G(ctx).Warnf("device %q is in use, skipping blkdiscard", deviceName) + } else { + return err + } + } + } if err := dmsetup.RemoveDevice(deviceName, opts...); err != nil { - return errors.Wrap(err, "failed to deactivate device") + return fmt.Errorf("failed to deactivate device: %w", err) } return nil }) }); err != nil { - return errors.Wrapf(err, "failed to deactivate device %q", deviceName) + return fmt.Errorf("failed to deactivate device %q: %w", deviceName, err) } return nil @@ -459,21 +492,23 @@ func (p *PoolDevice) IsLoaded(deviceName string) bool { // GetUsage reports total size in bytes consumed by a thin-device. // It relies on the number of used blocks reported by 'dmsetup status'. 
// The output looks like: -// device2: 0 204800 thin 17280 204799 +// +// device2: 0 204800 thin 17280 204799 +// // Where 17280 is the number of used sectors func (p *PoolDevice) GetUsage(deviceName string) (int64, error) { status, err := dmsetup.Status(deviceName) if err != nil { - return 0, errors.Wrapf(err, "can't get status for device %q", deviceName) + return 0, fmt.Errorf("can't get status for device %q: %w", deviceName, err) } if len(status.Params) == 0 { - return 0, errors.Errorf("failed to get the number of used blocks, unexpected output from dmsetup status") + return 0, errors.New("failed to get the number of used blocks, unexpected output from dmsetup status") } count, err := strconv.ParseInt(status.Params[0], 10, 64) if err != nil { - return 0, errors.Wrapf(err, "failed to parse status params: %q", status.Params[0]) + return 0, fmt.Errorf("failed to parse status params: %q: %w", status.Params[0], err) } return count * dmsetup.SectorSize, nil @@ -483,7 +518,7 @@ func (p *PoolDevice) GetUsage(deviceName string) (int64, error) { func (p *PoolDevice) RemoveDevice(ctx context.Context, deviceName string) error { info, err := p.metadata.GetDevice(ctx, deviceName) if err != nil { - return errors.Wrapf(err, "can't query metadata for device %q", deviceName) + return fmt.Errorf("can't query metadata for device %q: %w", deviceName, err) } if err := p.DeactivateDevice(ctx, deviceName, false, true); err != nil { @@ -496,7 +531,7 @@ func (p *PoolDevice) RemoveDevice(ctx context.Context, deviceName string) error // Remove record from meta store and free device ID if err := p.metadata.RemoveDevice(ctx, deviceName); err != nil { - return errors.Wrapf(err, "can't remove device %q metadata from store after removal", deviceName) + return fmt.Errorf("can't remove device %q metadata from store after removal: %w", deviceName, err) } return nil @@ -514,7 +549,7 @@ func (p *PoolDevice) deleteDevice(ctx context.Context, info *DeviceInfo) error { return nil }) }); err != nil { - 
return errors.Wrapf(err, "failed to delete device %q (dev id: %d)", info.Name, info.DeviceID) + return fmt.Errorf("failed to delete device %q (dev id: %d): %w", info.Name, info.DeviceID, err) } return nil @@ -524,7 +559,7 @@ func (p *PoolDevice) deleteDevice(ctx context.Context, info *DeviceInfo) error { func (p *PoolDevice) RemovePool(ctx context.Context) error { deviceNames, err := p.metadata.GetDeviceNames(ctx) if err != nil { - return errors.Wrap(err, "can't query device names") + return fmt.Errorf("can't query device names: %w", err) } var result *multierror.Error @@ -532,12 +567,12 @@ func (p *PoolDevice) RemovePool(ctx context.Context) error { // Deactivate devices if any for _, name := range deviceNames { if err := p.DeactivateDevice(ctx, name, true, true); err != nil { - result = multierror.Append(result, errors.Wrapf(err, "failed to remove %q", name)) + result = multierror.Append(result, fmt.Errorf("failed to remove %q: %w", name, err)) } } if err := dmsetup.RemoveDevice(p.poolName, dmsetup.RemoveWithForce, dmsetup.RemoveWithRetries, dmsetup.RemoveDeferred); err != nil { - result = multierror.Append(result, errors.Wrapf(err, "failed to remove pool %q", p.poolName)) + result = multierror.Append(result, fmt.Errorf("failed to remove pool %q: %w", p.poolName, err)) } return result.ErrorOrNil() diff --git a/snapshots/devmapper/pool_device_test.go b/snapshots/devmapper/pool_device_test.go index 8525c00..c18df25 100644 --- a/snapshots/devmapper/pool_device_test.go +++ b/snapshots/devmapper/pool_device_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -21,9 +22,7 @@ package devmapper import ( "context" "fmt" - "io/ioutil" "os" - "os/exec" "path/filepath" "testing" "time" @@ -33,6 +32,7 @@ import ( "github.com/containerd/containerd/snapshots/devmapper/dmsetup" "github.com/docker/go-units" "github.com/sirupsen/logrus" + exec "golang.org/x/sys/execabs" "gotest.tools/v3/assert" ) @@ -40,8 +40,8 @@ const ( thinDevice1 = "thin-1" thinDevice2 = "thin-2" 
snapDevice1 = "snap-1" - device1Size = 100000 - device2Size = 200000 + device1Size = 1000000 + device2Size = 2000000 testsPrefix = "devmapper-snapshotter-tests-" ) @@ -61,23 +61,19 @@ func TestPoolDevice(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) ctx := context.Background() - tempDir, err := ioutil.TempDir("", "pool-device-test-") - assert.NilError(t, err, "couldn't get temp directory for testing") + tempDir := t.TempDir() _, loopDataDevice := createLoopbackDevice(t, tempDir) _, loopMetaDevice := createLoopbackDevice(t, tempDir) poolName := fmt.Sprintf("test-pool-device-%d", time.Now().Nanosecond()) - err = dmsetup.CreatePool(poolName, loopDataDevice, loopMetaDevice, 64*1024/dmsetup.SectorSize) + err := dmsetup.CreatePool(poolName, loopDataDevice, loopMetaDevice, 64*1024/dmsetup.SectorSize) assert.NilError(t, err, "failed to create pool %q", poolName) defer func() { // Detach loop devices and remove images err := mount.DetachLoopDevice(loopDataDevice, loopMetaDevice) assert.NilError(t, err) - - err = os.RemoveAll(tempDir) - assert.NilError(t, err, "couldn't cleanup temp directory") }() config := &Config{ @@ -85,6 +81,7 @@ func TestPoolDevice(t *testing.T) { RootPath: tempDir, BaseImageSize: "16mb", BaseImageSizeBytes: 16 * 1024 * 1024, + DiscardBlocks: true, } pool, err := NewPoolDevice(ctx, config) @@ -110,7 +107,7 @@ func TestPoolDevice(t *testing.T) { err = mount.WithTempMount(ctx, getMounts(thinDevice1), func(thin1MountPath string) error { // Write v1 test file on 'thin-1' device thin1TestFilePath := filepath.Join(thin1MountPath, "TEST") - err := ioutil.WriteFile(thin1TestFilePath, []byte("test file (v1)"), 0700) + err := os.WriteFile(thin1TestFilePath, []byte("test file (v1)"), 0700) assert.NilError(t, err, "failed to write test file v1 on '%s' volume", thinDevice1) return nil @@ -124,7 +121,7 @@ func TestPoolDevice(t *testing.T) { // Update TEST file on 'thin-1' to v2 err = mount.WithTempMount(ctx, getMounts(thinDevice1), func(thin1MountPath string) 
error { thin1TestFilePath := filepath.Join(thin1MountPath, "TEST") - err = ioutil.WriteFile(thin1TestFilePath, []byte("test file (v2)"), 0700) + err = os.WriteFile(thin1TestFilePath, []byte("test file (v2)"), 0700) assert.NilError(t, err, "failed to write test file v2 on 'thin-1' volume after taking snapshot") return nil @@ -135,7 +132,7 @@ func TestPoolDevice(t *testing.T) { // Mount 'snap-1' and make sure TEST file is v1 err = mount.WithTempMount(ctx, getMounts(snapDevice1), func(snap1MountPath string) error { // Read test file from snapshot device and make sure it's v1 - fileData, err := ioutil.ReadFile(filepath.Join(snap1MountPath, "TEST")) + fileData, err := os.ReadFile(filepath.Join(snap1MountPath, "TEST")) assert.NilError(t, err, "couldn't read test file from '%s' device", snapDevice1) assert.Equal(t, "test file (v1)", string(fileData), "test file content is invalid on snapshot") @@ -175,8 +172,8 @@ func TestPoolDevice(t *testing.T) { } func TestPoolDeviceMarkFaulty(t *testing.T) { - tempDir, store := createStore(t) - defer cleanupStore(t, tempDir, store) + store := createStore(t) + defer cleanupStore(t, store) err := store.AddDevice(testCtx, &DeviceInfo{Name: "1", State: Unknown}) assert.NilError(t, err) @@ -291,7 +288,7 @@ func getMounts(thinDeviceName string) []mount.Mount { } func createLoopbackDevice(t *testing.T, dir string) (string, string) { - file, err := ioutil.TempFile(dir, testsPrefix) + file, err := os.CreateTemp(dir, testsPrefix) assert.NilError(t, err) size, err := units.RAMInBytes("128Mb") diff --git a/snapshots/devmapper/snapshotter.go b/snapshots/devmapper/snapshotter.go index cfc471e..1e29779 100644 --- a/snapshots/devmapper/snapshotter.go +++ b/snapshots/devmapper/snapshotter.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,9 +21,9 @@ package devmapper import ( "context" + "errors" "fmt" "os" - "os/exec" "path/filepath" "strings" "sync" @@ -34,13 +35,17 @@ import ( 
"github.com/containerd/containerd/snapshots/devmapper/dmsetup" "github.com/containerd/containerd/snapshots/storage" "github.com/hashicorp/go-multierror" - "github.com/pkg/errors" "github.com/sirupsen/logrus" + exec "golang.org/x/sys/execabs" ) +type fsType string + const ( - metadataFileName = "metadata.db" - fsTypeExt4 = "ext4" + metadataFileName = "metadata.db" + fsTypeExt4 fsType = "ext4" + fsTypeXFS fsType = "xfs" + devmapperSnapshotFsType = "containerd.io/snapshot/devmapper/fstype" ) type closeFunc func() error @@ -71,12 +76,12 @@ func NewSnapshotter(ctx context.Context, config *Config) (*Snapshotter, error) { var cleanupFn []closeFunc if err := os.MkdirAll(config.RootPath, 0750); err != nil && !os.IsExist(err) { - return nil, errors.Wrapf(err, "failed to create root directory: %s", config.RootPath) + return nil, fmt.Errorf("failed to create root directory: %s: %w", config.RootPath, err) } store, err := storage.NewMetaStore(filepath.Join(config.RootPath, metadataFileName)) if err != nil { - return nil, errors.Wrap(err, "failed to create metastore") + return nil, fmt.Errorf("failed to create metastore: %w", err) } cleanupFn = append(cleanupFn, store.Close) @@ -182,7 +187,13 @@ func (s *Snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er return err }) - return s.buildMounts(snap), nil + snapInfo, err := s.Stat(ctx, key) + if err != nil { + log.G(ctx).WithError(err).Errorf("cannot retrieve snapshot info for key %s", key) + return nil, err + } + + return s.buildMounts(ctx, snap, fsType(snapInfo.Labels[devmapperSnapshotFsType])), nil } // Prepare creates thin device for an active snapshot identified by key @@ -226,7 +237,7 @@ func (s *Snapshotter) Commit(ctx context.Context, name, key string, opts ...snap log.G(ctx).WithFields(logrus.Fields{"name": name, "key": key}).Debug("commit") return s.withTransaction(ctx, true, func(ctx context.Context) error { - id, _, _, err := storage.GetInfo(ctx, key) + id, snapInfo, _, err := storage.GetInfo(ctx, 
key) if err != nil { return err } @@ -241,6 +252,15 @@ func (s *Snapshotter) Commit(ctx context.Context, name, key string, opts ...snap Size: size, } + // Add file system type label if present. In case more than one file system + // type is supported file system type from parent will be used for creating + // snapshot. + fsTypeActive := snapInfo.Labels[devmapperSnapshotFsType] + if fsTypeActive != "" { + fsLabel := make(map[string]string) + fsLabel[devmapperSnapshotFsType] = fsTypeActive + opts = append(opts, snapshots.WithLabels(fsLabel)) + } _, err = storage.CommitActive(ctx, key, name, usage, opts...) if err != nil { return err @@ -287,7 +307,7 @@ func (s *Snapshotter) removeDevice(ctx context.Context, key string) error { deviceName := s.getDeviceName(snapID) if !s.config.AsyncRemove { if err := s.pool.RemoveDevice(ctx, deviceName); err != nil { - log.G(ctx).WithError(err).Errorf("failed to remove device") + log.G(ctx).WithError(err).Error("failed to remove device") // Tell snapshot GC continue to collect other snapshots. // Otherwise, one snapshot collection failure will stop // the GC, and all snapshots won't be collected even though @@ -298,7 +318,7 @@ func (s *Snapshotter) removeDevice(ctx context.Context, key string) error { // The asynchronous cleanup will do the real device remove work. log.G(ctx).WithField("device", deviceName).Debug("async remove") if err := s.pool.MarkDeviceState(ctx, deviceName, Removed); err != nil { - log.G(ctx).WithError(err).Errorf("failed to mark device as removed") + log.G(ctx).WithError(err).Error("failed to mark device as removed") return err } } @@ -350,12 +370,40 @@ func (s *Snapshotter) Close() error { } func (s *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + var fileSystemType fsType + + // For snapshots with no parents, we use file system type as configured in config. 
+ // For snapshots with parents, we inherit the file system type. We use the same + // file system type derived here for building mount points later. + fsLabel := make(map[string]string) + if len(parent) == 0 { + fileSystemType = s.config.FileSystemType + } else { + _, snapInfo, _, err := storage.GetInfo(ctx, parent) + if err != nil { + log.G(ctx).Errorf("failed to read snapshotInfo for %s", parent) + return nil, err + } + fileSystemType = fsType(snapInfo.Labels[devmapperSnapshotFsType]) + if fileSystemType == "" { + // For parent snapshots created without label support, we can assume that + // they are ext4 type. Children of parents with no label for fsType will + // now have correct label and committed snapshots from them will carry fs type + // label. TODO: find out if it is better to update the parent's label with + // fsType as ext4. + fileSystemType = fsTypeExt4 + } + } + fsLabel[devmapperSnapshotFsType] = string(fileSystemType) + opts = append(opts, snapshots.WithLabels(fsLabel)) + snap, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...) if err != nil { return nil, err } if len(snap.ParentIDs) == 0 { + fsOptions := "" deviceName := s.getDeviceName(snap.ID) log.G(ctx).Debugf("creating new thin device '%s'", deviceName) @@ -365,7 +413,14 @@ func (s *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k return nil, err } - if err := mkfs(ctx, dmsetup.GetFullDevicePath(deviceName)); err != nil { + if s.config.FileSystemType == fsTypeExt4 && s.config.FsOptions == "" { + // Explicitly disable lazy_itable_init and lazy_journal_init in order to enable lazy initialization. 
+ fsOptions = "nodiscard,lazy_itable_init=0,lazy_journal_init=0" + } else { + fsOptions = s.config.FsOptions + } + log.G(ctx).Debugf("Creating file system of type: %s with options: %s for thin device %q", s.config.FileSystemType, fsOptions, deviceName) + if err := mkfs(ctx, s.config.FileSystemType, fsOptions, dmsetup.GetFullDevicePath(deviceName)); err != nil { status, sErr := dmsetup.Status(s.pool.poolName) if sErr != nil { multierror.Append(err, sErr) @@ -379,16 +434,17 @@ func (s *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k } else { parentDeviceName := s.getDeviceName(snap.ParentIDs[0]) snapDeviceName := s.getDeviceName(snap.ID) - log.G(ctx).Debugf("creating snapshot device '%s' from '%s'", snapDeviceName, parentDeviceName) - err := s.pool.CreateSnapshotDevice(ctx, parentDeviceName, snapDeviceName, s.config.BaseImageSizeBytes) + log.G(ctx).Debugf("creating snapshot device '%s' from '%s' with fsType: '%s'", snapDeviceName, parentDeviceName, fileSystemType) + + err = s.pool.CreateSnapshotDevice(ctx, parentDeviceName, snapDeviceName, s.config.BaseImageSizeBytes) if err != nil { log.G(ctx).WithError(err).Errorf("failed to create snapshot device from parent %s", parentDeviceName) return nil, err } } - mounts := s.buildMounts(snap) + mounts := s.buildMounts(ctx, snap, fileSystemType) // Remove default directories not expected by the container image _ = mount.WithTempMount(ctx, mounts, func(root string) error { @@ -398,20 +454,34 @@ func (s *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k return mounts, nil } -// mkfs creates ext4 filesystem on the given devmapper device -func mkfs(ctx context.Context, path string) error { - args := []string{ - "-E", - // We don't want any zeroing in advance when running mkfs on thin devices (see "man mkfs.ext4") - "nodiscard,lazy_itable_init=0,lazy_journal_init=0", - path, +// mkfs creates filesystem on the given devmapper device based on type +// specified in config. 
+func mkfs(ctx context.Context, fs fsType, fsOptions string, path string) error { + mkfsCommand := "" + var args []string + + switch fs { + case fsTypeExt4: + mkfsCommand = "mkfs.ext4" + args = []string{ + "-E", + fsOptions, + path, + } + case fsTypeXFS: + mkfsCommand = "mkfs.xfs" + args = []string{ + path, + } + default: + return errors.New("file system not supported") } - log.G(ctx).Debugf("mkfs.ext4 %s", strings.Join(args, " ")) - b, err := exec.Command("mkfs.ext4", args...).CombinedOutput() + log.G(ctx).Debugf("%s %s", mkfsCommand, strings.Join(args, " ")) + b, err := exec.Command(mkfsCommand, args...).CombinedOutput() out := string(b) if err != nil { - return errors.Wrapf(err, "mkfs.ext4 couldn't initialize %q: %s", path, out) + return fmt.Errorf("%s couldn't initialize %q: %s: %w", mkfsCommand, path, out, err) } log.G(ctx).Debugf("mkfs:\n%s", out) @@ -428,9 +498,15 @@ func (s *Snapshotter) getDevicePath(snap storage.Snapshot) string { return dmsetup.GetFullDevicePath(name) } -func (s *Snapshotter) buildMounts(snap storage.Snapshot) []mount.Mount { +func (s *Snapshotter) buildMounts(ctx context.Context, snap storage.Snapshot, fileSystemType fsType) []mount.Mount { var options []string + if fileSystemType == "" { + log.G(ctx).Error("File system type cannot be empty") + return nil + } else if fileSystemType == fsTypeXFS { + options = append(options, "nouuid") + } if snap.Kind != snapshots.KindActive { options = append(options, "ro") } @@ -438,7 +514,7 @@ func (s *Snapshotter) buildMounts(snap storage.Snapshot) []mount.Mount { mounts := []mount.Mount{ { Source: s.getDevicePath(snap), - Type: fsTypeExt4, + Type: string(fileSystemType), Options: options, }, } @@ -465,12 +541,12 @@ func (s *Snapshotter) withTransaction(ctx context.Context, writable bool, fn fun if err != nil || !writable { if terr := trans.Rollback(); terr != nil { log.G(ctx).WithError(terr).Error("failed to rollback transaction") - result = multierror.Append(result, errors.Wrap(terr, "rollback 
failed")) + result = multierror.Append(result, fmt.Errorf("rollback failed: %w", terr)) } } else { if terr := trans.Commit(); terr != nil { log.G(ctx).WithError(terr).Error("failed to commit transaction") - result = multierror.Append(result, errors.Wrap(terr, "commit failed")) + result = multierror.Append(result, fmt.Errorf("commit failed: %w", terr)) } } @@ -504,7 +580,7 @@ func (s *Snapshotter) Cleanup(ctx context.Context) error { } return nil }); err != nil { - log.G(ctx).WithError(err).Errorf("failed to query devices from metastore") + log.G(ctx).WithError(err).Error("failed to query devices from metastore") return err } diff --git a/snapshots/devmapper/snapshotter_test.go b/snapshots/devmapper/snapshotter_test.go index 7a3c64d..369d43f 100644 --- a/snapshots/devmapper/snapshotter_test.go +++ b/snapshots/devmapper/snapshotter_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -22,8 +23,6 @@ import ( "context" _ "crypto/sha256" "fmt" - "io/ioutil" - "os" "testing" "time" @@ -46,38 +45,13 @@ func TestSnapshotterSuite(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) snapshotterFn := func(ctx context.Context, root string) (snapshots.Snapshotter, func() error, error) { - // Create loopback devices for each test case - _, loopDataDevice := createLoopbackDevice(t, root) - _, loopMetaDevice := createLoopbackDevice(t, root) - poolName := fmt.Sprintf("containerd-snapshotter-suite-pool-%d", time.Now().Nanosecond()) - err := dmsetup.CreatePool(poolName, loopDataDevice, loopMetaDevice, 64*1024/dmsetup.SectorSize) - assert.NilError(t, err, "failed to create pool %q", poolName) - config := &Config{ RootPath: root, PoolName: poolName, BaseImageSize: "16Mb", } - - snap, err := NewSnapshotter(context.Background(), config) - if err != nil { - return nil, nil, err - } - - // Remove device mapper pool and detach loop devices after test completes - removePool := func() error { - result := multierror.Append( - snap.pool.RemovePool(ctx), - 
mount.DetachLoopDevice(loopDataDevice, loopMetaDevice)) - - return result.ErrorOrNil() - } - - // Pool cleanup should be called before closing metadata store (as we need to retrieve device names) - snap.cleanupFn = append([]closeFunc{removePool}, snap.cleanupFn...) - - return snap, snap.Close, nil + return createSnapshotter(ctx, t, config) } testsuite.SnapshotterSuite(t, "devmapper", snapshotterFn) @@ -86,11 +60,7 @@ func TestSnapshotterSuite(t *testing.T) { ctx = namespaces.WithNamespace(ctx, "testsuite") t.Run("DevMapperUsage", func(t *testing.T) { - tempDir, err := ioutil.TempDir("", "snapshot-suite-usage") - assert.NilError(t, err) - defer os.RemoveAll(tempDir) - - snapshotter, closer, err := snapshotterFn(ctx, tempDir) + snapshotter, closer, err := snapshotterFn(ctx, t.TempDir()) assert.NilError(t, err) defer closer() @@ -140,8 +110,105 @@ func testUsage(t *testing.T, snapshotter snapshots.Snapshotter) { "%d > %d", layer2Usage.Size, sizeBytes) } -func TestMkfs(t *testing.T) { +func TestMkfsExt4(t *testing.T) { ctx := context.Background() - err := mkfs(ctx, "") + // We test the default setting which is lazy init is disabled + err := mkfs(ctx, "ext4", "nodiscard,lazy_itable_init=0,lazy_journal_init=0", "") assert.ErrorContains(t, err, `mkfs.ext4 couldn't initialize ""`) } + +func TestMkfsExt4NonDefault(t *testing.T) { + ctx := context.Background() + // We test a non default setting where we enable lazy init for ext4 + err := mkfs(ctx, "ext4", "nodiscard", "") + assert.ErrorContains(t, err, `mkfs.ext4 couldn't initialize ""`) +} + +func TestMkfsXfs(t *testing.T) { + ctx := context.Background() + err := mkfs(ctx, "xfs", "", "") + assert.ErrorContains(t, err, `mkfs.xfs couldn't initialize ""`) +} + +func TestMkfsXfsNonDefault(t *testing.T) { + ctx := context.Background() + err := mkfs(ctx, "xfs", "noquota", "") + assert.ErrorContains(t, err, `mkfs.xfs couldn't initialize ""`) +} + +func TestMultipleXfsMounts(t *testing.T) { + testutil.RequiresRoot(t) + + 
logrus.SetLevel(logrus.DebugLevel) + + ctx := context.Background() + ctx = namespaces.WithNamespace(ctx, "testsuite") + + poolName := fmt.Sprintf("containerd-snapshotter-suite-pool-%d", time.Now().Nanosecond()) + config := &Config{ + RootPath: t.TempDir(), + PoolName: poolName, + BaseImageSize: "16Mb", + FileSystemType: "xfs", + } + snapshotter, closer, err := createSnapshotter(ctx, t, config) + assert.NilError(t, err) + defer closer() + + var ( + sizeBytes int64 = 1048576 // 1MB + baseApplier = fstest.Apply(fstest.CreateRandomFile("/a", 12345679, sizeBytes, 0777)) + ) + + // Create base layer + mounts, err := snapshotter.Prepare(ctx, "prepare-1", "") + assert.NilError(t, err) + + root1 := t.TempDir() + defer func() { + mount.UnmountAll(root1, 0) + }() + err = mount.All(mounts, root1) + assert.NilError(t, err) + baseApplier.Apply(root1) + snapshotter.Commit(ctx, "layer-1", "prepare-1") + + // Create one child layer + mounts, err = snapshotter.Prepare(ctx, "prepare-2", "layer-1") + assert.NilError(t, err) + + root2 := t.TempDir() + defer func() { + mount.UnmountAll(root2, 0) + }() + err = mount.All(mounts, root2) + assert.NilError(t, err) +} + +func createSnapshotter(ctx context.Context, t *testing.T, config *Config) (snapshots.Snapshotter, func() error, error) { + // Create loopback devices for each test case + _, loopDataDevice := createLoopbackDevice(t, config.RootPath) + _, loopMetaDevice := createLoopbackDevice(t, config.RootPath) + + err := dmsetup.CreatePool(config.PoolName, loopDataDevice, loopMetaDevice, 64*1024/dmsetup.SectorSize) + assert.NilError(t, err, "failed to create pool %q", config.PoolName) + + snap, err := NewSnapshotter(ctx, config) + if err != nil { + return nil, nil, err + } + + // Remove device mapper pool and detach loop devices after test completes + removePool := func() error { + result := multierror.Append( + snap.pool.RemovePool(ctx), + mount.DetachLoopDevice(loopDataDevice, loopMetaDevice)) + + return result.ErrorOrNil() + } + + // 
Pool cleanup should be called before closing metadata store (as we need to retrieve device names) + snap.cleanupFn = append([]closeFunc{removePool}, snap.cleanupFn...) + + return snap, snap.Close, nil +} diff --git a/snapshots/lcow/lcow.go b/snapshots/lcow/lcow.go index 3bc94df..8c911c0 100644 --- a/snapshots/lcow/lcow.go +++ b/snapshots/lcow/lcow.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* @@ -41,7 +42,6 @@ import ( "github.com/containerd/containerd/snapshots/storage" "github.com/containerd/continuity/fs" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) func init() { @@ -79,7 +79,7 @@ func NewSnapshotter(root string) (snapshots.Snapshotter, error) { return nil, err } if strings.ToLower(fsType) != "ntfs" { - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%s is not on an NTFS volume - only NTFS volumes are supported", root) + return nil, fmt.Errorf("%s is not on an NTFS volume - only NTFS volumes are supported: %w", root, errdefs.ErrInvalidArgument) } if err := os.MkdirAll(root, 0700); err != nil { @@ -179,7 +179,7 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er snapshot, err := storage.GetSnapshot(ctx, key) if err != nil { - return nil, errors.Wrap(err, "failed to get snapshot mount") + return nil, fmt.Errorf("failed to get snapshot mount: %w", err) } return s.mounts(snapshot), nil } @@ -209,7 +209,7 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap } if _, err = storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil { - return errors.Wrap(err, "failed to commit snapshot") + return fmt.Errorf("failed to commit snapshot: %w", err) } return t.Commit() @@ -226,7 +226,7 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { id, _, err := storage.Remove(ctx, key) if err != nil { - return errors.Wrap(err, "failed to remove") + return fmt.Errorf("failed to remove: %w", err) } path := 
s.getSnapshotDir(id) @@ -238,9 +238,9 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { if err := t.Commit(); err != nil { if err1 := os.Rename(renamed, path); err1 != nil { // May cause inconsistent data on disk - log.G(ctx).WithError(err1).WithField("path", renamed).Errorf("Failed to rename after failed commit") + log.G(ctx).WithError(err1).WithField("path", renamed).Error("Failed to rename after failed commit") } - return errors.Wrap(err, "failed to commit") + return fmt.Errorf("failed to commit: %w", err) } if err := os.RemoveAll(renamed); err != nil { @@ -318,7 +318,7 @@ func (s *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k newSnapshot, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...) if err != nil { - return nil, errors.Wrap(err, "failed to create snapshot") + return nil, fmt.Errorf("failed to create snapshot: %w", err) } if kind == snapshots.KindActive { @@ -384,20 +384,20 @@ func (s *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k destPath := filepath.Join(snDir, "sandbox.vhdx") dest, err := os.OpenFile(destPath, os.O_RDWR|os.O_CREATE, 0700) if err != nil { - return nil, errors.Wrap(err, "failed to create sandbox.vhdx in snapshot") + return nil, fmt.Errorf("failed to create sandbox.vhdx in snapshot: %w", err) } defer dest.Close() if _, err := io.Copy(dest, scratchSource); err != nil { dest.Close() os.Remove(destPath) - return nil, errors.Wrap(err, "failed to copy cached scratch.vhdx to sandbox.vhdx in snapshot") + return nil, fmt.Errorf("failed to copy cached scratch.vhdx to sandbox.vhdx in snapshot: %w", err) } } } } if err := t.Commit(); err != nil { - return nil, errors.Wrap(err, "commit failed") + return nil, fmt.Errorf("commit failed: %w", err) } return s.mounts(newSnapshot), nil @@ -416,19 +416,19 @@ func (s *snapshotter) handleSharing(ctx context.Context, id, snDir string) error mounts, err := s.Mounts(ctx, key) if err != nil { - return errors.Wrap(err, "failed to 
get mounts for owner snapshot") + return fmt.Errorf("failed to get mounts for owner snapshot: %w", err) } sandboxPath := filepath.Join(mounts[0].Source, "sandbox.vhdx") linkPath := filepath.Join(snDir, "sandbox.vhdx") if _, err := os.Stat(sandboxPath); err != nil { - return errors.Wrap(err, "failed to find sandbox.vhdx in snapshot directory") + return fmt.Errorf("failed to find sandbox.vhdx in snapshot directory: %w", err) } // We've found everything we need, now just make a symlink in our new snapshot to the // sandbox.vhdx in the scratch we're asking to share. if err := os.Symlink(sandboxPath, linkPath); err != nil { - return errors.Wrap(err, "failed to create symlink for sandbox scratch space") + return fmt.Errorf("failed to create symlink for sandbox scratch space: %w", err) } return nil } @@ -451,7 +451,7 @@ func (s *snapshotter) openOrCreateScratch(ctx context.Context, sizeGB int, scrat scratchSource, err := os.OpenFile(scratchFinalPath, os.O_RDONLY, 0700) if err != nil { if !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "failed to open vhd %s for read", vhdFileName) + return nil, fmt.Errorf("failed to open vhd %s for read: %w", vhdFileName, err) } log.G(ctx).Debugf("vhdx %s not found, creating a new one", vhdFileName) @@ -477,16 +477,16 @@ func (s *snapshotter) openOrCreateScratch(ctx context.Context, sizeGB int, scrat if err := rhcs.CreateScratchWithOpts(ctx, scratchTempPath, &opt); err != nil { os.Remove(scratchTempPath) - return nil, errors.Wrapf(err, "failed to create '%s' temp file", scratchTempName) + return nil, fmt.Errorf("failed to create '%s' temp file: %w", scratchTempName, err) } if err := os.Rename(scratchTempPath, scratchFinalPath); err != nil { os.Remove(scratchTempPath) - return nil, errors.Wrapf(err, "failed to rename '%s' temp file to 'scratch.vhdx'", scratchTempName) + return nil, fmt.Errorf("failed to rename '%s' temp file to 'scratch.vhdx': %w", scratchTempName, err) } scratchSource, err = os.OpenFile(scratchFinalPath, 
os.O_RDONLY, 0700) if err != nil { os.Remove(scratchFinalPath) - return nil, errors.Wrap(err, "failed to open scratch.vhdx for read after creation") + return nil, fmt.Errorf("failed to open scratch.vhdx for read after creation: %w", err) } } else { log.G(ctx).Debugf("scratch vhd %s was already present. Retrieved from cache", vhdFileName) diff --git a/snapshots/native/native.go b/snapshots/native/native.go index 81d3deb..dd9f8a4 100644 --- a/snapshots/native/native.go +++ b/snapshots/native/native.go @@ -18,7 +18,7 @@ package native import ( "context" - "io/ioutil" + "fmt" "os" "path/filepath" @@ -28,7 +28,6 @@ import ( "github.com/containerd/containerd/snapshots/storage" "github.com/containerd/continuity/fs" - "github.com/pkg/errors" ) type snapshotter struct { @@ -138,7 +137,7 @@ func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er s, err := storage.GetSnapshot(ctx, key) t.Rollback() if err != nil { - return nil, errors.Wrap(err, "failed to get snapshot mount") + return nil, fmt.Errorf("failed to get snapshot mount: %w", err) } return o.mounts(s), nil } @@ -151,11 +150,17 @@ func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap id, _, _, err := storage.GetInfo(ctx, key) if err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } return err } usage, err := fs.DiskUsage(ctx, o.getSnapshotDir(id)) if err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } return err } @@ -163,7 +168,7 @@ func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap if rerr := t.Rollback(); rerr != nil { log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") } - return errors.Wrap(err, "failed to commit snapshot") + return fmt.Errorf("failed to commit snapshot: %w", err) } return t.Commit() } @@ -185,14 +190,14 @@ func (o *snapshotter) Remove(ctx context.Context, 
key string) (err error) { id, _, err := storage.Remove(ctx, key) if err != nil { - return errors.Wrap(err, "failed to remove") + return fmt.Errorf("failed to remove: %w", err) } path := o.getSnapshotDir(id) renamed := filepath.Join(o.root, "snapshots", "rm-"+id) if err := os.Rename(path, renamed); err != nil { if !os.IsNotExist(err) { - return errors.Wrap(err, "failed to rename") + return fmt.Errorf("failed to rename: %w", err) } renamed = "" } @@ -203,10 +208,10 @@ func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { if renamed != "" { if err1 := os.Rename(renamed, path); err1 != nil { // May cause inconsistent data on disk - log.G(ctx).WithError(err1).WithField("path", renamed).Errorf("failed to rename after failed commit") + log.G(ctx).WithError(err1).WithField("path", renamed).Error("failed to rename after failed commit") } } - return errors.Wrap(err, "failed to commit") + return fmt.Errorf("failed to commit: %w", err) } if renamed != "" { if err := os.RemoveAll(renamed); err != nil { @@ -234,23 +239,23 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k ) if kind == snapshots.KindActive || parent == "" { - td, err = ioutil.TempDir(filepath.Join(o.root, "snapshots"), "new-") + td, err = os.MkdirTemp(filepath.Join(o.root, "snapshots"), "new-") if err != nil { - return nil, errors.Wrap(err, "failed to create temp dir") + return nil, fmt.Errorf("failed to create temp dir: %w", err) } if err := os.Chmod(td, 0755); err != nil { - return nil, errors.Wrapf(err, "failed to chmod %s to 0755", td) + return nil, fmt.Errorf("failed to chmod %s to 0755: %w", td, err) } defer func() { if err != nil { if td != "" { if err1 := os.RemoveAll(td); err1 != nil { - err = errors.Wrapf(err, "remove failed: %v", err1) + err = fmt.Errorf("remove failed: %v: %w", err1, err) } } if path != "" { if err1 := os.RemoveAll(path); err1 != nil { - err = errors.Wrapf(err, "failed to remove path: %v", err1) + err = fmt.Errorf("failed to 
remove path: %v: %w", err1, err) } } } @@ -267,7 +272,7 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if rerr := t.Rollback(); rerr != nil { log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") } - return nil, errors.Wrap(err, "failed to create snapshot") + return nil, fmt.Errorf("failed to create snapshot: %w", err) } if td != "" { @@ -282,7 +287,10 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k fs.WithXAttrErrorHandler(xattrErrorHandler), } if err := fs.CopyDir(td, parent, copyDirOpts...); err != nil { - return nil, errors.Wrap(err, "copying of parent failed") + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + return nil, fmt.Errorf("copying of parent failed: %w", err) } } @@ -291,13 +299,13 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if rerr := t.Rollback(); rerr != nil { log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") } - return nil, errors.Wrap(err, "failed to rename") + return nil, fmt.Errorf("failed to rename: %w", err) } td = "" } if err := t.Commit(); err != nil { - return nil, errors.Wrap(err, "commit failed") + return nil, fmt.Errorf("commit failed: %w", err) } return o.mounts(s), nil diff --git a/snapshots/native/native_default.go b/snapshots/native/native_default.go index ee594ba..b51bf48 100644 --- a/snapshots/native/native_default.go +++ b/snapshots/native/native_default.go @@ -1,3 +1,4 @@ +//go:build !freebsd // +build !freebsd /* diff --git a/snapshots/native/native_freebsd.go b/snapshots/native/native_freebsd.go index b97eb88..4647785 100644 --- a/snapshots/native/native_freebsd.go +++ b/snapshots/native/native_freebsd.go @@ -1,5 +1,3 @@ -// +build freebsd - /* Copyright The containerd Authors. 
diff --git a/snapshots/overlay/overlay.go b/snapshots/overlay/overlay.go index 441ee10..3df18f3 100644 --- a/snapshots/overlay/overlay.go +++ b/snapshots/overlay/overlay.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -21,7 +22,6 @@ package overlay import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -33,13 +33,19 @@ import ( "github.com/containerd/containerd/snapshots/overlay/overlayutils" "github.com/containerd/containerd/snapshots/storage" "github.com/containerd/continuity/fs" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +// upperdirKey is a key of an optional lablel to each snapshot. +// This optional label of a snapshot contains the location of "upperdir" where +// the change set between this snapshot and its parent is stored. +const upperdirKey = "containerd.io/snapshot/overlay.upperdir" + // SnapshotterConfig is used to configure the overlay snapshotter instance type SnapshotterConfig struct { - asyncRemove bool + asyncRemove bool + upperdirLabel bool + mountOptions []string } // Opt is an option to configure the overlay snapshotter @@ -54,12 +60,30 @@ func AsynchronousRemove(config *SnapshotterConfig) error { return nil } +// WithUpperdirLabel adds as an optional label +// "containerd.io/snapshot/overlay.upperdir". This stores the location +// of the upperdir that contains the changeset between the labelled +// snapshot and its parent. +func WithUpperdirLabel(config *SnapshotterConfig) error { + config.upperdirLabel = true + return nil +} + +// WithMountOptions defines the default mount options used for the overlay mount. +// NOTE: Options are not applied to bind mounts. +func WithMountOptions(options []string) Opt { + return func(config *SnapshotterConfig) error { + config.mountOptions = append(config.mountOptions, options...) 
+ return nil + } +} + type snapshotter struct { - root string - ms *storage.MetaStore - asyncRemove bool - indexOff bool - userxattr bool // whether to enable "userxattr" mount option + root string + ms *storage.MetaStore + asyncRemove bool + upperdirLabel bool + options []string } // NewSnapshotter returns a Snapshotter which uses overlayfs. The overlayfs @@ -92,27 +116,43 @@ func NewSnapshotter(root string, opts ...Opt) (snapshots.Snapshotter, error) { return nil, err } - // figure out whether "index=off" option is recognized by the kernel - var indexOff bool - if _, err = os.Stat("/sys/module/overlay/parameters/index"); err == nil { - indexOff = true + if !hasOption(config.mountOptions, "userxattr", false) { + // figure out whether "userxattr" option is recognized by the kernel && needed + userxattr, err := overlayutils.NeedsUserXAttr(root) + if err != nil { + logrus.WithError(err).Warnf("cannot detect whether \"userxattr\" option needs to be used, assuming to be %v", userxattr) + } + if userxattr { + config.mountOptions = append(config.mountOptions, "userxattr") + } } - // figure out whether "userxattr" option is recognized by the kernel && needed - userxattr, err := overlayutils.NeedsUserXAttr(root) - if err != nil { - logrus.WithError(err).Warnf("cannot detect whether \"userxattr\" option needs to be used, assuming to be %v", userxattr) + if !hasOption(config.mountOptions, "index", false) && supportsIndex() { + config.mountOptions = append(config.mountOptions, "index=off") } return &snapshotter{ - root: root, - ms: ms, - asyncRemove: config.asyncRemove, - indexOff: indexOff, - userxattr: userxattr, + root: root, + ms: ms, + asyncRemove: config.asyncRemove, + upperdirLabel: config.upperdirLabel, + options: config.mountOptions, }, nil } +func hasOption(options []string, key string, hasValue bool) bool { + for _, option := range options { + if hasValue { + if strings.HasPrefix(option, key) && len(option) > len(key) && option[len(key)] == '=' { + return true + } 
+ } else if option == key { + return true + } + } + return false +} + // Stat returns the info for an active or committed snapshot by name or // key. // @@ -124,11 +164,18 @@ func (o *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, err return snapshots.Info{}, err } defer t.Rollback() - _, info, _, err := storage.GetInfo(ctx, key) + id, info, _, err := storage.GetInfo(ctx, key) if err != nil { return snapshots.Info{}, err } + if o.upperdirLabel { + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[upperdirKey] = o.upperPath(id) + } + return info, nil } @@ -144,6 +191,17 @@ func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath return snapshots.Info{}, err } + if o.upperdirLabel { + id, _, _, err := storage.GetInfo(ctx, info.Name) + if err != nil { + return snapshots.Info{}, err + } + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[upperdirKey] = o.upperPath(id) + } + if err := t.Commit(); err != nil { return snapshots.Info{}, err } @@ -203,7 +261,7 @@ func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er s, err := storage.GetSnapshot(ctx, key) t.Rollback() if err != nil { - return nil, errors.Wrap(err, "failed to get active mount") + return nil, fmt.Errorf("failed to get active mount: %w", err) } return o.mounts(s), nil } @@ -234,7 +292,7 @@ func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap } if _, err = storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil { - return errors.Wrap(err, "failed to commit snapshot") + return fmt.Errorf("failed to commit snapshot: %w", err) } return t.Commit() } @@ -257,14 +315,14 @@ func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { _, _, err = storage.Remove(ctx, key) if err != nil { - return errors.Wrap(err, "failed to remove") + return fmt.Errorf("failed to remove: %w", err) } if !o.asyncRemove { var removals 
[]string removals, err = o.getCleanupDirectories(ctx, t) if err != nil { - return errors.Wrap(err, "unable to get directories for removal") + return fmt.Errorf("unable to get directories for removal: %w", err) } // Remove directories after the transaction is closed, failures must not @@ -292,6 +350,19 @@ func (o *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...str return err } defer t.Rollback() + if o.upperdirLabel { + return storage.WalkInfo(ctx, func(ctx context.Context, info snapshots.Info) error { + id, _, _, err := storage.GetInfo(ctx, info.Name) + if err != nil { + return err + } + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[upperdirKey] = o.upperPath(id) + return fn(ctx, info) + }, fs...) + } return storage.WalkInfo(ctx, fn, fs...) } @@ -370,7 +441,7 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if path != "" { if err1 := os.RemoveAll(path); err1 != nil { log.G(ctx).WithError(err1).WithField("path", path).Error("failed to reclaim snapshot directory, directory may need removal") - err = errors.Wrapf(err, "failed to remove path: %v", err1) + err = fmt.Errorf("failed to remove path: %v: %w", err1, err) } } } @@ -382,7 +453,7 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if rerr := t.Rollback(); rerr != nil { log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") } - return nil, errors.Wrap(err, "failed to create prepare snapshot dir") + return nil, fmt.Errorf("failed to create prepare snapshot dir: %w", err) } rollback := true defer func() { @@ -395,13 +466,13 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k s, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...) 
if err != nil { - return nil, errors.Wrap(err, "failed to create snapshot") + return nil, fmt.Errorf("failed to create snapshot: %w", err) } if len(s.ParentIDs) > 0 { st, err := os.Stat(o.upperPath(s.ParentIDs[0])) if err != nil { - return nil, errors.Wrap(err, "failed to stat parent") + return nil, fmt.Errorf("failed to stat parent: %w", err) } stat := st.Sys().(*syscall.Stat_t) @@ -410,28 +481,28 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if rerr := t.Rollback(); rerr != nil { log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") } - return nil, errors.Wrap(err, "failed to chown") + return nil, fmt.Errorf("failed to chown: %w", err) } } path = filepath.Join(snapshotDir, s.ID) if err = os.Rename(td, path); err != nil { - return nil, errors.Wrap(err, "failed to rename") + return nil, fmt.Errorf("failed to rename: %w", err) } td = "" rollback = false if err = t.Commit(); err != nil { - return nil, errors.Wrap(err, "commit failed") + return nil, fmt.Errorf("commit failed: %w", err) } return o.mounts(s), nil } func (o *snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) { - td, err := ioutil.TempDir(snapshotDir, "new-") + td, err := os.MkdirTemp(snapshotDir, "new-") if err != nil { - return "", errors.Wrap(err, "failed to create temp dir") + return "", fmt.Errorf("failed to create temp dir: %w", err) } if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil { @@ -467,17 +538,8 @@ func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount { }, } } - var options []string - - // set index=off when mount overlayfs - if o.indexOff { - options = append(options, "index=off") - } - - if o.userxattr { - options = append(options, "userxattr") - } + options := o.options if s.Kind == snapshots.KindActive { options = append(options, fmt.Sprintf("workdir=%s", o.workPath(s.ID)), @@ -524,3 +586,11 @@ func (o *snapshotter) workPath(id string) string { func (o 
*snapshotter) Close() error { return o.ms.Close() } + +// supportsIndex checks whether the "index=off" option is supported by the kernel. +func supportsIndex() bool { + if _, err := os.Stat("/sys/module/overlay/parameters/index"); err == nil { + return true + } + return false +} diff --git a/snapshots/overlay/overlay_test.go b/snapshots/overlay/overlay_test.go index af9d876..b347722 100644 --- a/snapshots/overlay/overlay_test.go +++ b/snapshots/overlay/overlay_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -21,7 +22,6 @@ package overlay import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "syscall" @@ -71,7 +71,7 @@ func TestOverlay(t *testing.T) { testOverlayOverlayRead(t, newSnapshotter) }) t.Run("TestOverlayView", func(t *testing.T) { - testOverlayView(t, newSnapshotter) + testOverlayView(t, newSnapshotterWithOpts(append(opts, WithMountOptions([]string{"volatile"}))...)) }) }) } @@ -79,11 +79,7 @@ func TestOverlay(t *testing.T) { func testOverlayMounts(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { ctx := context.TODO() - root, err := ioutil.TempDir("", "overlay") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) + root := t.TempDir() o, _, err := newSnapshotter(ctx, root) if err != nil { t.Fatal(err) @@ -113,11 +109,7 @@ func testOverlayMounts(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { func testOverlayCommit(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { ctx := context.TODO() - root, err := ioutil.TempDir("", "overlay") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) + root := t.TempDir() o, _, err := newSnapshotter(ctx, root) if err != nil { t.Fatal(err) @@ -128,7 +120,7 @@ func testOverlayCommit(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { t.Fatal(err) } m := mounts[0] - if err := ioutil.WriteFile(filepath.Join(m.Source, "foo"), []byte("hi"), 0660); err != nil { + if err := os.WriteFile(filepath.Join(m.Source, "foo"), []byte("hi"), 0660); err != nil { 
t.Fatal(err) } if err := o.Commit(ctx, "base", key); err != nil { @@ -138,11 +130,7 @@ func testOverlayCommit(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { func testOverlayOverlayMount(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { ctx := context.TODO() - root, err := ioutil.TempDir("", "overlay") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) + root := t.TempDir() o, _, err := newSnapshotter(ctx, root) if err != nil { t.Fatal(err) @@ -178,6 +166,9 @@ func testOverlayOverlayMount(t *testing.T, newSnapshotter testsuite.SnapshotterF expected := []string{ "index=off", } + if !supportsIndex() { + expected = expected[1:] + } if userxattr, err := overlayutils.NeedsUserXAttr(root); err != nil { t.Fatal(err) } else if userxattr { @@ -232,11 +223,7 @@ func getParents(ctx context.Context, sn snapshots.Snapshotter, root, key string) func testOverlayOverlayRead(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { testutil.RequiresRoot(t) ctx := context.TODO() - root, err := ioutil.TempDir("", "overlay") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) + root := t.TempDir() o, _, err := newSnapshotter(ctx, root) if err != nil { t.Fatal(err) @@ -247,7 +234,7 @@ func testOverlayOverlayRead(t *testing.T, newSnapshotter testsuite.SnapshotterFu t.Fatal(err) } m := mounts[0] - if err := ioutil.WriteFile(filepath.Join(m.Source, "foo"), []byte("hi"), 0660); err != nil { + if err := os.WriteFile(filepath.Join(m.Source, "foo"), []byte("hi"), 0660); err != nil { t.Fatal(err) } if err := o.Commit(ctx, "base", key); err != nil { @@ -264,7 +251,7 @@ func testOverlayOverlayRead(t *testing.T, newSnapshotter testsuite.SnapshotterFu t.Fatal(err) } defer syscall.Unmount(dest, 0) - data, err := ioutil.ReadFile(filepath.Join(dest, "foo")) + data, err := os.ReadFile(filepath.Join(dest, "foo")) if err != nil { t.Fatal(err) } @@ -275,11 +262,7 @@ func testOverlayOverlayRead(t *testing.T, newSnapshotter testsuite.SnapshotterFu func 
testOverlayView(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { ctx := context.TODO() - root, err := ioutil.TempDir("", "overlay") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) + root := t.TempDir() o, _, err := newSnapshotter(ctx, root) if err != nil { t.Fatal(err) @@ -290,7 +273,7 @@ func testOverlayView(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { t.Fatal(err) } m := mounts[0] - if err := ioutil.WriteFile(filepath.Join(m.Source, "foo"), []byte("hi"), 0660); err != nil { + if err := os.WriteFile(filepath.Join(m.Source, "foo"), []byte("hi"), 0660); err != nil { t.Fatal(err) } if err := o.Commit(ctx, "base", key); err != nil { @@ -302,7 +285,7 @@ func testOverlayView(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { if err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(getParents(ctx, o, root, "/tmp/top")[0], "foo"), []byte("hi, again"), 0660); err != nil { + if err := os.WriteFile(filepath.Join(getParents(ctx, o, root, "/tmp/top")[0], "foo"), []byte("hi, again"), 0660); err != nil { t.Fatal(err) } if err := o.Commit(ctx, "top", key); err != nil { @@ -346,7 +329,11 @@ func testOverlayView(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { t.Errorf("mount source should be overlay but received %q", m.Source) } - expectedOptions := 2 + supportsIndex := supportsIndex() + expectedOptions := 3 + if !supportsIndex { + expectedOptions-- + } userxattr, err := overlayutils.NeedsUserXAttr(root) if err != nil { t.Fatal(err) @@ -360,10 +347,16 @@ func testOverlayView(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { } lowers := getParents(ctx, o, root, "/tmp/view2") expected = fmt.Sprintf("lowerdir=%s:%s", lowers[0], lowers[1]) - optIdx := 1 + optIdx := 2 + if !supportsIndex { + optIdx-- + } if userxattr { optIdx++ } + if m.Options[0] != "volatile" { + t.Error("expected option first option to be provided option \"volatile\"") + } if m.Options[optIdx] != expected { t.Errorf("expected option %q 
but received %q", expected, m.Options[optIdx]) } diff --git a/snapshots/overlay/overlayutils/check.go b/snapshots/overlay/overlayutils/check.go index bbe2a7d..0eb6b5a 100644 --- a/snapshots/overlay/overlayutils/check.go +++ b/snapshots/overlay/overlayutils/check.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -20,15 +21,20 @@ package overlayutils import ( "fmt" - "io/ioutil" "os" "path/filepath" + "syscall" + kernel "github.com/containerd/containerd/contrib/seccomp/kernelversion" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/pkg/userns" "github.com/containerd/continuity/fs" - "github.com/pkg/errors" +) + +const ( + // see https://man7.org/linux/man-pages/man2/statfs.2.html + tmpfsMagic = 0x01021994 ) // SupportsMultipleLowerDir checks if the system supports multiple lowerdirs, @@ -39,7 +45,7 @@ import ( // // Ported from moby overlay2. func SupportsMultipleLowerDir(d string) error { - td, err := ioutil.TempDir(d, "multiple-lowerdir-check") + td, err := os.MkdirTemp(d, "multiple-lowerdir-check") if err != nil { return err } @@ -63,7 +69,7 @@ func SupportsMultipleLowerDir(d string) error { } dest := filepath.Join(td, "merged") if err := m.Mount(dest); err != nil { - return errors.Wrap(err, "failed to mount overlay") + return fmt.Errorf("failed to mount overlay: %w", err) } if err := mount.UnmountAll(dest, 0); err != nil { log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest) @@ -88,6 +94,21 @@ func Supported(root string) error { return SupportsMultipleLowerDir(root) } +// IsPathOnTmpfs returns whether the path is on a tmpfs or not. 
+// +// It uses statfs to check if the fs type is TMPFS_MAGIC (0x01021994) +// see https://man7.org/linux/man-pages/man2/statfs.2.html +func IsPathOnTmpfs(d string) bool { + stat := syscall.Statfs_t{} + err := syscall.Statfs(d, &stat) + if err != nil { + log.L.WithError(err).Warnf("Could not retrieve statfs for %v", d) + return false + } + + return stat.Type == tmpfsMagic +} + // NeedsUserXAttr returns whether overlayfs should be mounted with the "userxattr" mount option. // // The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. @@ -114,10 +135,19 @@ func NeedsUserXAttr(d string) (bool, error) { return false, nil } - // TODO: add fast path for kernel >= 5.11 . + // userxattr not permitted on tmpfs https://man7.org/linux/man-pages/man5/tmpfs.5.html + if IsPathOnTmpfs(d) { + return false, nil + } + + // Fast path on kernels >= 5.11 // - // Keep in mind that distro vendors might be going to backport the patch to older kernels. - // So we can't completely remove the check. + // Keep in mind that distro vendors might be going to backport the patch to older kernels + // so we can't completely remove the "slow path". 
+ fiveDotEleven := kernel.KernelVersion{Kernel: 5, Major: 11} + if ok, err := kernel.GreaterEqualThan(fiveDotEleven); err == nil && ok { + return true, nil + } tdRoot := filepath.Join(d, "userxattr-check") if err := os.RemoveAll(tdRoot); err != nil { @@ -134,7 +164,7 @@ func NeedsUserXAttr(d string) (bool, error) { } }() - td, err := ioutil.TempDir(tdRoot, "") + td, err := os.MkdirTemp(tdRoot, "") if err != nil { return false, err } @@ -146,6 +176,7 @@ func NeedsUserXAttr(d string) (bool, error) { } opts := []string{ + "ro", fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work")), "userxattr", } diff --git a/snapshots/overlay/overlayutils/check_test.go b/snapshots/overlay/overlayutils/check_test.go index 77a2872..7000572 100644 --- a/snapshots/overlay/overlayutils/check_test.go +++ b/snapshots/overlay/overlayutils/check_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -19,22 +20,16 @@ package overlayutils import ( - "io/ioutil" - "os" - "os/exec" "testing" "github.com/containerd/containerd/pkg/testutil" "github.com/containerd/continuity/testutil/loopback" + exec "golang.org/x/sys/execabs" ) func testOverlaySupported(t testing.TB, expected bool, mkfs ...string) { testutil.RequiresRoot(t) - mnt, err := ioutil.TempDir("", "containerd-fs-test-supports-overlay") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(mnt) + mnt := t.TempDir() loop, err := loopback.New(100 << 20) // 100 MB if err != nil { diff --git a/snapshots/overlay/plugin/plugin.go b/snapshots/overlay/plugin/plugin.go index 33324d9..3bd8de0 100644 --- a/snapshots/overlay/plugin/plugin.go +++ b/snapshots/overlay/plugin/plugin.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -29,7 +30,12 @@ import ( // Config represents configuration for the overlay plugin. 
type Config struct { // Root directory for the plugin - RootPath string `toml:"root_path"` + RootPath string `toml:"root_path"` + UpperdirLabel bool `toml:"upperdir_label"` + SyncRemove bool `toml:"sync_remove"` + + // MountOptions are options used for the overlay mount (not used on bind mounts) + MountOptions []string `toml:"mount_options"` } func init() { @@ -50,8 +56,20 @@ func init() { root = config.RootPath } + var oOpts []overlay.Opt + if config.UpperdirLabel { + oOpts = append(oOpts, overlay.WithUpperdirLabel) + } + if !config.SyncRemove { + oOpts = append(oOpts, overlay.AsynchronousRemove) + } + + if len(config.MountOptions) > 0 { + oOpts = append(oOpts, overlay.WithMountOptions(config.MountOptions)) + } + ic.Meta.Exports["root"] = root - return overlay.NewSnapshotter(root, overlay.AsynchronousRemove) + return overlay.NewSnapshotter(root, oOpts...) }, }) } diff --git a/snapshots/snapshotter.go b/snapshots/snapshotter.go index 8b0ea85..e144fb1 100644 --- a/snapshots/snapshotter.go +++ b/snapshots/snapshotter.go @@ -153,10 +153,10 @@ type WalkFunc func(context.Context, Info) error // For consistency, we define the following terms to be used throughout this // interface for snapshotter implementations: // -// `ctx` - refers to a context.Context -// `key` - refers to an active snapshot -// `name` - refers to a committed snapshot -// `parent` - refers to the parent in relation +// `ctx` - refers to a context.Context +// `key` - refers to an active snapshot +// `name` - refers to a committed snapshot +// `parent` - refers to the parent in relation // // Most methods take various combinations of these identifiers. Typically, // `name` and `parent` will be used in cases where a method *only* takes @@ -168,7 +168,7 @@ type WalkFunc func(context.Context, Info) error // We cover several examples below to demonstrate the utility of a snapshot // snapshotter. 
// -// Importing a Layer +// # Importing a Layer // // To import a layer, we simply have the Snapshotter provide a list of // mounts to be applied such that our dst will capture a changeset. We start @@ -185,7 +185,7 @@ type WalkFunc func(context.Context, Info) error // "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339), // }) // mounts, err := snapshotter.Prepare(ctx, key, "", noGcOpt) -// if err != nil { ... } +// if err != nil { ... } // // We get back a list of mounts from Snapshotter.Prepare, with the key identifying // the active snapshot. Mount this to the temporary location with the @@ -202,8 +202,8 @@ type WalkFunc func(context.Context, Info) error // // layer, err := os.Open(layerPath) // if err != nil { ... } -// digest, err := unpackLayer(tmpLocation, layer) // unpack into layer location -// if err != nil { ... } +// digest, err := unpackLayer(tmpLocation, layer) // unpack into layer location +// if err != nil { ... } // // When the above completes, we should have a filesystem the represents the // contents of the layer. Careful implementations should verify that digest @@ -221,30 +221,30 @@ type WalkFunc func(context.Context, Info) error // Now, we have a layer in the Snapshotter that can be accessed with the digest // provided during commit. // -// Importing the Next Layer +// # Importing the Next Layer // // Making a layer depend on the above is identical to the process described // above except that the parent is provided as parent when calling // Manager.Prepare, assuming a clean, unique key identifier: // -// mounts, err := snapshotter.Prepare(ctx, key, parentDigest, noGcOpt) +// mounts, err := snapshotter.Prepare(ctx, key, parentDigest, noGcOpt) // // We then mount, apply and commit, as we did above. The new snapshot will be // based on the content of the previous one. // -// Running a Container +// # Running a Container // // To run a container, we simply provide Snapshotter.Prepare the committed image // snapshot as the parent. 
After mounting, the prepared path can // be used directly as the container's filesystem: // -// mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID) +// mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID) // // The returned mounts can then be passed directly to the container runtime. If // one would like to create a new image from the filesystem, Manager.Commit is // called: // -// if err := snapshotter.Commit(ctx, newImageSnapshot, containerKey); err != nil { ... } +// if err := snapshotter.Commit(ctx, newImageSnapshot, containerKey); err != nil { ... } // // Alternatively, for most container runs, Snapshotter.Remove will be called to // signal the Snapshotter to abandon the changes. diff --git a/snapshots/storage/bolt.go b/snapshots/storage/bolt.go index 712c71f..894ac2e 100644 --- a/snapshots/storage/bolt.go +++ b/snapshots/storage/bolt.go @@ -19,6 +19,7 @@ package storage import ( "context" "encoding/binary" + "errors" "fmt" "strings" "time" @@ -27,7 +28,6 @@ import ( "github.com/containerd/containerd/filters" "github.com/containerd/containerd/metadata/boltutil" "github.com/containerd/containerd/snapshots" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -100,7 +100,7 @@ func UpdateInfo(ctx context.Context, info snapshots.Info, fieldpaths ...string) err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { sbkt := bkt.Bucket([]byte(info.Name)) if sbkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist") + return fmt.Errorf("snapshot does not exist: %w", errdefs.ErrNotFound) } if err := readSnapshot(sbkt, nil, &updated); err != nil { return err @@ -122,7 +122,7 @@ func UpdateInfo(ctx context.Context, info snapshots.Info, fieldpaths ...string) case "labels": updated.Labels = info.Labels default: - return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on snapshot %q", path, info.Name) + return fmt.Errorf("cannot update %q field on snapshot %q: %w", 
path, info.Name, errdefs.ErrInvalidArgument) } } } else { @@ -181,25 +181,25 @@ func GetSnapshot(ctx context.Context, key string) (s Snapshot, err error) { err = withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { sbkt := bkt.Bucket([]byte(key)) if sbkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist") + return fmt.Errorf("snapshot does not exist: %w", errdefs.ErrNotFound) } s.ID = fmt.Sprintf("%d", readID(sbkt)) s.Kind = readKind(sbkt) if s.Kind != snapshots.KindActive && s.Kind != snapshots.KindView { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "requested snapshot %v not active or view", key) + return fmt.Errorf("requested snapshot %v not active or view: %w", key, errdefs.ErrFailedPrecondition) } if parentKey := sbkt.Get(bucketKeyParent); len(parentKey) > 0 { spbkt := bkt.Bucket(parentKey) if spbkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "parent does not exist") + return fmt.Errorf("parent does not exist: %w", errdefs.ErrNotFound) } s.ParentIDs, err = parents(bkt, spbkt, readID(spbkt)) if err != nil { - return errors.Wrap(err, "failed to get parent chain") + return fmt.Errorf("failed to get parent chain: %w", err) } } return nil @@ -216,7 +216,7 @@ func CreateSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string switch kind { case snapshots.KindActive, snapshots.KindView: default: - return Snapshot{}, errors.Wrapf(errdefs.ErrInvalidArgument, "snapshot type %v invalid; only snapshots of type Active or View can be created", kind) + return Snapshot{}, fmt.Errorf("snapshot type %v invalid; only snapshots of type Active or View can be created: %w", kind, errdefs.ErrInvalidArgument) } var base snapshots.Info for _, opt := range opts { @@ -232,24 +232,24 @@ func CreateSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string if parent != "" { spbkt = bkt.Bucket([]byte(parent)) if spbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "missing parent %q bucket", parent) + 
return fmt.Errorf("missing parent %q bucket: %w", parent, errdefs.ErrNotFound) } if readKind(spbkt) != snapshots.KindCommitted { - return errors.Wrapf(errdefs.ErrInvalidArgument, "parent %q is not committed snapshot", parent) + return fmt.Errorf("parent %q is not committed snapshot: %w", parent, errdefs.ErrInvalidArgument) } } sbkt, err := bkt.CreateBucket([]byte(key)) if err != nil { if err == bolt.ErrBucketExists { - err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v", key) + err = fmt.Errorf("snapshot %v: %w", key, errdefs.ErrAlreadyExists) } return err } id, err := bkt.NextSequence() if err != nil { - return errors.Wrapf(err, "unable to get identifier for snapshot %q", key) + return fmt.Errorf("unable to get identifier for snapshot %q: %w", key, err) } t := time.Now().UTC() @@ -270,12 +270,12 @@ func CreateSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string // Store a backlink from the key to the parent. Store the snapshot name // as the value to allow following the backlink to the snapshot value. 
if err := pbkt.Put(parentKey(pid, id), []byte(key)); err != nil { - return errors.Wrapf(err, "failed to write parent link for snapshot %q", key) + return fmt.Errorf("failed to write parent link for snapshot %q: %w", key, err) } s.ParentIDs, err = parents(bkt, spbkt, pid) if err != nil { - return errors.Wrapf(err, "failed to get parent chain for snapshot %q", key) + return fmt.Errorf("failed to get parent chain for snapshot %q: %w", key, err) } } @@ -302,33 +302,33 @@ func Remove(ctx context.Context, key string) (string, snapshots.Kind, error) { if err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { sbkt := bkt.Bucket([]byte(key)) if sbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v", key) + return fmt.Errorf("snapshot %v: %w", key, errdefs.ErrNotFound) } if err := readSnapshot(sbkt, &id, &si); err != nil { - return errors.Wrapf(err, "failed to read snapshot %s", key) + return fmt.Errorf("failed to read snapshot %s: %w", key, err) } if pbkt != nil { k, _ := pbkt.Cursor().Seek(parentPrefixKey(id)) if getParentPrefix(k) == id { - return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot remove snapshot with child") + return fmt.Errorf("cannot remove snapshot with child: %w", errdefs.ErrFailedPrecondition) } if si.Parent != "" { spbkt := bkt.Bucket([]byte(si.Parent)) if spbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v", key) + return fmt.Errorf("snapshot %v: %w", key, errdefs.ErrNotFound) } if err := pbkt.Delete(parentKey(readID(spbkt), id)); err != nil { - return errors.Wrap(err, "failed to delete parent link") + return fmt.Errorf("failed to delete parent link: %w", err) } } } if err := bkt.DeleteBucket([]byte(key)); err != nil { - return errors.Wrap(err, "failed to delete snapshot") + return fmt.Errorf("failed to delete snapshot: %w", err) } return nil @@ -362,20 +362,20 @@ func CommitActive(ctx context.Context, key, name string, usage snapshots.Usage, if err == bolt.ErrBucketExists { err = 
errdefs.ErrAlreadyExists } - return errors.Wrapf(err, "committed snapshot %v", name) + return fmt.Errorf("committed snapshot %v: %w", name, err) } sbkt := bkt.Bucket([]byte(key)) if sbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "failed to get active snapshot %q", key) + return fmt.Errorf("failed to get active snapshot %q: %w", key, errdefs.ErrNotFound) } var si snapshots.Info if err := readSnapshot(sbkt, &id, &si); err != nil { - return errors.Wrapf(err, "failed to read active snapshot %q", key) + return fmt.Errorf("failed to read active snapshot %q: %w", key, err) } if si.Kind != snapshots.KindActive { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "snapshot %q is not active", key) + return fmt.Errorf("snapshot %q is not active: %w", key, errdefs.ErrFailedPrecondition) } si.Kind = snapshots.KindCommitted si.Created = time.Now().UTC() @@ -391,18 +391,18 @@ func CommitActive(ctx context.Context, key, name string, usage snapshots.Usage, return err } if err := bkt.DeleteBucket([]byte(key)); err != nil { - return errors.Wrapf(err, "failed to delete active snapshot %q", key) + return fmt.Errorf("failed to delete active snapshot %q: %w", key, err) } if si.Parent != "" { spbkt := bkt.Bucket([]byte(si.Parent)) if spbkt == nil { - return errors.Wrapf(errdefs.ErrNotFound, "missing parent %q of snapshot %q", si.Parent, key) + return fmt.Errorf("missing parent %q of snapshot %q: %w", si.Parent, key, errdefs.ErrNotFound) } pid := readID(spbkt) // Updates parent back link to use new key if err := pbkt.Put(parentKey(pid, id), []byte(name)); err != nil { - return errors.Wrapf(err, "failed to update parent link %q from %q to %q", pid, key, name) + return fmt.Errorf("failed to update parent link %q from %q to %q: %w", pid, key, name, err) } } @@ -441,15 +441,15 @@ func withSnapshotBucket(ctx context.Context, key string, fn func(context.Context } vbkt := tx.Bucket(bucketKeyStorageVersion) if vbkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "bucket does not 
exist") + return fmt.Errorf("bucket does not exist: %w", errdefs.ErrNotFound) } bkt := vbkt.Bucket(bucketKeySnapshot) if bkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "snapshots bucket does not exist") + return fmt.Errorf("snapshots bucket does not exist: %w", errdefs.ErrNotFound) } bkt = bkt.Bucket([]byte(key)) if bkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist") + return fmt.Errorf("snapshot does not exist: %w", errdefs.ErrNotFound) } return fn(ctx, bkt, vbkt.Bucket(bucketKeyParents)) @@ -462,7 +462,7 @@ func withBucket(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bol } bkt := tx.Bucket(bucketKeyStorageVersion) if bkt == nil { - return errors.Wrap(errdefs.ErrNotFound, "bucket does not exist") + return fmt.Errorf("bucket does not exist: %w", errdefs.ErrNotFound) } return fn(ctx, bkt.Bucket(bucketKeySnapshot), bkt.Bucket(bucketKeyParents)) } @@ -475,15 +475,15 @@ func createBucketIfNotExists(ctx context.Context, fn func(context.Context, *bolt bkt, err := tx.CreateBucketIfNotExists(bucketKeyStorageVersion) if err != nil { - return errors.Wrap(err, "failed to create version bucket") + return fmt.Errorf("failed to create version bucket: %w", err) } sbkt, err := bkt.CreateBucketIfNotExists(bucketKeySnapshot) if err != nil { - return errors.Wrap(err, "failed to create snapshots bucket") + return fmt.Errorf("failed to create snapshots bucket: %w", err) } pbkt, err := bkt.CreateBucketIfNotExists(bucketKeyParents) if err != nil { - return errors.Wrap(err, "failed to create parents bucket") + return fmt.Errorf("failed to create parents bucket: %w", err) } return fn(ctx, sbkt, pbkt) } @@ -498,7 +498,7 @@ func parents(bkt, pbkt *bolt.Bucket, parent uint64) (parents []string, err error } pbkt = bkt.Bucket(parentKey) if pbkt == nil { - return nil, errors.Wrap(errdefs.ErrNotFound, "missing parent") + return nil, fmt.Errorf("missing parent: %w", errdefs.ErrNotFound) } parent = readID(pbkt) diff --git 
a/snapshots/storage/metastore.go b/snapshots/storage/metastore.go index 69ba3ea..6ba2f15 100644 --- a/snapshots/storage/metastore.go +++ b/snapshots/storage/metastore.go @@ -23,10 +23,10 @@ package storage import ( "context" + "fmt" "sync" "github.com/containerd/containerd/snapshots" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -88,7 +88,7 @@ func (ms *MetaStore) TransactionContext(ctx context.Context, writable bool) (con db, err := bolt.Open(ms.dbfile, 0600, nil) if err != nil { ms.dbL.Unlock() - return ctx, nil, errors.Wrap(err, "failed to open database file") + return ctx, nil, fmt.Errorf("failed to open database file: %w", err) } ms.db = db } @@ -96,7 +96,7 @@ func (ms *MetaStore) TransactionContext(ctx context.Context, writable bool) (con tx, err := ms.db.Begin(writable) if err != nil { - return ctx, nil, errors.Wrap(err, "failed to start transaction") + return ctx, nil, fmt.Errorf("failed to start transaction: %w", err) } ctx = context.WithValue(ctx, transactionKey{}, tx) diff --git a/snapshots/storage/metastore_bench_test.go b/snapshots/storage/metastore_bench_test.go index 4c51ded..8f360c1 100644 --- a/snapshots/storage/metastore_bench_test.go +++ b/snapshots/storage/metastore_bench_test.go @@ -19,8 +19,6 @@ package storage import ( "context" "fmt" - "io/ioutil" - "os" "testing" "github.com/containerd/containerd/snapshots" @@ -43,17 +41,16 @@ func Benchmarks(b *testing.B, name string, metaFn metaFactory) { func makeBench(b *testing.B, name string, metaFn metaFactory, fn func(context.Context, *testing.B, *MetaStore)) func(b *testing.B) { return func(b *testing.B) { ctx := context.Background() - tmpDir, err := ioutil.TempDir("", "metastore-bench-"+name+"-") - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(tmpDir) - ms, err := metaFn(tmpDir) + ms, err := metaFn(b.TempDir()) if err != nil { b.Fatal(err) } + b.Cleanup(func() { + ms.Close() + }) + ctx, t, err := ms.TransactionContext(ctx, true) if err != nil { b.Fatal(err) @@ -68,13 +65,8 @@ 
func makeBench(b *testing.B, name string, metaFn metaFactory, fn func(context.Co func openCloseWritable(b *testing.B, name string, metaFn metaFactory) func(b *testing.B) { return func(b *testing.B) { ctx := context.Background() - tmpDir, err := ioutil.TempDir("", "metastore-bench-"+name+"-") - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(tmpDir) - ms, err := metaFn(tmpDir) + ms, err := metaFn(b.TempDir()) if err != nil { b.Fatal(err) } @@ -96,13 +88,8 @@ func openCloseWritable(b *testing.B, name string, metaFn metaFactory) func(b *te func openCloseReadonly(b *testing.B, name string, metaFn metaFactory) func(b *testing.B) { return func(b *testing.B) { ctx := context.Background() - tmpDir, err := ioutil.TempDir("", "metastore-bench-"+name+"-") - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(tmpDir) - ms, err := metaFn(tmpDir) + ms, err := metaFn(b.TempDir()) if err != nil { b.Fatal(err) } diff --git a/snapshots/storage/metastore_test.go b/snapshots/storage/metastore_test.go index 90ed2ae..10aedc4 100644 --- a/snapshots/storage/metastore_test.go +++ b/snapshots/storage/metastore_test.go @@ -18,16 +18,14 @@ package storage import ( "context" + "errors" "fmt" - "io/ioutil" - "os" "testing" "time" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/snapshots" "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" ) @@ -67,17 +65,16 @@ func MetaStoreSuite(t *testing.T, name string, meta func(root string) (*MetaStor func makeTest(t *testing.T, name string, metaFn metaFactory, fn testFunc) func(t *testing.T) { return func(t *testing.T) { ctx := context.Background() - tmpDir, err := ioutil.TempDir("", "metastore-test-"+name+"-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - ms, err := metaFn(tmpDir) + ms, err := metaFn(t.TempDir()) if err != nil { t.Fatal(err) } + t.Cleanup(func() { + ms.Close() + }) + fn(ctx, t, ms) } } @@ -148,31 +145,31 @@ func 
inWriteTransaction(fn testFunc) testFunc { // - "active-5": readonly active with parent "committed-2" func basePopulate(ctx context.Context, ms *MetaStore) error { if _, err := CreateSnapshot(ctx, snapshots.KindActive, "committed-tmp-1", ""); err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create active: %w", err) } if _, err := CommitActive(ctx, "committed-tmp-1", "committed-1", snapshots.Usage{Size: 1}); err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create active: %w", err) } if _, err := CreateSnapshot(ctx, snapshots.KindActive, "committed-tmp-2", "committed-1"); err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create active: %w", err) } if _, err := CommitActive(ctx, "committed-tmp-2", "committed-2", snapshots.Usage{Size: 2}); err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create active: %w", err) } if _, err := CreateSnapshot(ctx, snapshots.KindActive, "active-1", ""); err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create active: %w", err) } if _, err := CreateSnapshot(ctx, snapshots.KindActive, "active-2", "committed-1"); err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create active: %w", err) } if _, err := CreateSnapshot(ctx, snapshots.KindActive, "active-3", "committed-2"); err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create active: %w", err) } if _, err := CreateSnapshot(ctx, snapshots.KindView, "view-1", ""); err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create active: %w", err) } if _, err := CreateSnapshot(ctx, snapshots.KindView, "view-2", "committed-2"); err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create 
active: %w", err) } return nil } @@ -274,7 +271,7 @@ func testWalk(ctx context.Context, t *testing.T, _ *MetaStore) { found := map[string]snapshots.Info{} err := WalkInfo(ctx, func(ctx context.Context, info snapshots.Info) error { if _, ok := found[info.Name]; ok { - return errors.Errorf("entry already encountered") + return errors.New("entry already encountered") } found[info.Name] = info return nil @@ -287,10 +284,10 @@ func testGetSnapshot(ctx context.Context, t *testing.T, ms *MetaStore) { snapshotMap := map[string]Snapshot{} populate := func(ctx context.Context, ms *MetaStore) error { if _, err := CreateSnapshot(ctx, snapshots.KindActive, "committed-tmp-1", ""); err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create active: %w", err) } if _, err := CommitActive(ctx, "committed-tmp-1", "committed-1", snapshots.Usage{}); err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create active: %w", err) } for _, opts := range []struct { @@ -319,7 +316,7 @@ func testGetSnapshot(ctx context.Context, t *testing.T, ms *MetaStore) { } { active, err := CreateSnapshot(ctx, opts.Kind, opts.Name, opts.Parent) if err != nil { - return errors.Wrap(err, "failed to create active") + return fmt.Errorf("failed to create active: %w", err) } snapshotMap[opts.Name] = active } diff --git a/snapshots/testsuite/helpers.go b/snapshots/testsuite/helpers.go index 39f9cc5..9fe9ff9 100644 --- a/snapshots/testsuite/helpers.go +++ b/snapshots/testsuite/helpers.go @@ -19,29 +19,27 @@ package testsuite import ( "context" "fmt" - "io/ioutil" - "math/rand" "os" "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/pkg/randutil" "github.com/containerd/containerd/snapshots" "github.com/containerd/continuity/fs/fstest" - "github.com/pkg/errors" ) func applyToMounts(m []mount.Mount, work string, a fstest.Applier) (err error) { - td, err := ioutil.TempDir(work, "prepare") + td, err := 
os.MkdirTemp(work, "prepare") if err != nil { - return errors.Wrap(err, "failed to create temp dir") + return fmt.Errorf("failed to create temp dir: %w", err) } defer os.RemoveAll(td) if err := mount.All(m, td); err != nil { - return errors.Wrap(err, "failed to mount") + return fmt.Errorf("failed to mount: %w", err) } defer func() { - if err1 := mount.UnmountAll(td, umountflags); err == nil { - err = errors.Wrap(err1, "failed to unmount") + if err1 := mount.UnmountAll(td, umountflags); err1 != nil && err == nil { + err = fmt.Errorf("failed to unmount: %w", err1) } }() @@ -51,58 +49,58 @@ func applyToMounts(m []mount.Mount, work string, a fstest.Applier) (err error) { // createSnapshot creates a new snapshot in the snapshotter // given an applier to run on top of the given parent. func createSnapshot(ctx context.Context, sn snapshots.Snapshotter, parent, work string, a fstest.Applier) (string, error) { - n := fmt.Sprintf("%p-%d", a, rand.Int()) + n := fmt.Sprintf("%p-%d", a, randutil.Int()) prepare := fmt.Sprintf("%s-prepare", n) m, err := sn.Prepare(ctx, prepare, parent, opt) if err != nil { - return "", errors.Wrap(err, "failed to prepare snapshot") + return "", fmt.Errorf("failed to prepare snapshot: %w", err) } if err := applyToMounts(m, work, a); err != nil { - return "", errors.Wrap(err, "failed to apply") + return "", fmt.Errorf("failed to apply: %w", err) } if err := sn.Commit(ctx, n, prepare, opt); err != nil { - return "", errors.Wrap(err, "failed to commit") + return "", fmt.Errorf("failed to commit: %w", err) } return n, nil } func checkSnapshot(ctx context.Context, sn snapshots.Snapshotter, work, name, check string) (err error) { - td, err := ioutil.TempDir(work, "check") + td, err := os.MkdirTemp(work, "check") if err != nil { - return errors.Wrap(err, "failed to create temp dir") + return fmt.Errorf("failed to create temp dir: %w", err) } defer func() { - if err1 := os.RemoveAll(td); err == nil { - err = errors.Wrapf(err1, "failed to remove temporary 
directory %s", td) + if err1 := os.RemoveAll(td); err1 != nil && err == nil { + err = fmt.Errorf("failed to remove temporary directory %s: %w", td, err1) } }() view := fmt.Sprintf("%s-view", name) m, err := sn.View(ctx, view, name, opt) if err != nil { - return errors.Wrap(err, "failed to create view") + return fmt.Errorf("failed to create view: %w", err) } defer func() { - if err1 := sn.Remove(ctx, view); err == nil { - err = errors.Wrap(err1, "failed to remove view") + if err1 := sn.Remove(ctx, view); err1 != nil && err == nil { + err = fmt.Errorf("failed to remove view: %w", err1) } }() if err := mount.All(m, td); err != nil { - return errors.Wrap(err, "failed to mount") + return fmt.Errorf("failed to mount: %w", err) } defer func() { - if err1 := mount.UnmountAll(td, umountflags); err == nil { - err = errors.Wrap(err1, "failed to unmount view") + if err1 := mount.UnmountAll(td, umountflags); err1 != nil && err == nil { + err = fmt.Errorf("failed to unmount view: %w", err1) } }() if err := fstest.CheckDirectoryEqual(check, td); err != nil { - return errors.Wrap(err, "check directory failed") + return fmt.Errorf("check directory failed: %w", err) } return nil @@ -112,9 +110,9 @@ func checkSnapshot(ctx context.Context, sn snapshots.Snapshotter, work, name, ch // using the provided appliers, checking each snapshot created in a view // against the changes applied to a single directory. 
func checkSnapshots(ctx context.Context, sn snapshots.Snapshotter, work string, as ...fstest.Applier) error { - td, err := ioutil.TempDir(work, "flat") + td, err := os.MkdirTemp(work, "flat") if err != nil { - return errors.Wrap(err, "failed to create temp dir") + return fmt.Errorf("failed to create temp dir: %w", err) } defer os.RemoveAll(td) @@ -122,15 +120,15 @@ func checkSnapshots(ctx context.Context, sn snapshots.Snapshotter, work string, for i, a := range as { s, err := createSnapshot(ctx, sn, parentID, work, a) if err != nil { - return errors.Wrapf(err, "failed to create snapshot %d", i+1) + return fmt.Errorf("failed to create snapshot %d: %w", i+1, err) } if err := a.Apply(td); err != nil { - return errors.Wrapf(err, "failed to apply to check directory on %d", i+1) + return fmt.Errorf("failed to apply to check directory on %d: %w", i+1, err) } if err := checkSnapshot(ctx, sn, work, s, td); err != nil { - return errors.Wrapf(err, "snapshot check failed on snapshot %d", i+1) + return fmt.Errorf("snapshot check failed on snapshot %d: %w", i+1, err) } parentID = s @@ -142,28 +140,28 @@ func checkSnapshots(ctx context.Context, sn snapshots.Snapshotter, work string, // checkInfo checks that the infos are the same func checkInfo(si1, si2 snapshots.Info) error { if si1.Kind != si2.Kind { - return errors.Errorf("Expected kind %v, got %v", si1.Kind, si2.Kind) + return fmt.Errorf("Expected kind %v, got %v", si1.Kind, si2.Kind) } if si1.Name != si2.Name { - return errors.Errorf("Expected name %v, got %v", si1.Name, si2.Name) + return fmt.Errorf("Expected name %v, got %v", si1.Name, si2.Name) } if si1.Parent != si2.Parent { - return errors.Errorf("Expected Parent %v, got %v", si1.Parent, si2.Parent) + return fmt.Errorf("Expected Parent %v, got %v", si1.Parent, si2.Parent) } if len(si1.Labels) != len(si2.Labels) { - return errors.Errorf("Expected %d labels, got %d", len(si1.Labels), len(si2.Labels)) + return fmt.Errorf("Expected %d labels, got %d", len(si1.Labels), 
len(si2.Labels)) } for k, l1 := range si1.Labels { l2 := si2.Labels[k] if l1 != l2 { - return errors.Errorf("Expected label %v, got %v", l1, l2) + return fmt.Errorf("Expected label %v, got %v", l1, l2) } } if si1.Created != si2.Created { - return errors.Errorf("Expected Created %v, got %v", si1.Created, si2.Created) + return fmt.Errorf("Expected Created %v, got %v", si1.Created, si2.Created) } if si1.Updated != si2.Updated { - return errors.Errorf("Expected Updated %v, got %v", si1.Updated, si2.Updated) + return fmt.Errorf("Expected Updated %v, got %v", si1.Updated, si2.Updated) } return nil diff --git a/snapshots/testsuite/helpers_other.go b/snapshots/testsuite/helpers_other.go index aeebd7d..2c5f65a 100644 --- a/snapshots/testsuite/helpers_other.go +++ b/snapshots/testsuite/helpers_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/snapshots/testsuite/testsuite.go b/snapshots/testsuite/testsuite.go index 367a008..385d109 100644 --- a/snapshots/testsuite/testsuite.go +++ b/snapshots/testsuite/testsuite.go @@ -18,11 +18,9 @@ package testsuite import ( "context" - //nolint:golint + //nolint:revive // go-digest needs the blank import. See https://github.com/opencontainers/go-digest#usage. 
_ "crypto/sha256" "fmt" - "io/ioutil" - "math/rand" "os" "path/filepath" "sort" @@ -33,6 +31,7 @@ import ( "github.com/containerd/containerd/log/logtest" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/randutil" "github.com/containerd/containerd/pkg/testutil" "github.com/containerd/containerd/snapshots" "github.com/containerd/continuity/fs/fstest" @@ -87,7 +86,7 @@ func makeTest(name string, snapshotterFn func(ctx context.Context, root string) // work/ -> passed to test functions // root/ -> passed to snapshotter // - tmpDir, err := ioutil.TempDir("", "snapshot-suite-"+name+"-") + tmpDir, err := os.MkdirTemp("", "snapshot-suite-"+name+"-") if err != nil { t.Fatal(err) } @@ -297,7 +296,7 @@ func checkSnapshotterStatActive(ctx context.Context, t *testing.T, snapshotter s } defer testutil.Unmount(t, preparing) - if err = ioutil.WriteFile(filepath.Join(preparing, "foo"), []byte("foo\n"), 0777); err != nil { + if err = os.WriteFile(filepath.Join(preparing, "foo"), []byte("foo\n"), 0777); err != nil { t.Fatal(err) } @@ -331,7 +330,7 @@ func checkSnapshotterStatCommitted(ctx context.Context, t *testing.T, snapshotte } defer testutil.Unmount(t, preparing) - if err = ioutil.WriteFile(filepath.Join(preparing, "foo"), []byte("foo\n"), 0777); err != nil { + if err = os.WriteFile(filepath.Join(preparing, "foo"), []byte("foo\n"), 0777); err != nil { t.Fatal(err) } @@ -379,7 +378,7 @@ func checkSnapshotterTransitivity(ctx context.Context, t *testing.T, snapshotter } defer testutil.Unmount(t, preparing) - if err = ioutil.WriteFile(filepath.Join(preparing, "foo"), []byte("foo\n"), 0777); err != nil { + if err = os.WriteFile(filepath.Join(preparing, "foo"), []byte("foo\n"), 0777); err != nil { t.Fatal(err) } @@ -394,7 +393,7 @@ func checkSnapshotterTransitivity(ctx context.Context, t *testing.T, snapshotter } defer testutil.Unmount(t, next) - if err = ioutil.WriteFile(filepath.Join(next, "foo"), 
[]byte("foo bar\n"), 0777); err != nil { + if err = os.WriteFile(filepath.Join(next, "foo"), []byte("foo bar\n"), 0777); err != nil { t.Fatal(err) } @@ -501,7 +500,7 @@ func checkDeletedFilesInChildSnapshot(ctx context.Context, t *testing.T, snapsho } -//Create three layers. Deleting intermediate layer must fail. +// Create three layers. Deleting intermediate layer must fail. func checkRemoveIntermediateSnapshot(ctx context.Context, t *testing.T, snapshotter snapshots.Snapshotter, work string) { base, err := snapshotterPrepareMount(ctx, snapshotter, "base", "", work) @@ -555,12 +554,13 @@ func checkRemoveIntermediateSnapshot(ctx context.Context, t *testing.T, snapshot // baseTestSnapshots creates a base set of snapshots for tests, each snapshot is empty // Tests snapshots: -// c1 - committed snapshot, no parent -// c2 - committed snapshot, c1 is parent -// a1 - active snapshot, c2 is parent -// a1 - active snapshot, no parent -// v1 - view snapshot, v1 is parent -// v2 - view snapshot, no parent +// +// c1 - committed snapshot, no parent +// c2 - committed snapshot, c1 is parent +// a1 - active snapshot, c2 is parent +// a1 - active snapshot, no parent +// v1 - view snapshot, v1 is parent +// v2 - view snapshot, no parent func baseTestSnapshots(ctx context.Context, snapshotter snapshots.Snapshotter) error { if _, err := snapshotter.Prepare(ctx, "c1-a", "", opt); err != nil { return err @@ -804,7 +804,7 @@ func checkSnapshotterViewReadonly(ctx context.Context, t *testing.T, snapshotter } testfile := filepath.Join(viewMountPoint, "testfile") - if err := ioutil.WriteFile(testfile, []byte("testcontent"), 0777); err != nil { + if err := os.WriteFile(testfile, []byte("testcontent"), 0777); err != nil { t.Logf("write to %q failed with %v (EROFS is expected but can be other error code)", testfile, err) } else { t.Fatalf("write to %q should fail (EROFS) but did not fail", testfile) @@ -837,7 +837,7 @@ func checkFileFromLowerLayer(ctx context.Context, t *testing.T, 
snapshotter snap } func closeTwice(ctx context.Context, t *testing.T, snapshotter snapshots.Snapshotter, work string) { - n := fmt.Sprintf("closeTwice-%d", rand.Int()) + n := fmt.Sprintf("closeTwice-%d", randutil.Int()) prepare := fmt.Sprintf("%s-prepare", n) // do some dummy ops to modify the snapshotter internal state diff --git a/snapshots/testsuite/testsuite_unix.go b/snapshots/testsuite/testsuite_unix.go index adc0ef4..94b8218 100644 --- a/snapshots/testsuite/testsuite_unix.go +++ b/snapshots/testsuite/testsuite_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/snapshots/windows/windows.go b/snapshots/windows/windows.go index 9f05ed9..820f985 100644 --- a/snapshots/windows/windows.go +++ b/snapshots/windows/windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* @@ -21,9 +22,9 @@ package windows import ( "context" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strconv" @@ -43,7 +44,6 @@ import ( "github.com/containerd/containerd/snapshots/storage" "github.com/containerd/continuity/fs" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) func init() { @@ -77,7 +77,7 @@ func NewSnapshotter(root string) (snapshots.Snapshotter, error) { return nil, err } if strings.ToLower(fsType) != "ntfs" { - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%s is not on an NTFS volume - only NTFS volumes are supported", root) + return nil, fmt.Errorf("%s is not on an NTFS volume - only NTFS volumes are supported: %w", root, errdefs.ErrInvalidArgument) } if err := os.MkdirAll(root, 0700); err != nil { @@ -182,7 +182,7 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er snapshot, err := storage.GetSnapshot(ctx, key) if err != nil { - return nil, errors.Wrap(err, "failed to get snapshot mount") + return nil, fmt.Errorf("failed to get snapshot mount: %w", err) } return s.mounts(snapshot), nil } @@ -204,7 +204,7 @@ func (s *snapshotter) 
Commit(ctx context.Context, name, key string, opts ...snap // grab the existing id id, _, _, err := storage.GetInfo(ctx, key) if err != nil { - return errors.Wrapf(err, "failed to get storage info for %s", key) + return fmt.Errorf("failed to get storage info for %s: %w", key, err) } snapshot, err := storage.GetSnapshot(ctx, key) @@ -224,11 +224,11 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap usage, err := fs.DiskUsage(ctx, path) if err != nil { - return errors.Wrapf(err, "failed to collect disk usage of snapshot storage: %s", path) + return fmt.Errorf("failed to collect disk usage of snapshot storage: %s: %w", path, err) } if _, err := storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil { - return errors.Wrap(err, "failed to commit snapshot") + return fmt.Errorf("failed to commit snapshot: %w", err) } return t.Commit() } @@ -244,7 +244,7 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { id, _, err := storage.Remove(ctx, key) if err != nil { - return errors.Wrap(err, "failed to remove") + return fmt.Errorf("failed to remove: %w", err) } path := s.getSnapshotDir(id) @@ -265,20 +265,20 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { ) if deactivateErr := hcsshim.DeactivateLayer(di, layerID); deactivateErr != nil { - return errors.Wrapf(err, "failed to deactivate layer following failed rename: %s", deactivateErr) + return fmt.Errorf("failed to deactivate layer following failed rename: %s: %w", deactivateErr, err) } if renameErr := os.Rename(path, renamed); renameErr != nil && !os.IsNotExist(renameErr) { - return errors.Wrapf(err, "second rename attempt following detach failed: %s", renameErr) + return fmt.Errorf("second rename attempt following detach failed: %s: %w", renameErr, err) } } if err := t.Commit(); err != nil { if err1 := os.Rename(renamed, path); err1 != nil { // May cause inconsistent data on disk - 
log.G(ctx).WithError(err1).WithField("path", renamed).Errorf("Failed to rename after failed commit") + log.G(ctx).WithError(err1).WithField("path", renamed).Error("Failed to rename after failed commit") } - return errors.Wrap(err, "failed to commit") + return fmt.Errorf("failed to commit: %w", err) } if err := hcsshim.DestroyLayer(s.info, renamedID); err != nil { @@ -356,7 +356,7 @@ func (s *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k newSnapshot, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...) if err != nil { - return nil, errors.Wrap(err, "failed to create snapshot") + return nil, fmt.Errorf("failed to create snapshot: %w", err) } if kind == snapshots.KindActive { @@ -385,7 +385,7 @@ func (s *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if sizeGBstr, ok := snapshotInfo.Labels[rootfsSizeLabel]; ok { i32, err := strconv.ParseInt(sizeGBstr, 10, 32) if err != nil { - return nil, errors.Wrapf(err, "failed to parse label %q=%q", rootfsSizeLabel, sizeGBstr) + return nil, fmt.Errorf("failed to parse label %q=%q: %w", rootfsSizeLabel, sizeGBstr, err) } sizeGB = int(i32) } @@ -398,17 +398,17 @@ func (s *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k // This has to be run first to avoid clashing with the containers sandbox.vhdx. 
if makeUVMScratch { if err := s.createUVMScratchLayer(ctx, snDir, parentLayerPaths); err != nil { - return nil, errors.Wrap(err, "failed to make UVM's scratch layer") + return nil, fmt.Errorf("failed to make UVM's scratch layer: %w", err) } } if err := s.createScratchLayer(ctx, snDir, parentLayerPaths, sizeGB); err != nil { - return nil, errors.Wrap(err, "failed to create scratch layer") + return nil, fmt.Errorf("failed to create scratch layer: %w", err) } } } if err := t.Commit(); err != nil { - return nil, errors.Wrap(err, "commit failed") + return nil, fmt.Errorf("commit failed: %w", err) } return s.mounts(newSnapshot), nil @@ -450,7 +450,7 @@ func (s *snapshotter) createScratchLayer(ctx context.Context, snDir string, pare if _, err := os.Stat(templateDiffDisk); os.IsNotExist(err) { // Scratch disk not present so lets make it. if err := computestorage.SetupContainerBaseLayer(ctx, baseLayer, templateBase, templateDiffDisk, 1); err != nil { - return errors.Wrapf(err, "failed to create scratch vhdx at %q", baseLayer) + return fmt.Errorf("failed to create scratch vhdx at %q: %w", baseLayer, err) } } @@ -462,13 +462,13 @@ func (s *snapshotter) createScratchLayer(ctx context.Context, snDir string, pare if expand { gbToByte := 1024 * 1024 * 1024 if err := hcsshim.ExpandSandboxSize(s.info, filepath.Base(snDir), uint64(gbToByte*sizeGB)); err != nil { - return errors.Wrapf(err, "failed to expand sandbox vhdx size to %d GB", sizeGB) + return fmt.Errorf("failed to expand sandbox vhdx size to %d GB: %w", sizeGB, err) } } return nil } -// convertScratchToReadOnlyLayer reimporst the layer over itself, to transfer the files from the sandbox.vhdx to the on-disk storage. +// convertScratchToReadOnlyLayer reimports the layer over itself, to transfer the files from the sandbox.vhdx to the on-disk storage. 
func (s *snapshotter) convertScratchToReadOnlyLayer(ctx context.Context, snapshot storage.Snapshot, path string) (retErr error) { // TODO darrenstahlmsft: When this is done isolated, we should disable these. @@ -476,7 +476,7 @@ func (s *snapshotter) convertScratchToReadOnlyLayer(ctx context.Context, snapsho // temporary, leaving it enabled is OK for now. // https://github.com/containerd/containerd/issues/1681 if err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { - return errors.Wrap(err, "failed to enable necessary privileges") + return fmt.Errorf("failed to enable necessary privileges: %w", err) } parentLayerPaths := s.parentIDsToParentPaths(snapshot.ParentIDs) @@ -488,11 +488,11 @@ func (s *snapshotter) convertScratchToReadOnlyLayer(ctx context.Context, snapsho }() if _, err := ociwclayer.ImportLayerFromTar(ctx, reader, path, parentLayerPaths); err != nil { - return errors.Wrap(err, "failed to reimport snapshot") + return fmt.Errorf("failed to reimport snapshot: %w", err) } - if _, err := io.Copy(ioutil.Discard, reader); err != nil { - return errors.Wrap(err, "failed discarding extra data in import stream") + if _, err := io.Copy(io.Discard, reader); err != nil { + return fmt.Errorf("failed discarding extra data in import stream: %w", err) } // NOTE: We do not delete the sandbox.vhdx here, as that will break later calls to @@ -516,7 +516,7 @@ func (s *snapshotter) createUVMScratchLayer(ctx context.Context, snDir string, p // Make sure base layer has a UtilityVM folder. 
uvmPath := filepath.Join(baseLayer, "UtilityVM") if _, err := os.Stat(uvmPath); os.IsNotExist(err) { - return errors.Wrapf(err, "failed to find UtilityVM directory in base layer %q", baseLayer) + return fmt.Errorf("failed to find UtilityVM directory in base layer %q: %w", baseLayer, err) } templateDiffDisk := filepath.Join(uvmPath, "SystemTemplate.vhdx") @@ -530,7 +530,7 @@ func (s *snapshotter) createUVMScratchLayer(ctx context.Context, snDir string, p // Move the sandbox.vhdx into a nested vm folder to avoid clashing with a containers sandbox.vhdx. vmScratchDir := filepath.Join(snDir, "vm") if err := os.MkdirAll(vmScratchDir, 0777); err != nil { - return errors.Wrap(err, "failed to make `vm` directory for vm's scratch space") + return fmt.Errorf("failed to make `vm` directory for vm's scratch space: %w", err) } return copyScratchDisk(templateDiffDisk, filepath.Join(vmScratchDir, "sandbox.vhdx")) @@ -539,19 +539,19 @@ func (s *snapshotter) createUVMScratchLayer(ctx context.Context, snDir string, p func copyScratchDisk(source, dest string) error { scratchSource, err := os.OpenFile(source, os.O_RDWR, 0700) if err != nil { - return errors.Wrapf(err, "failed to open %s", source) + return fmt.Errorf("failed to open %s: %w", source, err) } defer scratchSource.Close() f, err := os.OpenFile(dest, os.O_RDWR|os.O_CREATE, 0700) if err != nil { - return errors.Wrap(err, "failed to create sandbox.vhdx in snapshot") + return fmt.Errorf("failed to create sandbox.vhdx in snapshot: %w", err) } defer f.Close() if _, err := io.Copy(f, scratchSource); err != nil { os.Remove(dest) - return errors.Wrapf(err, "failed to copy cached %q to %q in snapshot", source, dest) + return fmt.Errorf("failed to copy cached %q to %q in snapshot: %w", source, dest, err) } return nil } diff --git a/snapshotter_default_unix.go b/snapshotter_default_unix.go index eb001c7..dcba479 100644 --- a/snapshotter_default_unix.go +++ b/snapshotter_default_unix.go @@ -1,3 +1,4 @@ +//go:build darwin || freebsd || 
solaris // +build darwin freebsd solaris /* diff --git a/snapshotter_opts_unix.go b/snapshotter_opts_unix.go index 1964379..2a2c829 100644 --- a/snapshotter_opts_unix.go +++ b/snapshotter_opts_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/sys/epoll.go b/sys/epoll.go index 28d6c2c..73a5701 100644 --- a/sys/epoll.go +++ b/sys/epoll.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/sys/fds.go b/sys/fds.go index db3cf70..a71a9cd 100644 --- a/sys/fds.go +++ b/sys/fds.go @@ -1,3 +1,4 @@ +//go:build !windows && !darwin // +build !windows,!darwin /* @@ -19,14 +20,14 @@ package sys import ( - "io/ioutil" + "os" "path/filepath" "strconv" ) // GetOpenFds returns the number of open fds for the process provided by pid func GetOpenFds(pid int) (int, error) { - dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) + dirs, err := os.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) if err != nil { return -1, err } diff --git a/sys/filesys_unix.go b/sys/filesys_unix.go index d8329af..805a7a7 100644 --- a/sys/filesys_unix.go +++ b/sys/filesys_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/sys/filesys_windows.go b/sys/filesys_windows.go index a9198ef..87ebacc 100644 --- a/sys/filesys_windows.go +++ b/sys/filesys_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -19,6 +17,7 @@ package sys import ( + "fmt" "os" "path/filepath" "regexp" @@ -29,7 +28,6 @@ import ( "unsafe" "github.com/Microsoft/hcsshim" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) @@ -270,7 +268,7 @@ func ForceRemoveAll(path string) error { snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots") if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() { if err := cleanupWCOWLayers(snapshotDir); err != nil { - return errors.Wrapf(err, "failed to cleanup WCOW layers in %s", snapshotDir) + return fmt.Errorf("failed to cleanup WCOW layers in %s: %w", snapshotDir, err) } } @@ -280,12 +278,22 @@ func ForceRemoveAll(path string) error { func cleanupWCOWLayers(root string) error { // See snapshots/windows/windows.go getSnapshotDir() var layerNums []int + var rmLayerNums []int if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if path != root && info.IsDir() { - if layerNum, err := strconv.Atoi(filepath.Base(path)); err == nil { - layerNums = append(layerNums, layerNum) + name := filepath.Base(path) + if strings.HasPrefix(name, "rm-") { + layerNum, err := strconv.Atoi(strings.TrimPrefix(name, "rm-")) + if err != nil { + return err + } + rmLayerNums = append(rmLayerNums, layerNum) } else { - return err + layerNum, err := strconv.Atoi(name) + if err != nil { + return err + } + layerNums = append(layerNums, layerNum) } return filepath.SkipDir } @@ -295,8 +303,14 @@ func cleanupWCOWLayers(root string) error { return err } - sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) + sort.Sort(sort.Reverse(sort.IntSlice(rmLayerNums))) + for _, rmLayerNum := range rmLayerNums { + if err := cleanupWCOWLayer(filepath.Join(root, "rm-"+strconv.Itoa(rmLayerNum))); err != nil { + return err + } + } + sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) for _, layerNum := range layerNums { if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil { return err @@ -311,19 +325,20 @@ func 
cleanupWCOWLayer(layerPath string) error { HomeDir: filepath.Dir(layerPath), } - // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared. + // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared or activated. + // ERROR_FLT_INSTANCE_NOT_FOUND is returned if the layer is currently activated but not prepared. if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil { - if hcserror, ok := err.(*hcsshim.HcsError); !ok || hcserror.Err != windows.ERROR_DEV_NOT_EXIST { - return errors.Wrapf(err, "failed to unprepare %s", layerPath) + if hcserror, ok := err.(*hcsshim.HcsError); !ok || (hcserror.Err != windows.ERROR_DEV_NOT_EXIST && hcserror.Err != syscall.Errno(windows.ERROR_FLT_INSTANCE_NOT_FOUND)) { + return fmt.Errorf("failed to unprepare %s: %w", layerPath, err) } } if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil { - return errors.Wrapf(err, "failed to deactivate %s", layerPath) + return fmt.Errorf("failed to deactivate %s: %w", layerPath, err) } if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil { - return errors.Wrapf(err, "failed to destroy %s", layerPath) + return fmt.Errorf("failed to destroy %s: %w", layerPath, err) } return nil diff --git a/sys/oom_linux.go b/sys/oom_linux.go index 82a347c..bb2a3ea 100644 --- a/sys/oom_linux.go +++ b/sys/oom_linux.go @@ -18,7 +18,6 @@ package sys import ( "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -69,7 +68,7 @@ func SetOOMScore(pid, score int) error { // no oom score is set, or a sore is set to 0. 
func GetOOMScoreAdj(pid int) (int, error) { path := fmt.Sprintf("/proc/%d/oom_score_adj", pid) - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } diff --git a/sys/oom_linux_test.go b/sys/oom_linux_test.go index 3cea56a..0c8f90b 100644 --- a/sys/oom_linux_test.go +++ b/sys/oom_linux_test.go @@ -20,11 +20,11 @@ import ( "errors" "fmt" "os" - "os/exec" "testing" "time" "github.com/containerd/containerd/pkg/userns" + exec "golang.org/x/sys/execabs" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" ) diff --git a/sys/oom_unsupported.go b/sys/oom_unsupported.go index f5d7e97..fa0db5a 100644 --- a/sys/oom_unsupported.go +++ b/sys/oom_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/sys/reaper/reaper_unix.go b/sys/reaper/reaper_unix.go index 0033178..6c4f13b 100644 --- a/sys/reaper/reaper_unix.go +++ b/sys/reaper/reaper_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,12 +20,14 @@ package reaper import ( - "os/exec" + "errors" + "fmt" "sync" + "syscall" "time" runc "github.com/containerd/go-runc" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) @@ -115,6 +118,38 @@ func (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) { return -1, ErrNoSuchProcess } +// WaitTimeout is used to skip the blocked command and kill the left process. +func (m *Monitor) WaitTimeout(c *exec.Cmd, ec chan runc.Exit, timeout time.Duration) (int, error) { + type exitStatusWrapper struct { + status int + err error + } + + // capacity can make sure that the following goroutine will not be + // blocked if there is no receiver when timeout. 
+ waitCh := make(chan *exitStatusWrapper, 1) + go func() { + defer close(waitCh) + + status, err := m.Wait(c, ec) + waitCh <- &exitStatusWrapper{ + status: status, + err: err, + } + }() + + timer := time.NewTimer(timeout) + defer timer.Stop() + + select { + case <-timer.C: + syscall.Kill(c.Process.Pid, syscall.SIGKILL) + return 0, fmt.Errorf("timeout %v for cmd(pid=%d): %s, %s", timeout, c.Process.Pid, c.Path, c.Args) + case res := <-waitCh: + return res.status, res.err + } +} + // Subscribe to process exit changes func (m *Monitor) Subscribe() chan runc.Exit { c := make(chan runc.Exit, bufferSize) diff --git a/sys/socket_unix.go b/sys/socket_unix.go index b67cc1f..367e19c 100644 --- a/sys/socket_unix.go +++ b/sys/socket_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,11 +20,11 @@ package sys import ( + "fmt" "net" "os" "path/filepath" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -31,7 +32,7 @@ import ( func CreateUnixSocket(path string) (net.Listener, error) { // BSDs have a 104 limit if len(path) > 104 { - return nil, errors.Errorf("%q: unix socket path too long (> 104)", path) + return nil, fmt.Errorf("%q: unix socket path too long (> 104)", path) } if err := os.MkdirAll(filepath.Dir(path), 0660); err != nil { return nil, err diff --git a/sys/socket_windows.go b/sys/socket_windows.go index 3ee7679..1ae12bc 100644 --- a/sys/socket_windows.go +++ b/sys/socket_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. diff --git a/sys/stat_bsd.go b/sys/stat_bsd.go deleted file mode 100644 index 4f03cd6..0000000 --- a/sys/stat_bsd.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build darwin freebsd netbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the access time from a stat struct -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atimespec -} - -// StatCtime returns the created time from a stat struct -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctimespec -} - -// StatMtime returns the modified time from a stat struct -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtimespec -} - -// StatATimeAsTime returns the access time as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert -} diff --git a/sys/stat_openbsd.go b/sys/stat_openbsd.go deleted file mode 100644 index ec3b9df..0000000 --- a/sys/stat_openbsd.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build openbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - // The int64 conversions ensure the line compiles for 32-bit systems as well. - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert -} diff --git a/sys/stat_unix.go b/sys/stat_unix.go deleted file mode 100644 index 21a666d..0000000 --- a/sys/stat_unix.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build linux solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert -} diff --git a/task.go b/task.go index 4e23fb8..ef8cd44 100644 --- a/task.go +++ b/task.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" goruntime "runtime" @@ -46,7 +47,6 @@ import ( is "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) // UnknownExitStatus is returned when containerd is unable to @@ -139,6 +139,11 @@ type TaskInfo struct { RootFS []mount.Mount // Options hold runtime specific settings for task creation Options interface{} + // RuntimePath is an absolute path that can be used to overwrite path + // to a shim runtime binary. + RuntimePath string + + // runtime is the runtime name for the container, and cannot be changed. 
runtime string } @@ -310,12 +315,26 @@ func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStat // On windows Created is akin to Stopped break } + if t.pid == 0 { + // allow for deletion of created tasks with PID 0 + // https://github.com/containerd/containerd/issues/7357 + break + } fallthrough default: - return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "task must be stopped before deletion: %s", status.Status) + return nil, fmt.Errorf("task must be stopped before deletion: %s: %w", status.Status, errdefs.ErrFailedPrecondition) } if t.io != nil { - t.io.Close() + // io.Wait locks for restored tasks on Windows unless we call + // io.Close first (https://github.com/containerd/containerd/issues/5621) + // in other cases, preserve the contract and let IO finish before closing + if t.client.runtime == fmt.Sprintf("%s.%s", plugin.RuntimePlugin, "windows") { + t.io.Close() + } + // io.Cancel is used to cancel the io goroutine while it is in + // fifo-opening state. It does not stop the pipes since these + // should be closed on the shim's side, otherwise we might lose + // data from the container! 
t.io.Cancel() t.io.Wait() } @@ -334,7 +353,7 @@ func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStat func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate cio.Creator) (_ Process, err error) { if id == "" { - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "exec id must not be empty") + return nil, fmt.Errorf("exec id must not be empty: %w", errdefs.ErrInvalidArgument) } i, err := ioCreate(id) if err != nil { @@ -555,7 +574,7 @@ func (t *task) LoadProcess(ctx context.Context, id string, ioAttach cio.Attach) if err != nil { err = errdefs.FromGRPC(err) if errdefs.IsNotFound(err) { - return nil, errors.Wrapf(err, "no running process found") + return nil, fmt.Errorf("no running process found: %w", err) } return nil, err } diff --git a/task_opts.go b/task_opts.go index e8d99eb..67e6527 100644 --- a/task_opts.go +++ b/task_opts.go @@ -19,6 +19,7 @@ package containerd import ( "context" "encoding/json" + "errors" "fmt" "syscall" @@ -31,7 +32,6 @@ import ( "github.com/containerd/containerd/runtime/v2/runc/options" imagespec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) // NewTaskOpts allows the caller to set options on a new task @@ -45,6 +45,15 @@ func WithRootFS(mounts []mount.Mount) NewTaskOpts { } } +// WithRuntimePath will force task service to use a custom path to the runtime binary +// instead of resolving it from runtime name. +func WithRuntimePath(absRuntimePath string) NewTaskOpts { + return func(ctx context.Context, client *Client, info *TaskInfo) error { + info.RuntimePath = absRuntimePath + return nil + } +} + // WithTaskCheckpoint allows a task to be created with live runtime and memory data from a // previous checkpoint. 
Additional software such as CRIU may be required to // restore a task from a checkpoint @@ -158,7 +167,17 @@ func WithProcessKill(ctx context.Context, p Process) error { return err } if err := p.Kill(ctx, syscall.SIGKILL, WithKillAll); err != nil { - if errdefs.IsFailedPrecondition(err) || errdefs.IsNotFound(err) { + // Kill might still return an IsNotFound error, even if it actually + // killed the process. + if errdefs.IsNotFound(err) { + select { + case <-ctx.Done(): + return ctx.Err() + case <-s: + return nil + } + } + if errdefs.IsFailedPrecondition(err) { return nil } return err diff --git a/task_opts_unix.go b/task_opts_unix.go index a710b35..1d5983b 100644 --- a/task_opts_unix.go +++ b/task_opts_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,10 +21,10 @@ package containerd import ( "context" + "errors" "github.com/containerd/containerd/runtime/linux/runctypes" "github.com/containerd/containerd/runtime/v2/runc/options" - "github.com/pkg/errors" ) // WithNoNewKeyring causes tasks not to be created with a new keyring for secret storage. diff --git a/test/build-test-images.sh b/test/build-test-images.sh index 1323296..8594a3c 100755 --- a/test/build-test-images.sh +++ b/test/build-test-images.sh @@ -15,17 +15,22 @@ # limitations under the License. # This script is used to build and upload images in integration/images -# directory to gcr.io/k8s-cri-containerd repository +# directory to ghcr.io/containerd repository set -o xtrace set -o errexit set -o nounset set -o pipefail -source $(dirname "${BASH_SOURCE[0]}")/build-utils.sh -source $(dirname "${BASH_SOURCE[0]}")/init-buildx.sh +: "${PROJECT:=k8s-cri-containerd}" + +basedir="$(dirname "${BASH_SOURCE[0]}")" +source "${basedir}/build-utils.sh" +source "${basedir}/init-buildx.sh" + +ROOT="$( cd "$basedir" && pwd )"/.. 
cd "${ROOT}" # ignore errors if the image already exists -make -C integration/images/volume-copy-up push PROJ="gcr.io/${PROJECT:-k8s-cri-containerd}" || true -make -C integration/images/volume-ownership push PROJ="gcr.io/${PROJECT:-k8s-cri-containerd}" || true +make -C integration/images/volume-copy-up push PROJ="gcr.io/${PROJECT}" || true +make -C integration/images/volume-ownership push PROJ="gcr.io/${PROJECT}" || true diff --git a/test/build-utils.sh b/test/build-utils.sh index c84b484..408e0b7 100755 --- a/test/build-utils.sh +++ b/test/build-utils.sh @@ -14,13 +14,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/.. - # PROJECT is the gce project to upload tarball. -PROJECT=${PROJECT:-"k8s-cri-containerd"} +: "${PROJECT:=k8s-cri-containerd}" # GOOGLE_APPLICATION_CREDENTIALS is the path of service account file. -if [ -z ${GOOGLE_APPLICATION_CREDENTIALS:-""} ]; then +if [ -z "${GOOGLE_APPLICATION_CREDENTIALS:-""}" ]; then echo "GOOGLE_APPLICATION_CREDENTIALS is not set" exit 1 fi @@ -28,20 +26,11 @@ fi # Activate gcloud service account. gcloud auth activate-service-account --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" --project="${PROJECT}" -# Kubernetes test infra uses jessie and stretch. -if cat /etc/os-release | grep jessie; then - sh -c "echo 'deb http://ftp.debian.org/debian jessie-backports main' > /etc/apt/sources.list.d/backports.list" - apt-get update - apt-get install -y libseccomp2/jessie-backports - apt-get install -y libseccomp-dev/jessie-backports -else - cat /etc/os-release - apt-get update - apt-get install -y libseccomp2 - apt-get install -y libseccomp-dev -fi +cat /etc/os-release +apt-get update +apt-get install -y libseccomp2 libseccomp-dev # PULL_REFS is from prow. -if [ ! 
-z "${PULL_REFS:-""}" ]; then +if [ -n "${PULL_REFS:-""}" ]; then DEPLOY_DIR=$(echo "${PULL_REFS}" | sha1sum | awk '{print $1}') fi diff --git a/test/build.sh b/test/build.sh index ff6dd34..8e782ec 100755 --- a/test/build.sh +++ b/test/build.sh @@ -22,7 +22,10 @@ set -o errexit set -o nounset set -o pipefail -source $(dirname "${BASH_SOURCE[0]}")/build-utils.sh +basedir="$(dirname "${BASH_SOURCE[0]}")" +source "${basedir}/build-utils.sh" + +ROOT="$( cd "${basedir}" && pwd )"/.. cd "${ROOT}" # Make sure output directory is clean. @@ -35,16 +38,16 @@ BUILDDIR=$(mktemp -d) cleanup() { if [[ ${BUILDDIR} == /tmp/* ]]; then echo "[-] REMOVING ${BUILDDIR}" - rm -rf ${BUILDDIR} + rm -rf "${BUILDDIR}" fi } trap cleanup EXIT set -x latest=$(readlink ./releases/cri-cni-containerd.tar.gz) -tarball=$(echo ${latest} | sed -e 's/cri-containerd-cni/containerd-cni/g' | sed -e 's/-linux-amd64/.linux-amd64/g') -cp releases/${latest} ${BUILDDIR}/${tarball} -cp releases/${latest}.sha256sum ${BUILDDIR}/${tarball}.sha256 +tarball=$(echo "${latest}" | sed -e 's/cri-containerd-cni/containerd-cni/g' | sed -e 's/-linux-amd64/.linux-amd64/g') +cp "releases/${latest}" "${BUILDDIR}/${tarball}" +cp "releases/${latest}.sha256sum" "${BUILDDIR}/${tarball}.sha256" # Push test tarball to Google cloud storage. VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always) @@ -55,4 +58,4 @@ else DEPLOY_DIR="containerd/${DEPLOY_DIR}" fi -PUSH_VERSION=true DEPLOY_DIR=${DEPLOY_DIR} TARBALL=${tarball} VERSION=${VERSION#v} BUILD_DIR=${BUILDDIR} ${ROOT}/test/push.sh +PUSH_VERSION=true DEPLOY_DIR=${DEPLOY_DIR} TARBALL=${tarball} VERSION=${VERSION#v} BUILD_DIR=${BUILDDIR} "${ROOT}/test/push.sh" diff --git a/test/e2e_node/gci-init.sh b/test/e2e_node/gci-init.sh index 2ca97ef..83da281 100755 --- a/test/e2e_node/gci-init.sh +++ b/test/e2e_node/gci-init.sh @@ -16,6 +16,37 @@ # This script is used to do extra initialization on GCI. 
+# set up cgroupv2 based on flag CONTAINERD_CGROUPV2 in containerd-env +CONTAINERD_HOME="/home/containerd" +CONTAINERD_ENV_METADATA="containerd-env" + +if [ -f "${CONTAINERD_HOME}/${CONTAINERD_ENV_METADATA}" ]; then + source "${CONTAINERD_HOME}/${CONTAINERD_ENV_METADATA}" +fi + +if [ "${CONTAINERD_CGROUPV2:-"false"}" == "true" ]; then + # check cos image + if [ -r /etc/os-release ]; then + OS_ID="$(. /etc/os-release && echo "$ID")" + fi + if [ "${OS_ID}" = "cos" ]; then + if ! grep -q 'systemd.unified_cgroup_hierarchy=true' /proc/cmdline && [ "$(stat -fc %T /sys/fs/cgroup/)" != "cgroup2fs" ]; then + echo "Setting up cgroupv2" + + mount_path="/tmp/esp" + mkdir -p "${mount_path}" + esp_partition="/dev/sda12" + mount "${esp_partition}" "${mount_path}" + sed -i 's/systemd.unified_cgroup_hierarchy=false/systemd.unified_cgroup_hierarchy=true/g' "${mount_path}/efi/boot/grub.cfg" + umount "${mount_path}" + rmdir "${mount_path}" + + echo "Reconfigured grub; rebooting..." + reboot + fi + fi +fi + mount /tmp /tmp -o remount,exec,suid #TODO(random-liu): Stop docker and remove this docker thing. usermod -a -G docker jenkins diff --git a/test/init-buildx.sh b/test/init-buildx.sh index 99b5654..ee0a05a 100755 --- a/test/init-buildx.sh +++ b/test/init-buildx.sh @@ -74,7 +74,7 @@ fi # Ensure qemu is in binfmt_misc # NOTE: Please always pin this to a digest for predictability/auditability # Last updated: 08/21/2020 -if [ "$(uname)" == 'Linux' ]; then +if [ "$(uname)" = 'Linux' ]; then docker run --rm --privileged multiarch/qemu-user-static@sha256:c772ee1965aa0be9915ee1b018a0dd92ea361b4fa1bcab5bbc033517749b2af4 --reset -p yes fi diff --git a/test/push.sh b/test/push.sh index 94711f1..851da44 100755 --- a/test/push.sh +++ b/test/push.sh @@ -18,10 +18,10 @@ set -o errexit set -o nounset set -o pipefail -source $(dirname "${BASH_SOURCE[0]}")/utils.sh +source "$(dirname "${BASH_SOURCE[0]}")/utils.sh" # DEPLOY_BUCKET is the gcs bucket where the tarball should be stored in. 
-DEPLOY_BUCKET=${DEPLOY_BUCKET:-"cri-containerd-staging"} +DEPLOY_BUCKET=${DEPLOY_BUCKET:-"k8s-staging-cri-tools"} # DEPLOY_DIR is the directory in the gcs bucket to store the tarball. DEPLOY_DIR=${DEPLOY_DIR:-""} # BUILD_DIR is the directory of the build out. @@ -41,7 +41,7 @@ if [[ ! -e ${release_tar} || ! -e ${release_tar_checksum} ]]; then fi if ! gsutil ls "gs://${DEPLOY_BUCKET}" > /dev/null; then - create_ttl_bucket ${DEPLOY_BUCKET} + create_ttl_bucket "${DEPLOY_BUCKET}" fi if [ -z "${DEPLOY_DIR}" ]; then @@ -50,8 +50,8 @@ else DEPLOY_PATH="${DEPLOY_BUCKET}/${DEPLOY_DIR}" fi -gsutil cp ${release_tar} "gs://${DEPLOY_PATH}/" -gsutil cp ${release_tar_checksum} "gs://${DEPLOY_PATH}/" +gsutil cp "${release_tar}" "gs://${DEPLOY_PATH}/" +gsutil cp "${release_tar_checksum}" "gs://${DEPLOY_PATH}/" echo "Release tarball is uploaded to: https://storage.googleapis.com/${DEPLOY_PATH}/${TARBALL}" @@ -60,7 +60,7 @@ if ${PUSH_VERSION}; then echo "VERSION is not set" exit 1 fi - echo ${VERSION} | gsutil cp - "gs://${DEPLOY_PATH}/${LATEST}" + echo "${VERSION}" | gsutil cp - "gs://${DEPLOY_PATH}/${LATEST}" echo "Latest version is uploaded to: https://storage.googleapis.com/${DEPLOY_PATH}/${LATEST}" fi diff --git a/test/utils.sh b/test/utils.sh index 79e97f6..a031501 100755 --- a/test/utils.sh +++ b/test/utils.sh @@ -26,11 +26,11 @@ upload_logs_to_gcs() { local -r dir=$2 local -r result=$3 if ! gsutil ls "gs://${bucket}" > /dev/null; then - create_ttl_bucket ${bucket} + create_ttl_bucket "${bucket}" fi local -r upload_log_path=${bucket}/${dir} gsutil cp -r "${result}" "gs://${upload_log_path}" - echo "Test logs are uploaed to: + echo "Test logs are uploaded to: http://gcsweb.k8s.io/gcs/${upload_log_path}/" } @@ -43,7 +43,7 @@ create_ttl_bucket() { gsutil mb "gs://${bucket}" local -r bucket_rule=$(mktemp) # Set 30 day TTL for logs inside the bucket. 
- echo '{"rule": [{"action": {"type": "Delete"},"condition": {"age": 30}}]}' > ${bucket_rule} + echo '{"rule": [{"action": {"type": "Delete"},"condition": {"age": 30}}]}' > "${bucket_rule}" gsutil lifecycle set "${bucket_rule}" "gs://${bucket}" rm "${bucket_rule}" diff --git a/tracing/log.go b/tracing/log.go new file mode 100644 index 0000000..6c6dd6d --- /dev/null +++ b/tracing/log.go @@ -0,0 +1,130 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tracing + +import ( + "encoding/json" + "fmt" + + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// NewLogrusHook creates a new logrus hook +func NewLogrusHook() *LogrusHook { + return &LogrusHook{} +} + +// LogrusHook is a logrus hook which adds logrus events to active spans. +// If the span is not recording or the span context is invalid, the hook is a no-op. +type LogrusHook struct{} + +// Levels returns the logrus levels that this hook is interested in. +func (h *LogrusHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +// Fire is called when a log event occurs. 
+func (h *LogrusHook) Fire(entry *logrus.Entry) error { + span := trace.SpanFromContext(entry.Context) + if span == nil { + return nil + } + + if !span.SpanContext().IsValid() || !span.IsRecording() { + return nil + } + + span.AddEvent( + entry.Message, + trace.WithAttributes(logrusDataToAttrs(entry.Data)...), + trace.WithAttributes(attribute.String("level", entry.Level.String())), + trace.WithTimestamp(entry.Time), + ) + + return nil +} + +func logrusDataToAttrs(data logrus.Fields) []attribute.KeyValue { + attrs := make([]attribute.KeyValue, 0, len(data)) + for k, v := range data { + attrs = append(attrs, any(k, v)) + } + return attrs +} + +func any(k string, v interface{}) attribute.KeyValue { + if v == nil { + return attribute.String(k, "") + } + + switch typed := v.(type) { + case bool: + return attribute.Bool(k, typed) + case []bool: + return attribute.BoolSlice(k, typed) + case int: + return attribute.Int(k, typed) + case []int: + return attribute.IntSlice(k, typed) + case int8: + return attribute.Int(k, int(typed)) + case []int8: + ls := make([]int, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int(i)) + } + return attribute.IntSlice(k, ls) + case int16: + return attribute.Int(k, int(typed)) + case []int16: + ls := make([]int, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int(i)) + } + return attribute.IntSlice(k, ls) + case int32: + return attribute.Int64(k, int64(typed)) + case []int32: + ls := make([]int64, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int64(i)) + } + return attribute.Int64Slice(k, ls) + case int64: + return attribute.Int64(k, typed) + case []int64: + return attribute.Int64Slice(k, typed) + case float64: + return attribute.Float64(k, typed) + case []float64: + return attribute.Float64Slice(k, typed) + case string: + return attribute.String(k, typed) + case []string: + return attribute.StringSlice(k, typed) + } + + if stringer, ok := v.(fmt.Stringer); ok { + return attribute.String(k, 
stringer.String()) + } + if b, err := json.Marshal(v); b != nil && err == nil { + return attribute.String(k, string(b)) + } + return attribute.String(k, fmt.Sprintf("%v", v)) +} diff --git a/tracing/plugin/otlp.go b/tracing/plugin/otlp.go new file mode 100644 index 0000000..95bf550 --- /dev/null +++ b/tracing/plugin/otlp.go @@ -0,0 +1,179 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +import ( + "context" + "fmt" + "io" + "net/url" + "time" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/plugin" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/sdk/trace" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" +) + +const exporterPlugin = "otlp" + +func init() { + plugin.Register(&plugin.Registration{ + ID: exporterPlugin, + Type: plugin.TracingProcessorPlugin, + Config: &OTLPConfig{}, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + cfg := ic.Config.(*OTLPConfig) + if cfg.Endpoint == "" { + return nil, fmt.Errorf("no OpenTelemetry endpoint: %w", plugin.ErrSkipPlugin) + } + exp, err := newExporter(ic.Context, cfg) + if err != nil { + return nil, err 
+ } + return trace.NewBatchSpanProcessor(exp), nil + }, + }) + plugin.Register(&plugin.Registration{ + ID: "tracing", + Type: plugin.InternalPlugin, + Requires: []plugin.Type{plugin.TracingProcessorPlugin}, + Config: &TraceConfig{ServiceName: "containerd", TraceSamplingRatio: 1.0}, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return newTracer(ic) + }, + }) +} + +// OTLPConfig holds the configurations for the built-in otlp span processor +type OTLPConfig struct { + Endpoint string `toml:"endpoint"` + Protocol string `toml:"protocol"` + Insecure bool `toml:"insecure"` +} + +// TraceConfig is the common configuration for open telemetry. +type TraceConfig struct { + ServiceName string `toml:"service_name"` + TraceSamplingRatio float64 `toml:"sampling_ratio"` +} + +type closer struct { + close func() error +} + +func (c *closer) Close() error { + return c.close() +} + +// newExporter creates an exporter based on the given configuration. +// +// The default protocol is http/protobuf since it is recommended by +// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md#specify-protocol. +func newExporter(ctx context.Context, cfg *OTLPConfig) (*otlptrace.Exporter, error) { + const timeout = 5 * time.Second + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + if cfg.Protocol == "http/protobuf" || cfg.Protocol == "" { + u, err := url.Parse(cfg.Endpoint) + if err != nil { + return nil, fmt.Errorf("OpenTelemetry endpoint %q is invalid: %w", cfg.Endpoint, err) + } + opts := []otlptracehttp.Option{ + otlptracehttp.WithEndpoint(u.Host), + } + if u.Scheme == "http" { + opts = append(opts, otlptracehttp.WithInsecure()) + } + return otlptracehttp.New(ctx, opts...) 
+ } else if cfg.Protocol == "grpc" { + opts := []otlptracegrpc.Option{ + otlptracegrpc.WithEndpoint(cfg.Endpoint), + } + if cfg.Insecure { + opts = append(opts, otlptracegrpc.WithInsecure()) + } + return otlptracegrpc.New(ctx, opts...) + } else { + // Other protocols such as "http/json" are not supported. + return nil, fmt.Errorf("OpenTelemetry protocol %q is not supported", cfg.Protocol) + } +} + +// newTracer configures protocol-agonostic tracing settings such as +// its sampling ratio and returns io.Closer. +// +// Note that this function sets process-wide tracing configuration. +func newTracer(ic *plugin.InitContext) (io.Closer, error) { + ctx := ic.Context + config := ic.Config.(*TraceConfig) + + res, err := resource.New(ctx, + resource.WithAttributes( + // Service name used to displace traces in backends + semconv.ServiceNameKey.String(config.ServiceName), + ), + ) + if err != nil { + return nil, fmt.Errorf("failed to create resource: %w", err) + } + + opts := []sdktrace.TracerProviderOption{ + sdktrace.WithSampler(sdktrace.TraceIDRatioBased(config.TraceSamplingRatio)), + sdktrace.WithResource(res), + } + + ls, err := ic.GetByType(plugin.TracingProcessorPlugin) + if err != nil { + return nil, fmt.Errorf("failed to get tracing processors: %w", err) + } + + procs := make([]sdktrace.SpanProcessor, 0, len(ls)) + for id, pctx := range ls { + p, err := pctx.Instance() + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to initialize a tracing processor %q", id) + continue + } + proc := p.(sdktrace.SpanProcessor) + opts = append(opts, sdktrace.WithSpanProcessor(proc)) + procs = append(procs, proc) + } + + provider := sdktrace.NewTracerProvider(opts...) 
+ + otel.SetTracerProvider(provider) + otel.SetTextMapPropagator(propagation.TraceContext{}) + + return &closer{close: func() error { + for _, p := range procs { + if err := p.Shutdown(ctx); err != nil { + return err + } + } + return nil + }}, nil +} diff --git a/vendor/github.com/containerd/continuity/fs/copy_darwinopenbsdsolaris.go b/tracing/tracing.go similarity index 53% rename from vendor/github.com/containerd/continuity/fs/copy_darwinopenbsdsolaris.go rename to tracing/tracing.go index 92ccacf..d3ecfb5 100644 --- a/vendor/github.com/containerd/continuity/fs/copy_darwinopenbsdsolaris.go +++ b/tracing/tracing.go @@ -1,5 +1,3 @@ -// +build darwin openbsd solaris - /* Copyright The containerd Authors. @@ -16,25 +14,24 @@ limitations under the License. */ -package fs +package tracing import ( - "os" - "syscall" + "context" - "github.com/pkg/errors" - "golang.org/x/sys/unix" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" ) -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") +// StartSpan starts child span in a context. +func StartSpan(ctx context.Context, opName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + if parent := trace.SpanFromContext(ctx); parent != nil && parent.SpanContext().IsValid() { + return parent.TracerProvider().Tracer("").Start(ctx, opName, opts...) } - return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) + return otel.Tracer("").Start(ctx, opName, opts...) 
} -func utimesNano(name string, atime, mtime syscall.Timespec) error { - timespec := []syscall.Timespec{atime, mtime} - return syscall.UtimesNano(name, timespec) +// StopSpan ends the span specified +func StopSpan(span trace.Span) { + span.End() } diff --git a/unpacker.go b/unpacker.go index 76f5d7b..29bd595 100644 --- a/unpacker.go +++ b/unpacker.go @@ -18,10 +18,11 @@ package containerd import ( "context" + "crypto/rand" "encoding/base64" "encoding/json" + "errors" "fmt" - "math/rand" "sync" "sync/atomic" "time" @@ -31,12 +32,12 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/pkg/kmutex" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/snapshots" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" @@ -59,17 +60,24 @@ func (c *Client) newUnpacker(ctx context.Context, rCtx *RemoteContext) (*unpacke if err != nil { return nil, err } - var config UnpackConfig + var config = UnpackConfig{ + DuplicationSuppressor: kmutex.NewNoop(), + } for _, o := range rCtx.UnpackOpts { if err := o(ctx, &config); err != nil { return nil, err } } + var limiter *semaphore.Weighted + if rCtx.MaxConcurrentDownloads > 0 { + limiter = semaphore.NewWeighted(int64(rCtx.MaxConcurrentDownloads)) + } return &unpacker{ updateCh: make(chan ocispec.Descriptor, 128), snapshotter: snapshotter, config: config, c: c, + limiter: limiter, }, nil } @@ -87,18 +95,18 @@ func (u *unpacker) unpack( var i ocispec.Image if err := json.Unmarshal(p, &i); err != nil { - return errors.Wrap(err, "unmarshal image config") + return fmt.Errorf("unmarshal image config: %w", err) } diffIDs := i.RootFS.DiffIDs if len(layers) != len(diffIDs) { - return 
errors.Errorf("number of layers and diffIDs don't match: %d != %d", len(layers), len(diffIDs)) + return fmt.Errorf("number of layers and diffIDs don't match: %d != %d", len(layers), len(diffIDs)) } if u.config.CheckPlatformSupported { imgPlatform := platforms.Normalize(ocispec.Platform{OS: i.OS, Architecture: i.Architecture}) snapshotterPlatformMatcher, err := u.c.GetSnapshotterSupportedPlatforms(ctx, u.snapshotter) if err != nil { - return errors.Wrapf(err, "failed to find supported platforms for snapshotter %s", u.snapshotter) + return fmt.Errorf("failed to find supported platforms for snapshotter %s: %w", u.snapshotter, err) } if !snapshotterPlatformMatcher.Match(imgPlatform) { return fmt.Errorf("snapshotter %s does not support platform %s for image %s", u.snapshotter, imgPlatform, config.Digest) @@ -122,17 +130,22 @@ func (u *unpacker) unpack( ctx, cancel := context.WithCancel(ctx) defer cancel() -EachLayer: - for i, desc := range layers { + doUnpackFn := func(i int, desc ocispec.Descriptor) error { parent := identity.ChainID(chain) chain = append(chain, diffIDs[i]) - chainID := identity.ChainID(chain).String() + + unlock, err := u.lockSnChainID(ctx, chainID) + if err != nil { + return err + } + defer unlock() + if _, err := sn.Stat(ctx, chainID); err == nil { // no need to handle - continue + return nil } else if !errdefs.IsNotFound(err) { - return errors.Wrapf(err, "failed to stat snapshot %s", chainID) + return fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) } // inherits annotations which are provided as snapshot labels. 
@@ -156,23 +169,23 @@ EachLayer: if errdefs.IsAlreadyExists(err) { if _, err := sn.Stat(ctx, chainID); err != nil { if !errdefs.IsNotFound(err) { - return errors.Wrapf(err, "failed to stat snapshot %s", chainID) + return fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) } // Try again, this should be rare, log it log.G(ctx).WithField("key", key).WithField("chainid", chainID).Debug("extraction snapshot already exists, chain id not found") } else { // no need to handle, snapshot now found with chain id - continue EachLayer + return nil } } else { - return errors.Wrapf(err, "failed to prepare extraction snapshot %q", key) + return fmt.Errorf("failed to prepare extraction snapshot %q: %w", key, err) } } else { break } } if err != nil { - return errors.Wrap(err, "unable to prepare extraction snapshot") + return fmt.Errorf("unable to prepare extraction snapshot: %w", err) } // Abort the snapshot if commit does not happen @@ -212,19 +225,19 @@ EachLayer: diff, err := a.Apply(ctx, desc, mounts, u.config.ApplyOpts...) 
if err != nil { abort() - return errors.Wrapf(err, "failed to extract layer %s", diffIDs[i]) + return fmt.Errorf("failed to extract layer %s: %w", diffIDs[i], err) } if diff.Digest != diffIDs[i] { abort() - return errors.Errorf("wrong diff id calculated on extraction %q", diffIDs[i]) + return fmt.Errorf("wrong diff id calculated on extraction %q", diffIDs[i]) } if err = sn.Commit(ctx, chainID, key, opts...); err != nil { abort() if errdefs.IsAlreadyExists(err) { - continue + return nil } - return errors.Wrapf(err, "failed to commit snapshot %s", key) + return fmt.Errorf("failed to commit snapshot %s: %w", key, err) } // Set the uncompressed label after the uncompressed @@ -238,7 +251,13 @@ EachLayer: if _, err := cs.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil { return err } + return nil + } + for i, desc := range layers { + if err := doUnpackFn(i, desc); err != nil { + return err + } } chainID := identity.ChainID(chain).String() @@ -266,17 +285,22 @@ func (u *unpacker) fetch(ctx context.Context, h images.Handler, layers []ocispec desc := desc i := i - if u.limiter != nil { - if err := u.limiter.Acquire(ctx, 1); err != nil { - return err - } + if err := u.acquire(ctx); err != nil { + return err } eg.Go(func() error { - _, err := h.Handle(ctx2, desc) - if u.limiter != nil { - u.limiter.Release(1) + unlock, err := u.lockBlobDescriptor(ctx2, desc) + if err != nil { + u.release() + return err } + + _, err = h.Handle(ctx2, desc) + + unlock() + u.release() + if err != nil && !errors.Is(err, images.ErrSkipDesc) { return err } @@ -301,7 +325,13 @@ func (u *unpacker) handlerWrapper( layers = map[digest.Digest][]ocispec.Descriptor{} ) return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + unlock, err := u.lockBlobDescriptor(ctx, desc) + if err != nil { + return nil, err + } + children, err := f.Handle(ctx, desc) + unlock() if err != nil { return children, err } @@ -344,6 +374,50 @@ func (u 
*unpacker) handlerWrapper( }, eg } +func (u *unpacker) acquire(ctx context.Context) error { + if u.limiter == nil { + return nil + } + return u.limiter.Acquire(ctx, 1) +} + +func (u *unpacker) release() { + if u.limiter == nil { + return + } + u.limiter.Release(1) +} + +func (u *unpacker) lockSnChainID(ctx context.Context, chainID string) (func(), error) { + key := u.makeChainIDKeyWithSnapshotter(chainID) + + if err := u.config.DuplicationSuppressor.Lock(ctx, key); err != nil { + return nil, err + } + return func() { + u.config.DuplicationSuppressor.Unlock(key) + }, nil +} + +func (u *unpacker) lockBlobDescriptor(ctx context.Context, desc ocispec.Descriptor) (func(), error) { + key := u.makeBlobDescriptorKey(desc) + + if err := u.config.DuplicationSuppressor.Lock(ctx, key); err != nil { + return nil, err + } + return func() { + u.config.DuplicationSuppressor.Unlock(key) + }, nil +} + +func (u *unpacker) makeChainIDKeyWithSnapshotter(chainID string) string { + return fmt.Sprintf("sn://%s/%v", u.snapshotter, chainID) +} + +func (u *unpacker) makeBlobDescriptorKey(desc ocispec.Descriptor) string { + return fmt.Sprintf("blob://%v", desc.Digest) +} + func uniquePart() string { t := time.Now() var b [3]byte diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore deleted file mode 100644 index b883f1f..0000000 --- a/vendor/github.com/Microsoft/go-winio/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.exe diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS deleted file mode 100644 index ae1b494..0000000 --- a/vendor/github.com/Microsoft/go-winio/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ - * @microsoft/containerplat diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE deleted file mode 100644 index b8b569d..0000000 --- a/vendor/github.com/Microsoft/go-winio/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - 
-Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md deleted file mode 100644 index 5680010..0000000 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# go-winio - -This repository contains utilities for efficiently performing Win32 IO operations in -Go. Currently, this is focused on accessing named pipes and other file handles, and -for using named pipes as a net transport. - -This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go -to reuse the thread to schedule another goroutine. This limits support to Windows Vista and -newer operating systems. This is similar to the implementation of network sockets in Go's net -package. - -Please see the LICENSE file for licensing information. 
- -This project has adopted the [Microsoft Open Source Code of -Conduct](https://opensource.microsoft.com/codeofconduct/). For more information -see the [Code of Conduct -FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact -[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional -questions or comments. - -Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe -for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go deleted file mode 100644 index 2be34af..0000000 --- a/vendor/github.com/Microsoft/go-winio/backup.go +++ /dev/null @@ -1,280 +0,0 @@ -// +build windows - -package winio - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "syscall" - "unicode/utf16" -) - -//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite - -const ( - BackupData = uint32(iota + 1) - BackupEaData - BackupSecurity - BackupAlternateData - BackupLink - BackupPropertyData - BackupObjectId - BackupReparseData - BackupSparseBlock - BackupTxfsData -) - -const ( - StreamSparseAttributes = uint32(8) -) - -const ( - WRITE_DAC = 0x40000 - WRITE_OWNER = 0x80000 - ACCESS_SYSTEM_SECURITY = 0x1000000 -) - -// BackupHeader represents a backup stream of a file. -type BackupHeader struct { - Id uint32 // The backup stream ID - Attributes uint32 // Stream attributes - Size int64 // The size of the stream in bytes - Name string // The name of the stream (for BackupAlternateData only). - Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). 
-} - -type win32StreamId struct { - StreamId uint32 - Attributes uint32 - Size uint64 - NameSize uint32 -} - -// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series -// of BackupHeader values. -type BackupStreamReader struct { - r io.Reader - bytesLeft int64 -} - -// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. -func NewBackupStreamReader(r io.Reader) *BackupStreamReader { - return &BackupStreamReader{r, 0} -} - -// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if -// it was not completely read. -func (r *BackupStreamReader) Next() (*BackupHeader, error) { - if r.bytesLeft > 0 { - if s, ok := r.r.(io.Seeker); ok { - // Make sure Seek on io.SeekCurrent sometimes succeeds - // before trying the actual seek. - if _, err := s.Seek(0, io.SeekCurrent); err == nil { - if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { - return nil, err - } - r.bytesLeft = 0 - } - } - if _, err := io.Copy(ioutil.Discard, r); err != nil { - return nil, err - } - } - var wsi win32StreamId - if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { - return nil, err - } - hdr := &BackupHeader{ - Id: wsi.StreamId, - Attributes: wsi.Attributes, - Size: int64(wsi.Size), - } - if wsi.NameSize != 0 { - name := make([]uint16, int(wsi.NameSize/2)) - if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { - return nil, err - } - hdr.Name = syscall.UTF16ToString(name) - } - if wsi.StreamId == BackupSparseBlock { - if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { - return nil, err - } - hdr.Size -= 8 - } - r.bytesLeft = hdr.Size - return hdr, nil -} - -// Read reads from the current backup stream. 
-func (r *BackupStreamReader) Read(b []byte) (int, error) { - if r.bytesLeft == 0 { - return 0, io.EOF - } - if int64(len(b)) > r.bytesLeft { - b = b[:r.bytesLeft] - } - n, err := r.r.Read(b) - r.bytesLeft -= int64(n) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } else if r.bytesLeft == 0 && err == nil { - err = io.EOF - } - return n, err -} - -// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. -type BackupStreamWriter struct { - w io.Writer - bytesLeft int64 -} - -// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. -func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { - return &BackupStreamWriter{w, 0} -} - -// WriteHeader writes the next backup stream header and prepares for calls to Write(). -func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { - if w.bytesLeft != 0 { - return fmt.Errorf("missing %d bytes", w.bytesLeft) - } - name := utf16.Encode([]rune(hdr.Name)) - wsi := win32StreamId{ - StreamId: hdr.Id, - Attributes: hdr.Attributes, - Size: uint64(hdr.Size), - NameSize: uint32(len(name) * 2), - } - if hdr.Id == BackupSparseBlock { - // Include space for the int64 block offset - wsi.Size += 8 - } - if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { - return err - } - if len(name) != 0 { - if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { - return err - } - } - if hdr.Id == BackupSparseBlock { - if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { - return err - } - } - w.bytesLeft = hdr.Size - return nil -} - -// Write writes to the current backup stream. -func (w *BackupStreamWriter) Write(b []byte) (int, error) { - if w.bytesLeft < int64(len(b)) { - return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) - } - n, err := w.w.Write(b) - w.bytesLeft -= int64(n) - return n, err -} - -// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. 
-type BackupFileReader struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, -// Read will attempt to read the security descriptor of the file. -func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { - r := &BackupFileReader{f, includeSecurity, 0} - return r -} - -// Read reads a backup stream from the file by calling the Win32 API BackupRead(). -func (r *BackupFileReader) Read(b []byte) (int, error) { - var bytesRead uint32 - err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) - if err != nil { - return 0, &os.PathError{"BackupRead", r.f.Name(), err} - } - runtime.KeepAlive(r.f) - if bytesRead == 0 { - return 0, io.EOF - } - return int(bytesRead), nil -} - -// Close frees Win32 resources associated with the BackupFileReader. It does not close -// the underlying file. -func (r *BackupFileReader) Close() error { - if r.ctx != 0 { - backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) - runtime.KeepAlive(r.f) - r.ctx = 0 - } - return nil -} - -// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. -type BackupFileWriter struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, -// Write() will attempt to restore the security descriptor from the stream. -func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { - w := &BackupFileWriter{f, includeSecurity, 0} - return w -} - -// Write restores a portion of the file using the provided backup stream. 
-func (w *BackupFileWriter) Write(b []byte) (int, error) { - var bytesWritten uint32 - err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) - if err != nil { - return 0, &os.PathError{"BackupWrite", w.f.Name(), err} - } - runtime.KeepAlive(w.f) - if int(bytesWritten) != len(b) { - return int(bytesWritten), errors.New("not all bytes could be written") - } - return len(b), nil -} - -// Close frees Win32 resources associated with the BackupFileWriter. It does not -// close the underlying file. -func (w *BackupFileWriter) Close() error { - if w.ctx != 0 { - backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) - runtime.KeepAlive(w.f) - w.ctx = 0 - } - return nil -} - -// OpenForBackup opens a file or directory, potentially skipping access checks if the backup -// or restore privileges have been acquired. -// -// If the file opened was a directory, it cannot be used with Readdir(). -func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { - winPath, err := syscall.UTF16FromString(path) - if err != nil { - return nil, err - } - h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) - if err != nil { - err = &os.PathError{Op: "open", Path: path, Err: err} - return nil, err - } - return os.NewFile(uintptr(h), path), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go deleted file mode 100644 index d39eccf..0000000 --- a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go +++ /dev/null @@ -1,4 +0,0 @@ -// +build !windows -// This file only exists to allow go get on non-Windows platforms. 
- -package backuptar diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go deleted file mode 100644 index 3416096..0000000 --- a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go +++ /dev/null @@ -1,68 +0,0 @@ -package backuptar - -import ( - "archive/tar" - "fmt" - "strconv" - "strings" - "time" -) - -// Functions copied from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go -// as we need to manage the LIBARCHIVE.creationtime PAXRecord manually. -// Idea taken from containerd which did the same thing. - -// parsePAXTime takes a string of the form %d.%d as described in the PAX -// specification. Note that this implementation allows for negative timestamps, -// which is allowed for by the PAX specification, but not always portable. -func parsePAXTime(s string) (time.Time, error) { - const maxNanoSecondDigits = 9 - - // Split string into seconds and sub-seconds parts. - ss, sn := s, "" - if pos := strings.IndexByte(s, '.'); pos >= 0 { - ss, sn = s[:pos], s[pos+1:] - } - - // Parse the seconds. - secs, err := strconv.ParseInt(ss, 10, 64) - if err != nil { - return time.Time{}, tar.ErrHeader - } - if len(sn) == 0 { - return time.Unix(secs, 0), nil // No sub-second values - } - - // Parse the nanoseconds. - if strings.Trim(sn, "0123456789") != "" { - return time.Time{}, tar.ErrHeader - } - if len(sn) < maxNanoSecondDigits { - sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad - } else { - sn = sn[:maxNanoSecondDigits] // Right truncate - } - nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed - if len(ss) > 0 && ss[0] == '-' { - return time.Unix(secs, -1*nsecs), nil // Negative correction - } - return time.Unix(secs, nsecs), nil -} - -// formatPAXTime converts ts into a time of the form %d.%d as described in the -// PAX specification. This function is capable of negative timestamps. 
-func formatPAXTime(ts time.Time) (s string) { - secs, nsecs := ts.Unix(), ts.Nanosecond() - if nsecs == 0 { - return strconv.FormatInt(secs, 10) - } - - // If seconds is negative, then perform correction. - sign := "" - if secs < 0 { - sign = "-" // Remember sign - secs = -(secs + 1) // Add a second to secs - nsecs = -(nsecs - 1e9) // Take that second away from nsecs - } - return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0") -} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go deleted file mode 100644 index cb461ca..0000000 --- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go +++ /dev/null @@ -1,452 +0,0 @@ -// +build windows - -package backuptar - -import ( - "archive/tar" - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - "github.com/Microsoft/go-winio" - "golang.org/x/sys/windows" -) - -const ( - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) - c_ISDIR = 040000 // Directory - c_ISFIFO = 010000 // FIFO - c_ISREG = 0100000 // Regular file - c_ISLNK = 0120000 // Symbolic link - c_ISBLK = 060000 // Block special file - c_ISCHR = 020000 // Character special file - c_ISSOCK = 0140000 // Socket -) - -const ( - hdrFileAttributes = "MSWINDOWS.fileattr" - hdrSecurityDescriptor = "MSWINDOWS.sd" - hdrRawSecurityDescriptor = "MSWINDOWS.rawsd" - hdrMountPoint = "MSWINDOWS.mountpoint" - hdrEaPrefix = "MSWINDOWS.xattr." 
- - hdrCreationTime = "LIBARCHIVE.creationtime" -) - -func writeZeroes(w io.Writer, count int64) error { - buf := make([]byte, 8192) - c := len(buf) - for i := int64(0); i < count; i += int64(c) { - if int64(c) > count-i { - c = int(count - i) - } - _, err := w.Write(buf[:c]) - if err != nil { - return err - } - } - return nil -} - -func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { - curOffset := int64(0) - for { - bhdr, err := br.Next() - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - if err != nil { - return err - } - if bhdr.Id != winio.BackupSparseBlock { - return fmt.Errorf("unexpected stream %d", bhdr.Id) - } - - // archive/tar does not support writing sparse files - // so just write zeroes to catch up to the current offset. - err = writeZeroes(t, bhdr.Offset-curOffset) - if bhdr.Size == 0 { - break - } - n, err := io.Copy(t, br) - if err != nil { - return err - } - curOffset = bhdr.Offset + n - } - return nil -} - -// BasicInfoHeader creates a tar header from basic file information. -func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header { - hdr := &tar.Header{ - Format: tar.FormatPAX, - Name: filepath.ToSlash(name), - Size: size, - Typeflag: tar.TypeReg, - ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()), - ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()), - AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()), - PAXRecords: make(map[string]string), - } - hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes) - hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds())) - - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - hdr.Mode |= c_ISDIR - hdr.Size = 0 - hdr.Typeflag = tar.TypeDir - } - return hdr -} - -// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. 
-// -// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. -// -// The additional Win32 metadata is: -// -// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value -// -// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format -// -// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) -func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { - name = filepath.ToSlash(name) - hdr := BasicInfoHeader(name, size, fileInfo) - - // If r can be seeked, then this function is two-pass: pass 1 collects the - // tar header data, and pass 2 copies the data stream. If r cannot be - // seeked, then some header data (in particular EAs) will be silently lost. - var ( - restartPos int64 - err error - ) - sr, readTwice := r.(io.Seeker) - if readTwice { - if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil { - readTwice = false - } - } - - br := winio.NewBackupStreamReader(r) - var dataHdr *winio.BackupHeader - for dataHdr == nil { - bhdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - switch bhdr.Id { - case winio.BackupData: - hdr.Mode |= c_ISREG - if !readTwice { - dataHdr = bhdr - } - case winio.BackupSecurity: - sd, err := ioutil.ReadAll(br) - if err != nil { - return err - } - hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd) - - case winio.BackupReparseData: - hdr.Mode |= c_ISLNK - hdr.Typeflag = tar.TypeSymlink - reparseBuffer, err := ioutil.ReadAll(br) - rp, err := winio.DecodeReparsePoint(reparseBuffer) - if err != nil { - return err - } - if rp.IsMountPoint { - hdr.PAXRecords[hdrMountPoint] = "1" - } - hdr.Linkname = rp.Target - - case winio.BackupEaData: - eab, err := ioutil.ReadAll(br) - if err != nil { - return err - } - eas, err := winio.DecodeExtendedAttributes(eab) - if err != nil { - return err - } - for 
_, ea := range eas { - // Use base64 encoding for the binary value. Note that there - // is no way to encode the EA's flags, since their use doesn't - // make any sense for persisted EAs. - hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value) - } - - case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: - // ignore these streams - default: - return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id) - } - } - - err = t.WriteHeader(hdr) - if err != nil { - return err - } - - if readTwice { - // Get back to the data stream. - if _, err = sr.Seek(restartPos, io.SeekStart); err != nil { - return err - } - for dataHdr == nil { - bhdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - if bhdr.Id == winio.BackupData { - dataHdr = bhdr - } - } - } - - if dataHdr != nil { - // A data stream was found. Copy the data. - if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 { - if size != dataHdr.Size { - return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size) - } - _, err = io.Copy(t, br) - if err != nil { - return err - } - } else { - err = copySparse(t, br) - if err != nil { - return err - } - } - } - - // Look for streams after the data stream. The only ones we handle are alternate data streams. - // Other streams may have metadata that could be serialized, but the tar header has already - // been written. In practice, this means that we don't get EA or TXF metadata. 
- for { - bhdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - switch bhdr.Id { - case winio.BackupAlternateData: - altName := bhdr.Name - if strings.HasSuffix(altName, ":$DATA") { - altName = altName[:len(altName)-len(":$DATA")] - } - if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 { - hdr = &tar.Header{ - Format: hdr.Format, - Name: name + altName, - Mode: hdr.Mode, - Typeflag: tar.TypeReg, - Size: bhdr.Size, - ModTime: hdr.ModTime, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - err = t.WriteHeader(hdr) - if err != nil { - return err - } - _, err = io.Copy(t, br) - if err != nil { - return err - } - - } else { - // Unsupported for now, since the size of the alternate stream is not present - // in the backup stream until after the data has been read. - return errors.New("tar of sparse alternate data streams is unsupported") - } - case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: - // ignore these streams - default: - return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id) - } - } - return nil -} - -// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by -// WriteTarFileFromBackupStream. 
-func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) { - name = hdr.Name - if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { - size = hdr.Size - } - fileInfo = &winio.FileBasicInfo{ - LastAccessTime: windows.NsecToFiletime(hdr.AccessTime.UnixNano()), - LastWriteTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()), - ChangeTime: windows.NsecToFiletime(hdr.ChangeTime.UnixNano()), - // Default to ModTime, we'll pull hdrCreationTime below if present - CreationTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()), - } - if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok { - attr, err := strconv.ParseUint(attrStr, 10, 32) - if err != nil { - return "", 0, nil, err - } - fileInfo.FileAttributes = uint32(attr) - } else { - if hdr.Typeflag == tar.TypeDir { - fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY - } - } - if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok { - creationTime, err := parsePAXTime(creationTimeStr) - if err != nil { - return "", 0, nil, err - } - fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano()) - } - return -} - -// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple -// tar file entries in order to collect all the alternate data streams for the file, it returns the next -// tar file that was not processed, or io.EOF is there are no more. -func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { - bw := winio.NewBackupStreamWriter(w) - var sd []byte - var err error - // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written - // by this library will have raw binary for the security descriptor. 
- if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { - sd, err = winio.SddlToSecurityDescriptor(sddl) - if err != nil { - return nil, err - } - } - if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { - sd, err = base64.StdEncoding.DecodeString(sdraw) - if err != nil { - return nil, err - } - } - if len(sd) != 0 { - bhdr := winio.BackupHeader{ - Id: winio.BackupSecurity, - Size: int64(len(sd)), - } - err := bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = bw.Write(sd) - if err != nil { - return nil, err - } - } - var eas []winio.ExtendedAttribute - for k, v := range hdr.PAXRecords { - if !strings.HasPrefix(k, hdrEaPrefix) { - continue - } - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return nil, err - } - eas = append(eas, winio.ExtendedAttribute{ - Name: k[len(hdrEaPrefix):], - Value: data, - }) - } - if len(eas) != 0 { - eadata, err := winio.EncodeExtendedAttributes(eas) - if err != nil { - return nil, err - } - bhdr := winio.BackupHeader{ - Id: winio.BackupEaData, - Size: int64(len(eadata)), - } - err = bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = bw.Write(eadata) - if err != nil { - return nil, err - } - } - if hdr.Typeflag == tar.TypeSymlink { - _, isMountPoint := hdr.PAXRecords[hdrMountPoint] - rp := winio.ReparsePoint{ - Target: filepath.FromSlash(hdr.Linkname), - IsMountPoint: isMountPoint, - } - reparse := winio.EncodeReparsePoint(&rp) - bhdr := winio.BackupHeader{ - Id: winio.BackupReparseData, - Size: int64(len(reparse)), - } - err := bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = bw.Write(reparse) - if err != nil { - return nil, err - } - } - if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { - bhdr := winio.BackupHeader{ - Id: winio.BackupData, - Size: hdr.Size, - } - err := bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = io.Copy(bw, t) - if err != nil { - return nil, err - } - } - // Copy all 
the alternate data streams and return the next non-ADS header. - for { - ahdr, err := t.Next() - if err != nil { - return nil, err - } - if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { - return ahdr, nil - } - bhdr := winio.BackupHeader{ - Id: winio.BackupAlternateData, - Size: ahdr.Size, - Name: ahdr.Name[len(hdr.Name):] + ":$DATA", - } - err = bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = io.Copy(bw, t) - if err != nil { - return nil, err - } - } -} diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go deleted file mode 100644 index 4051c1b..0000000 --- a/vendor/github.com/Microsoft/go-winio/ea.go +++ /dev/null @@ -1,137 +0,0 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "errors" -) - -type fileFullEaInformation struct { - NextEntryOffset uint32 - Flags uint8 - NameLength uint8 - ValueLength uint16 -} - -var ( - fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) - - errInvalidEaBuffer = errors.New("invalid extended attribute buffer") - errEaNameTooLarge = errors.New("extended attribute name too large") - errEaValueTooLarge = errors.New("extended attribute value too large") -) - -// ExtendedAttribute represents a single Windows EA. 
-type ExtendedAttribute struct { - Name string - Value []byte - Flags uint8 -} - -func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { - var info fileFullEaInformation - err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) - if err != nil { - err = errInvalidEaBuffer - return - } - - nameOffset := fileFullEaInformationSize - nameLen := int(info.NameLength) - valueOffset := nameOffset + int(info.NameLength) + 1 - valueLen := int(info.ValueLength) - nextOffset := int(info.NextEntryOffset) - if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { - err = errInvalidEaBuffer - return - } - - ea.Name = string(b[nameOffset : nameOffset+nameLen]) - ea.Value = b[valueOffset : valueOffset+valueLen] - ea.Flags = info.Flags - if info.NextEntryOffset != 0 { - nb = b[info.NextEntryOffset:] - } - return -} - -// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION -// buffer retrieved from BackupRead, ZwQueryEaFile, etc. -func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { - for len(b) != 0 { - ea, nb, err := parseEa(b) - if err != nil { - return nil, err - } - - eas = append(eas, ea) - b = nb - } - return -} - -func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { - if int(uint8(len(ea.Name))) != len(ea.Name) { - return errEaNameTooLarge - } - if int(uint16(len(ea.Value))) != len(ea.Value) { - return errEaValueTooLarge - } - entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) - withPadding := (entrySize + 3) &^ 3 - nextOffset := uint32(0) - if !last { - nextOffset = withPadding - } - info := fileFullEaInformation{ - NextEntryOffset: nextOffset, - Flags: ea.Flags, - NameLength: uint8(len(ea.Name)), - ValueLength: uint16(len(ea.Value)), - } - - err := binary.Write(buf, binary.LittleEndian, &info) - if err != nil { - return err - } - - _, err = buf.Write([]byte(ea.Name)) - if err != nil { - return err - } - - err = buf.WriteByte(0) 
- if err != nil { - return err - } - - _, err = buf.Write(ea.Value) - if err != nil { - return err - } - - _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) - if err != nil { - return err - } - - return nil -} - -// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION -// buffer for use with BackupWrite, ZwSetEaFile, etc. -func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { - var buf bytes.Buffer - for i := range eas { - last := false - if i == len(eas)-1 { - last = true - } - - err := writeEa(&buf, &eas[i], last) - if err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go deleted file mode 100644 index 0385e41..0000000 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ /dev/null @@ -1,323 +0,0 @@ -// +build windows - -package winio - -import ( - "errors" - "io" - "runtime" - "sync" - "sync/atomic" - "syscall" - "time" -) - -//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -type atomicBool int32 - -func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } -func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } -func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } -func (b *atomicBool) swap(new bool) bool { - 
var newInt int32 - if new { - newInt = 1 - } - return atomic.SwapInt32((*int32)(b), newInt) == 1 -} - -const ( - cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - -var ( - ErrFileClosed = errors.New("file has already been closed") - ErrTimeout = &timeoutError{} -) - -type timeoutError struct{} - -func (e *timeoutError) Error() string { return "i/o timeout" } -func (e *timeoutError) Timeout() bool { return true } -func (e *timeoutError) Temporary() bool { return true } - -type timeoutChan chan struct{} - -var ioInitOnce sync.Once -var ioCompletionPort syscall.Handle - -// ioResult contains the result of an asynchronous IO operation -type ioResult struct { - bytes uint32 - err error -} - -// ioOperation represents an outstanding asynchronous Win32 IO -type ioOperation struct { - o syscall.Overlapped - ch chan ioResult -} - -func initIo() { - h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) - if err != nil { - panic(err) - } - ioCompletionPort = h - go ioCompletionProcessor(h) -} - -// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. -// It takes ownership of this handle and will close it if it is garbage collected. 
-type win32File struct { - handle syscall.Handle - wg sync.WaitGroup - wgLock sync.RWMutex - closing atomicBool - socket bool - readDeadline deadlineHandler - writeDeadline deadlineHandler -} - -type deadlineHandler struct { - setLock sync.Mutex - channel timeoutChan - channelLock sync.RWMutex - timer *time.Timer - timedout atomicBool -} - -// makeWin32File makes a new win32File from an existing file handle -func makeWin32File(h syscall.Handle) (*win32File, error) { - f := &win32File{handle: h} - ioInitOnce.Do(initIo) - _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) - if err != nil { - return nil, err - } - err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) - if err != nil { - return nil, err - } - f.readDeadline.channel = make(timeoutChan) - f.writeDeadline.channel = make(timeoutChan) - return f, nil -} - -func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - // If we return the result of makeWin32File directly, it can result in an - // interface-wrapped nil, rather than a nil interface value. - f, err := makeWin32File(h) - if err != nil { - return nil, err - } - return f, nil -} - -// closeHandle closes the resources associated with a Win32 handle -func (f *win32File) closeHandle() { - f.wgLock.Lock() - // Atomically set that we are closing, releasing the resources only once. - if !f.closing.swap(true) { - f.wgLock.Unlock() - // cancel all IO and wait for it to complete - cancelIoEx(f.handle, nil) - f.wg.Wait() - // at this point, no new IO can start - syscall.Close(f.handle) - f.handle = 0 - } else { - f.wgLock.Unlock() - } -} - -// Close closes a win32File. -func (f *win32File) Close() error { - f.closeHandle() - return nil -} - -// prepareIo prepares for a new IO operation. -// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. 
-func (f *win32File) prepareIo() (*ioOperation, error) { - f.wgLock.RLock() - if f.closing.isSet() { - f.wgLock.RUnlock() - return nil, ErrFileClosed - } - f.wg.Add(1) - f.wgLock.RUnlock() - c := &ioOperation{} - c.ch = make(chan ioResult) - return c, nil -} - -// ioCompletionProcessor processes completed async IOs forever -func ioCompletionProcessor(h syscall.Handle) { - for { - var bytes uint32 - var key uintptr - var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) - if op == nil { - panic(err) - } - op.ch <- ioResult{bytes, err} - } -} - -// asyncIo processes the return value from ReadFile or WriteFile, blocking until -// the operation has actually completed. -func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { - return int(bytes), err - } - - if f.closing.isSet() { - cancelIoEx(f.handle, &c.o) - } - - var timeout timeoutChan - if d != nil { - d.channelLock.Lock() - timeout = d.channel - d.channelLock.Unlock() - } - - var r ioResult - select { - case r = <-c.ch: - err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { - if f.closing.isSet() { - err = ErrFileClosed - } - } else if err != nil && f.socket { - // err is from Win32. Query the overlapped structure to get the winsock error. - var bytes, flags uint32 - err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) - } - case <-timeout: - cancelIoEx(f.handle, &c.o) - r = <-c.ch - err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { - err = ErrTimeout - } - } - - // runtime.KeepAlive is needed, as c is passed via native - // code to ioCompletionProcessor, c must remain alive - // until the channel read is complete. - runtime.KeepAlive(c) - return int(r.bytes), err -} - -// Read reads from a file handle. 
-func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIo() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.readDeadline.timedout.isSet() { - return 0, ErrTimeout - } - - var bytes uint32 - err = syscall.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.readDeadline, bytes, err) - runtime.KeepAlive(b) - - // Handle EOF conditions. - if err == nil && n == 0 && len(b) != 0 { - return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { - return 0, io.EOF - } else { - return n, err - } -} - -// Write writes to a file handle. -func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIo() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.writeDeadline.timedout.isSet() { - return 0, ErrTimeout - } - - var bytes uint32 - err = syscall.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) - runtime.KeepAlive(b) - return n, err -} - -func (f *win32File) SetReadDeadline(deadline time.Time) error { - return f.readDeadline.set(deadline) -} - -func (f *win32File) SetWriteDeadline(deadline time.Time) error { - return f.writeDeadline.set(deadline) -} - -func (f *win32File) Flush() error { - return syscall.FlushFileBuffers(f.handle) -} - -func (f *win32File) Fd() uintptr { - return uintptr(f.handle) -} - -func (d *deadlineHandler) set(deadline time.Time) error { - d.setLock.Lock() - defer d.setLock.Unlock() - - if d.timer != nil { - if !d.timer.Stop() { - <-d.channel - } - d.timer = nil - } - d.timedout.setFalse() - - select { - case <-d.channel: - d.channelLock.Lock() - d.channel = make(chan struct{}) - d.channelLock.Unlock() - default: - } - - if deadline.IsZero() { - return nil - } - - timeoutIO := func() { - d.timedout.setTrue() - close(d.channel) - } - - now := time.Now() - duration := deadline.Sub(now) - if deadline.After(now) { - // Deadline is in the future, set a timer to wait - d.timer = time.AfterFunc(duration, timeoutIO) - } else { - // Deadline 
is in the past. Cancel all pending IO now. - timeoutIO() - } - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go deleted file mode 100644 index 3ab6bff..0000000 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build windows - -package winio - -import ( - "os" - "runtime" - "unsafe" - - "golang.org/x/sys/windows" -) - -// FileBasicInfo contains file access time and file attributes information. -type FileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime - FileAttributes uint32 - pad uint32 // padding -} - -// GetFileBasicInfo retrieves times and attributes for a file. -func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { - bi := &FileBasicInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return bi, nil -} - -// SetFileBasicInfo sets times and attributes for a file. -func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { - return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return nil -} - -// FileStandardInfo contains extended information for the file. -// FILE_STANDARD_INFO in WinBase.h -// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info -type FileStandardInfo struct { - AllocationSize, EndOfFile int64 - NumberOfLinks uint32 - DeletePending, Directory bool -} - -// GetFileStandardInfo retrieves ended information for the file. 
-func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { - si := &FileStandardInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return si, nil -} - -// FileIDInfo contains the volume serial number and file ID for a file. This pair should be -// unique on a system. -type FileIDInfo struct { - VolumeSerialNumber uint64 - FileID [16]byte -} - -// GetFileID retrieves the unique (volume, file ID) pair for a file. -func GetFileID(f *os.File) (*FileIDInfo, error) { - fileID := &FileIDInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return fileID, nil -} diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go deleted file mode 100644 index b632f8f..0000000 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ /dev/null @@ -1,307 +0,0 @@ -// +build windows - -package winio - -import ( - "fmt" - "io" - "net" - "os" - "syscall" - "time" - "unsafe" - - "github.com/Microsoft/go-winio/pkg/guid" -) - -//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind - -const ( - afHvSock = 34 // AF_HYPERV - - socketError = ^uintptr(0) -) - -// An HvsockAddr is an address for a AF_HYPERV socket. -type HvsockAddr struct { - VMID guid.GUID - ServiceID guid.GUID -} - -type rawHvsockAddr struct { - Family uint16 - _ uint16 - VMID guid.GUID - ServiceID guid.GUID -} - -// Network returns the address's network name, "hvsock". 
-func (addr *HvsockAddr) Network() string { - return "hvsock" -} - -func (addr *HvsockAddr) String() string { - return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) -} - -// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. -func VsockServiceID(port uint32) guid.GUID { - g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") - g.Data1 = port - return g -} - -func (addr *HvsockAddr) raw() rawHvsockAddr { - return rawHvsockAddr{ - Family: afHvSock, - VMID: addr.VMID, - ServiceID: addr.ServiceID, - } -} - -func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { - addr.VMID = raw.VMID - addr.ServiceID = raw.ServiceID -} - -// HvsockListener is a socket listener for the AF_HYPERV address family. -type HvsockListener struct { - sock *win32File - addr HvsockAddr -} - -// HvsockConn is a connected socket of the AF_HYPERV address family. -type HvsockConn struct { - sock *win32File - local, remote HvsockAddr -} - -func newHvSocket() (*win32File, error) { - fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) - if err != nil { - return nil, os.NewSyscallError("socket", err) - } - f, err := makeWin32File(fd) - if err != nil { - syscall.Close(fd) - return nil, err - } - f.socket = true - return f, nil -} - -// ListenHvsock listens for connections on the specified hvsock address. 
-func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { - l := &HvsockListener{addr: *addr} - sock, err := newHvSocket() - if err != nil { - return nil, l.opErr("listen", err) - } - sa := addr.raw() - err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("socket", err)) - } - err = syscall.Listen(sock.handle, 16) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("listen", err)) - } - return &HvsockListener{sock: sock, addr: *addr}, nil -} - -func (l *HvsockListener) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} -} - -// Addr returns the listener's network address. -func (l *HvsockListener) Addr() net.Addr { - return &l.addr -} - -// Accept waits for the next connection and returns it. -func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHvSocket() - if err != nil { - return nil, l.opErr("accept", err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := l.sock.prepareIo() - if err != nil { - return nil, l.opErr("accept", err) - } - defer l.sock.wg.Done() - - // AcceptEx, per documentation, requires an extra 16 bytes per address. - const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) - var addrbuf [addrlen * 2]byte - - var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) - _, err = l.sock.asyncIo(c, nil, bytes, err) - if err != nil { - return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) - } - conn := &HvsockConn{ - sock: sock, - } - conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) - conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) - sock = nil - return conn, nil -} - -// Close closes the listener, causing any pending Accept calls to fail. 
-func (l *HvsockListener) Close() error { - return l.sock.Close() -} - -/* Need to finish ConnectEx handling -func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { - sock, err := newHvSocket() - if err != nil { - return nil, err - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := sock.prepareIo() - if err != nil { - return nil, err - } - defer sock.wg.Done() - var bytes uint32 - err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) - _, err = sock.asyncIo(ctx, c, nil, bytes, err) - if err != nil { - return nil, err - } - conn := &HvsockConn{ - sock: sock, - remote: *addr, - } - sock = nil - return conn, nil -} -*/ - -func (conn *HvsockConn) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} -} - -func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIo() - if err != nil { - return 0, conn.opErr("read", err) - } - defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var flags, bytes uint32 - err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) - if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsarecv", err) - } - return 0, conn.opErr("read", err) - } else if n == 0 { - err = io.EOF - } - return n, err -} - -func (conn *HvsockConn) Write(b []byte) (int, error) { - t := 0 - for len(b) != 0 { - n, err := conn.write(b) - if err != nil { - return t + n, err - } - t += n - b = b[n:] - } - return t, nil -} - -func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIo() - if err != nil { - return 0, conn.opErr("write", err) - } - defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var bytes uint32 - err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, 
err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) - if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsasend", err) - } - return 0, conn.opErr("write", err) - } - return n, err -} - -// Close closes the socket connection, failing any pending read or write calls. -func (conn *HvsockConn) Close() error { - return conn.sock.Close() -} - -func (conn *HvsockConn) shutdown(how int) error { - err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) - if err != nil { - return os.NewSyscallError("shutdown", err) - } - return nil -} - -// CloseRead shuts down the read end of the socket. -func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(syscall.SHUT_RD) - if err != nil { - return conn.opErr("close", err) - } - return nil -} - -// CloseWrite shuts down the write end of the socket, notifying the other endpoint that -// no more data will be written. -func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(syscall.SHUT_WR) - if err != nil { - return conn.opErr("close", err) - } - return nil -} - -// LocalAddr returns the local address of the connection. -func (conn *HvsockConn) LocalAddr() net.Addr { - return &conn.local -} - -// RemoteAddr returns the remote address of the connection. -func (conn *HvsockConn) RemoteAddr() net.Addr { - return &conn.remote -} - -// SetDeadline implements the net.Conn SetDeadline method. -func (conn *HvsockConn) SetDeadline(t time.Time) error { - conn.SetReadDeadline(t) - conn.SetWriteDeadline(t) - return nil -} - -// SetReadDeadline implements the net.Conn SetReadDeadline method. -func (conn *HvsockConn) SetReadDeadline(t time.Time) error { - return conn.sock.SetReadDeadline(t) -} - -// SetWriteDeadline implements the net.Conn SetWriteDeadline method. 
-func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { - return conn.sock.SetWriteDeadline(t) -} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go deleted file mode 100644 index 96700a7..0000000 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ /dev/null @@ -1,517 +0,0 @@ -// +build windows - -package winio - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "os" - "runtime" - "syscall" - "time" - "unsafe" -) - -//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW -//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile -//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys 
rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U -//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl - -type ioStatusBlock struct { - Status, Information uintptr -} - -type objectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName *unicodeString - Attributes uintptr - SecurityDescriptor *securityDescriptor - SecurityQoS uintptr -} - -type unicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer uintptr -} - -type securityDescriptor struct { - Revision byte - Sbz1 byte - Control uint16 - Owner uintptr - Group uintptr - Sacl uintptr - Dacl uintptr -} - -type ntstatus int32 - -func (status ntstatus) Err() error { - if status >= 0 { - return nil - } - return rtlNtStatusToDosError(status) -} - -const ( - cERROR_PIPE_BUSY = syscall.Errno(231) - cERROR_NO_DATA = syscall.Errno(232) - cERROR_PIPE_CONNECTED = syscall.Errno(535) - cERROR_SEM_TIMEOUT = syscall.Errno(121) - - cSECURITY_SQOS_PRESENT = 0x100000 - cSECURITY_ANONYMOUS = 0 - - cPIPE_TYPE_MESSAGE = 4 - - cPIPE_READMODE_MESSAGE = 2 - - cFILE_OPEN = 1 - cFILE_CREATE = 2 - - cFILE_PIPE_MESSAGE_TYPE = 1 - cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 - - cSE_DACL_PRESENT = 4 -) - -var ( - // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. - // This error should match net.errClosing since docker takes a dependency on its text. 
- ErrPipeListenerClosed = errors.New("use of closed network connection") - - errPipeWriteClosed = errors.New("pipe has been closed for write") -) - -type win32Pipe struct { - *win32File - path string -} - -type win32MessageBytePipe struct { - win32Pipe - writeClosed bool - readEOF bool -} - -type pipeAddress string - -func (f *win32Pipe) LocalAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) RemoteAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) SetDeadline(t time.Time) error { - f.SetReadDeadline(t) - f.SetWriteDeadline(t) - return nil -} - -// CloseWrite closes the write side of a message pipe in byte mode. -func (f *win32MessageBytePipe) CloseWrite() error { - if f.writeClosed { - return errPipeWriteClosed - } - err := f.win32File.Flush() - if err != nil { - return err - } - _, err = f.win32File.Write(nil) - if err != nil { - return err - } - f.writeClosed = true - return nil -} - -// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since -// they are used to implement CloseWrite(). -func (f *win32MessageBytePipe) Write(b []byte) (int, error) { - if f.writeClosed { - return 0, errPipeWriteClosed - } - if len(b) == 0 { - return 0, nil - } - return f.win32File.Write(b) -} - -// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message -// mode pipe will return io.EOF, as will all subsequent reads. -func (f *win32MessageBytePipe) Read(b []byte) (int, error) { - if f.readEOF { - return 0, io.EOF - } - n, err := f.win32File.Read(b) - if err == io.EOF { - // If this was the result of a zero-byte read, then - // it is possible that the read was due to a zero-size - // message. Since we are simulating CloseWrite with a - // zero-byte message, ensure that all future Read() calls - // also return EOF. 
- f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { - // ERROR_MORE_DATA indicates that the pipe's read mode is message mode - // and the message still has more bytes. Treat this as a success, since - // this package presents all named pipes as byte streams. - err = nil - } - return n, err -} - -func (s pipeAddress) Network() string { - return "pipe" -} - -func (s pipeAddress) String() string { - return string(s) -} - -// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { - for { - - select { - case <-ctx.Done(): - return syscall.Handle(0), ctx.Err() - default: - h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err == nil { - return h, nil - } - if err != cERROR_PIPE_BUSY { - return h, &os.PathError{Err: err, Op: "open", Path: *path} - } - // Wait 10 msec and try again. This is a rather simplistic - // view, as we always try each 10 milliseconds. - time.Sleep(10 * time.Millisecond) - } - } -} - -// DialPipe connects to a named pipe by path, timing out if the connection -// takes longer than the specified duration. If timeout is nil, then we use -// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) -func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { - var absTimeout time.Time - if timeout != nil { - absTimeout = time.Now().Add(*timeout) - } else { - absTimeout = time.Now().Add(2 * time.Second) - } - ctx, _ := context.WithDeadline(context.Background(), absTimeout) - conn, err := DialPipeContext(ctx, path) - if err == context.DeadlineExceeded { - return nil, ErrTimeout - } - return conn, err -} - -// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` -// cancellation or timeout. 
-func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { - return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) -} - -// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` -// cancellation or timeout. -func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { - var err error - var h syscall.Handle - h, err = tryDialPipe(ctx, &path, access) - if err != nil { - return nil, err - } - - var flags uint32 - err = getNamedPipeInfo(h, &flags, nil, nil, nil) - if err != nil { - return nil, err - } - - f, err := makeWin32File(h) - if err != nil { - syscall.Close(h) - return nil, err - } - - // If the pipe is in message mode, return a message byte pipe, which - // supports CloseWrite(). - if flags&cPIPE_TYPE_MESSAGE != 0 { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: f, path: path}, - }, nil - } - return &win32Pipe{win32File: f, path: path}, nil -} - -type acceptResponse struct { - f *win32File - err error -} - -type win32PipeListener struct { - firstHandle syscall.Handle - path string - config PipeConfig - acceptCh chan (chan acceptResponse) - closeCh chan int - doneCh chan int -} - -func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - path16, err := syscall.UTF16FromString(path) - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - var oa objectAttributes - oa.Length = unsafe.Sizeof(oa) - - var ntPath unicodeString - if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - defer localFree(ntPath.Buffer) - oa.ObjectName = &ntPath - - // The security descriptor is only needed for the first pipe. 
- if first { - if sd != nil { - len := uint32(len(sd)) - sdb := localAlloc(0, len) - defer localFree(sdb) - copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) - oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) - } else { - // Construct the default named pipe security descriptor. - var dacl uintptr - if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { - return 0, fmt.Errorf("getting default named pipe ACL: %s", err) - } - defer localFree(dacl) - - sdb := &securityDescriptor{ - Revision: 1, - Control: cSE_DACL_PRESENT, - Dacl: dacl, - } - oa.SecurityDescriptor = sdb - } - } - - typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) - if c.MessageMode { - typ |= cFILE_PIPE_MESSAGE_TYPE - } - - disposition := uint32(cFILE_OPEN) - access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) - if first { - disposition = cFILE_CREATE - // By not asking for read or write access, the named pipe file system - // will put this pipe into an initially disconnected state, blocking - // client connections until the next call with first == false. 
- access = syscall.SYNCHRONIZE - } - - timeout := int64(-50 * 10000) // 50ms - - var ( - h syscall.Handle - iosb ioStatusBlock - ) - err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - runtime.KeepAlive(ntPath) - return h, nil -} - -func (l *win32PipeListener) makeServerPipe() (*win32File, error) { - h, err := makeServerPipeHandle(l.path, nil, &l.config, false) - if err != nil { - return nil, err - } - f, err := makeWin32File(h) - if err != nil { - syscall.Close(h) - return nil, err - } - return f, nil -} - -func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { - p, err := l.makeServerPipe() - if err != nil { - return nil, err - } - - // Wait for the client to connect. - ch := make(chan error) - go func(p *win32File) { - ch <- connectPipe(p) - }(p) - - select { - case err = <-ch: - if err != nil { - p.Close() - p = nil - } - case <-l.closeCh: - // Abort the connect request by closing the handle. - p.Close() - p = nil - err = <-ch - if err == nil || err == ErrFileClosed { - err = ErrPipeListenerClosed - } - } - return p, err -} - -func (l *win32PipeListener) listenerRoutine() { - closed := false - for !closed { - select { - case <-l.closeCh: - closed = true - case responseCh := <-l.acceptCh: - var ( - p *win32File - err error - ) - for { - p, err = l.makeConnectedServerPipe() - // If the connection was immediately closed by the client, try - // again. - if err != cERROR_NO_DATA { - break - } - } - responseCh <- acceptResponse{p, err} - closed = err == ErrPipeListenerClosed - } - } - syscall.Close(l.firstHandle) - l.firstHandle = 0 - // Notify Close() and Accept() callers that the handle has been closed. - close(l.doneCh) -} - -// PipeConfig contain configuration for the pipe listener. 
-type PipeConfig struct { - // SecurityDescriptor contains a Windows security descriptor in SDDL format. - SecurityDescriptor string - - // MessageMode determines whether the pipe is in byte or message mode. In either - // case the pipe is read in byte mode by default. The only practical difference in - // this implementation is that CloseWrite() is only supported for message mode pipes; - // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only - // transferred to the reader (and returned as io.EOF in this implementation) - // when the pipe is in message mode. - MessageMode bool - - // InputBufferSize specifies the size of the input buffer, in bytes. - InputBufferSize int32 - - // OutputBufferSize specifies the size of the output buffer, in bytes. - OutputBufferSize int32 -} - -// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. -// The pipe must not already exist. -func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { - var ( - sd []byte - err error - ) - if c == nil { - c = &PipeConfig{} - } - if c.SecurityDescriptor != "" { - sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) - if err != nil { - return nil, err - } - } - h, err := makeServerPipeHandle(path, sd, c, true) - if err != nil { - return nil, err - } - l := &win32PipeListener{ - firstHandle: h, - path: path, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), - } - go l.listenerRoutine() - return l, nil -} - -func connectPipe(p *win32File) error { - c, err := p.prepareIo() - if err != nil { - return err - } - defer p.wg.Done() - - err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIo(c, nil, 0, err) - if err != nil && err != cERROR_PIPE_CONNECTED { - return err - } - return nil -} - -func (l *win32PipeListener) Accept() (net.Conn, error) { - ch := make(chan acceptResponse) - select { - case l.acceptCh <- ch: - response := <-ch - err := response.err - if err 
!= nil { - return nil, err - } - if l.config.MessageMode { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: response.f, path: l.path}, - }, nil - } - return &win32Pipe{win32File: response.f, path: l.path}, nil - case <-l.doneCh: - return nil, ErrPipeListenerClosed - } -} - -func (l *win32PipeListener) Close() error { - select { - case l.closeCh <- 1: - <-l.doneCh - case <-l.doneCh: - } - return nil -} - -func (l *win32PipeListener) Addr() net.Addr { - return pipeAddress(l.path) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go deleted file mode 100644 index 10cd08d..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package etw provides support for TraceLogging-based ETW (Event Tracing -// for Windows). TraceLogging is a format of ETW events that are self-describing -// (the event contains information on its own schema). This allows them to be -// decoded without needing a separate manifest with event information. The -// implementation here is based on the information found in -// TraceLoggingProvider.h in the Windows SDK, which implements TraceLogging as a -// set of C macros. 
-package etw - -//go:generate go run mksyscall_windows.go -output zsyscall_windows.go etw.go - -//sys eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) = advapi32.EventRegister - -//sys eventUnregister_64(providerHandle providerHandle) (win32err error) = advapi32.EventUnregister -//sys eventWriteTransfer_64(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer -//sys eventSetInformation_64(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation - -//sys eventUnregister_32(providerHandle_low uint32, providerHandle_high uint32) (win32err error) = advapi32.EventUnregister -//sys eventWriteTransfer_32(providerHandle_low uint32, providerHandle_high uint32, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer -//sys eventSetInformation_32(providerHandle_low uint32, providerHandle_high uint32, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go deleted file mode 100644 index abf1680..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build windows - -package etw - -import ( - "bytes" - "encoding/binary" - "syscall" -) - -// eventData maintains a buffer which builds up the data for an ETW event. It -// needs to be paired with EventMetadata which describes the event. 
-type eventData struct { - buffer bytes.Buffer -} - -// bytes returns the raw binary data containing the event data. The returned -// value is not copied from the internal buffer, so it can be mutated by the -// eventData object after it is returned. -func (ed *eventData) bytes() []byte { - return ed.buffer.Bytes() -} - -// writeString appends a string, including the null terminator, to the buffer. -func (ed *eventData) writeString(data string) { - ed.buffer.WriteString(data) - ed.buffer.WriteByte(0) -} - -// writeInt8 appends a int8 to the buffer. -func (ed *eventData) writeInt8(value int8) { - ed.buffer.WriteByte(uint8(value)) -} - -// writeInt16 appends a int16 to the buffer. -func (ed *eventData) writeInt16(value int16) { - binary.Write(&ed.buffer, binary.LittleEndian, value) -} - -// writeInt32 appends a int32 to the buffer. -func (ed *eventData) writeInt32(value int32) { - binary.Write(&ed.buffer, binary.LittleEndian, value) -} - -// writeInt64 appends a int64 to the buffer. -func (ed *eventData) writeInt64(value int64) { - binary.Write(&ed.buffer, binary.LittleEndian, value) -} - -// writeUint8 appends a uint8 to the buffer. -func (ed *eventData) writeUint8(value uint8) { - ed.buffer.WriteByte(value) -} - -// writeUint16 appends a uint16 to the buffer. -func (ed *eventData) writeUint16(value uint16) { - binary.Write(&ed.buffer, binary.LittleEndian, value) -} - -// writeUint32 appends a uint32 to the buffer. -func (ed *eventData) writeUint32(value uint32) { - binary.Write(&ed.buffer, binary.LittleEndian, value) -} - -// writeUint64 appends a uint64 to the buffer. -func (ed *eventData) writeUint64(value uint64) { - binary.Write(&ed.buffer, binary.LittleEndian, value) -} - -// writeFiletime appends a FILETIME to the buffer. 
-func (ed *eventData) writeFiletime(value syscall.Filetime) { - binary.Write(&ed.buffer, binary.LittleEndian, value) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdatadescriptor.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdatadescriptor.go deleted file mode 100644 index 8b0ad48..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdatadescriptor.go +++ /dev/null @@ -1,29 +0,0 @@ -package etw - -import ( - "unsafe" -) - -type eventDataDescriptorType uint8 - -const ( - eventDataDescriptorTypeUserData eventDataDescriptorType = iota - eventDataDescriptorTypeEventMetadata - eventDataDescriptorTypeProviderMetadata -) - -type eventDataDescriptor struct { - ptr ptr64 - size uint32 - dataType eventDataDescriptorType - reserved1 uint8 - reserved2 uint16 -} - -func newEventDataDescriptor(dataType eventDataDescriptorType, buffer []byte) eventDataDescriptor { - return eventDataDescriptor{ - ptr: ptr64{ptr: unsafe.Pointer(&buffer[0])}, - size: uint32(len(buffer)), - dataType: dataType, - } -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdescriptor.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdescriptor.go deleted file mode 100644 index cc41f15..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdescriptor.go +++ /dev/null @@ -1,84 +0,0 @@ -package etw - -// Channel represents the ETW logging channel that is used. It can be used by -// event consumers to give an event special treatment. -type Channel uint8 - -const ( - // ChannelTraceLogging is the default channel for TraceLogging events. It is - // not required to be used for TraceLogging, but will prevent decoding - // issues for these events on older operating systems. - ChannelTraceLogging Channel = 11 -) - -// Level represents the ETW logging level. There are several predefined levels -// that are commonly used, but technically anything from 0-255 is allowed. 
-// Lower levels indicate more important events, and 0 indicates an event that -// will always be collected. -type Level uint8 - -// Predefined ETW log levels from winmeta.xml in the Windows SDK. -const ( - LevelAlways Level = iota - LevelCritical - LevelError - LevelWarning - LevelInfo - LevelVerbose -) - -// Opcode represents the operation that the event indicates is being performed. -type Opcode uint8 - -// Predefined ETW opcodes from winmeta.xml in the Windows SDK. -const ( - // OpcodeInfo indicates an informational event. - OpcodeInfo Opcode = iota - // OpcodeStart indicates the start of an operation. - OpcodeStart - // OpcodeStop indicates the end of an operation. - OpcodeStop - // OpcodeDCStart indicates the start of a provider capture state operation. - OpcodeDCStart - // OpcodeDCStop indicates the end of a provider capture state operation. - OpcodeDCStop -) - -// EventDescriptor represents various metadata for an ETW event. -type eventDescriptor struct { - id uint16 - version uint8 - channel Channel - level Level - opcode Opcode - task uint16 - keyword uint64 -} - -// NewEventDescriptor returns an EventDescriptor initialized for use with -// TraceLogging. -func newEventDescriptor() *eventDescriptor { - // Standard TraceLogging events default to the TraceLogging channel, and - // verbose level. - return &eventDescriptor{ - channel: ChannelTraceLogging, - level: LevelVerbose, - } -} - -// Identity returns the identity of the event. If the identity is not 0, it -// should uniquely identify the other event metadata (contained in -// EventDescriptor, and field metadata). Only the lower 24 bits of this value -// are relevant. -func (ed *eventDescriptor) identity() uint32 { - return (uint32(ed.version) << 16) | uint32(ed.id) -} - -// SetIdentity sets the identity of the event. If the identity is not 0, it -// should uniquely identify the other event metadata (contained in -// EventDescriptor, and field metadata). 
Only the lower 24 bits of this value -// are relevant. -func (ed *eventDescriptor) setIdentity(identity uint32) { - ed.id = uint16(identity) - ed.version = uint8(identity >> 16) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventmetadata.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventmetadata.go deleted file mode 100644 index 6fdc126..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventmetadata.go +++ /dev/null @@ -1,177 +0,0 @@ -package etw - -import ( - "bytes" - "encoding/binary" -) - -// inType indicates the type of data contained in the ETW event. -type inType byte - -// Various inType definitions for TraceLogging. These must match the definitions -// found in TraceLoggingProvider.h in the Windows SDK. -const ( - inTypeNull inType = iota - inTypeUnicodeString - inTypeANSIString - inTypeInt8 - inTypeUint8 - inTypeInt16 - inTypeUint16 - inTypeInt32 - inTypeUint32 - inTypeInt64 - inTypeUint64 - inTypeFloat - inTypeDouble - inTypeBool32 - inTypeBinary - inTypeGUID - inTypePointerUnsupported - inTypeFileTime - inTypeSystemTime - inTypeSID - inTypeHexInt32 - inTypeHexInt64 - inTypeCountedString - inTypeCountedANSIString - inTypeStruct - inTypeCountedBinary - inTypeCountedArray inType = 32 - inTypeArray inType = 64 -) - -// outType specifies a hint to the event decoder for how the value should be -// formatted. -type outType byte - -// Various outType definitions for TraceLogging. These must match the -// definitions found in TraceLoggingProvider.h in the Windows SDK. -const ( - // outTypeDefault indicates that the default formatting for the inType will - // be used by the event decoder. 
- outTypeDefault outType = iota - outTypeNoPrint - outTypeString - outTypeBoolean - outTypeHex - outTypePID - outTypeTID - outTypePort - outTypeIPv4 - outTypeIPv6 - outTypeSocketAddress - outTypeXML - outTypeJSON - outTypeWin32Error - outTypeNTStatus - outTypeHResult - outTypeFileTime - outTypeSigned - outTypeUnsigned - outTypeUTF8 outType = 35 - outTypePKCS7WithTypeInfo outType = 36 - outTypeCodePointer outType = 37 - outTypeDateTimeUTC outType = 38 -) - -// eventMetadata maintains a buffer which builds up the metadata for an ETW -// event. It needs to be paired with EventData which describes the event. -type eventMetadata struct { - buffer bytes.Buffer -} - -// bytes returns the raw binary data containing the event metadata. Before being -// returned, the current size of the buffer is written to the start of the -// buffer. The returned value is not copied from the internal buffer, so it can -// be mutated by the eventMetadata object after it is returned. -func (em *eventMetadata) bytes() []byte { - // Finalize the event metadata buffer by filling in the buffer length at the - // beginning. - binary.LittleEndian.PutUint16(em.buffer.Bytes(), uint16(em.buffer.Len())) - return em.buffer.Bytes() -} - -// writeEventHeader writes the metadata for the start of an event to the buffer. -// This specifies the event name and tags. 
-func (em *eventMetadata) writeEventHeader(name string, tags uint32) { - binary.Write(&em.buffer, binary.LittleEndian, uint16(0)) // Length placeholder - em.writeTags(tags) - em.buffer.WriteString(name) - em.buffer.WriteByte(0) // Null terminator for name -} - -func (em *eventMetadata) writeFieldInner(name string, inType inType, outType outType, tags uint32, arrSize uint16) { - em.buffer.WriteString(name) - em.buffer.WriteByte(0) // Null terminator for name - - if outType == outTypeDefault && tags == 0 { - em.buffer.WriteByte(byte(inType)) - } else { - em.buffer.WriteByte(byte(inType | 128)) - if tags == 0 { - em.buffer.WriteByte(byte(outType)) - } else { - em.buffer.WriteByte(byte(outType | 128)) - em.writeTags(tags) - } - } - - if arrSize != 0 { - binary.Write(&em.buffer, binary.LittleEndian, arrSize) - } -} - -// writeTags writes out the tags value to the event metadata. Tags is a 28-bit -// value, interpreted as bit flags, which are only relevant to the event -// consumer. The event consumer may choose to attribute special meaning to tags -// (e.g. 0x4 could mean the field contains PII). Tags are written as a series of -// bytes, each containing 7 bits of tag value, with the high bit set if there is -// more tag data in the following byte. This allows for a more compact -// representation when not all of the tag bits are needed. -func (em *eventMetadata) writeTags(tags uint32) { - // Only use the top 28 bits of the tags value. - tags &= 0xfffffff - - for { - // Tags are written with the most significant bits (e.g. 21-27) first. - val := tags >> 21 - - if tags&0x1fffff == 0 { - // If there is no more data to write after this, write this value - // without the high bit set, and return. - em.buffer.WriteByte(byte(val & 0x7f)) - return - } - - em.buffer.WriteByte(byte(val | 0x80)) - - tags <<= 7 - } -} - -// writeField writes the metadata for a simple field to the buffer. 
-func (em *eventMetadata) writeField(name string, inType inType, outType outType, tags uint32) { - em.writeFieldInner(name, inType, outType, tags, 0) -} - -// writeArray writes the metadata for an array field to the buffer. The number -// of elements in the array must be written as a uint16 in the event data, -// immediately preceeding the event data. -func (em *eventMetadata) writeArray(name string, inType inType, outType outType, tags uint32) { - em.writeFieldInner(name, inType|inTypeArray, outType, tags, 0) -} - -// writeCountedArray writes the metadata for an array field to the buffer. The -// size of a counted array is fixed, and the size is written into the metadata -// directly. -func (em *eventMetadata) writeCountedArray(name string, count uint16, inType inType, outType outType, tags uint32) { - em.writeFieldInner(name, inType|inTypeCountedArray, outType, tags, count) -} - -// writeStruct writes the metadata for a nested struct to the buffer. The struct -// contains the next N fields in the metadata, where N is specified by the -// fieldCount argument. -func (em *eventMetadata) writeStruct(name string, fieldCount uint8, tags uint32) { - em.writeFieldInner(name, inTypeStruct, outType(fieldCount), tags, 0) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go deleted file mode 100644 index eaace68..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build windows - -package etw - -import ( - "github.com/Microsoft/go-winio/pkg/guid" -) - -type eventOptions struct { - descriptor *eventDescriptor - activityID guid.GUID - relatedActivityID guid.GUID - tags uint32 -} - -// EventOpt defines the option function type that can be passed to -// Provider.WriteEvent to specify general event options, such as level and -// keyword. -type EventOpt func(options *eventOptions) - -// WithEventOpts returns the variadic arguments as a single slice. 
-func WithEventOpts(opts ...EventOpt) []EventOpt { - return opts -} - -// WithLevel specifies the level of the event to be written. -func WithLevel(level Level) EventOpt { - return func(options *eventOptions) { - options.descriptor.level = level - } -} - -// WithKeyword specifies the keywords of the event to be written. Multiple uses -// of this option are OR'd together. -func WithKeyword(keyword uint64) EventOpt { - return func(options *eventOptions) { - options.descriptor.keyword |= keyword - } -} - -// WithChannel specifies the channel of the event to be written. -func WithChannel(channel Channel) EventOpt { - return func(options *eventOptions) { - options.descriptor.channel = channel - } -} - -// WithOpcode specifies the opcode of the event to be written. -func WithOpcode(opcode Opcode) EventOpt { - return func(options *eventOptions) { - options.descriptor.opcode = opcode - } -} - -// WithTags specifies the tags of the event to be written. Tags is a 28-bit -// value (top 4 bits are ignored) which are interpreted by the event consumer. -func WithTags(newTags uint32) EventOpt { - return func(options *eventOptions) { - options.tags |= newTags - } -} - -// WithActivityID specifies the activity ID of the event to be written. -func WithActivityID(activityID guid.GUID) EventOpt { - return func(options *eventOptions) { - options.activityID = activityID - } -} - -// WithRelatedActivityID specifies the parent activity ID of the event to be written. 
-func WithRelatedActivityID(activityID guid.GUID) EventOpt { - return func(options *eventOptions) { - options.relatedActivityID = activityID - } -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go deleted file mode 100644 index b5ea80a..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go +++ /dev/null @@ -1,516 +0,0 @@ -// +build windows - -package etw - -import ( - "fmt" - "math" - "reflect" - "syscall" - "time" - "unsafe" -) - -// FieldOpt defines the option function type that can be passed to -// Provider.WriteEvent to add fields to the event. -type FieldOpt func(em *eventMetadata, ed *eventData) - -// WithFields returns the variadic arguments as a single slice. -func WithFields(opts ...FieldOpt) []FieldOpt { - return opts -} - -// BoolField adds a single bool field to the event. -func BoolField(name string, value bool) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeUint8, outTypeBoolean, 0) - bool8 := uint8(0) - if value { - bool8 = uint8(1) - } - ed.writeUint8(bool8) - } -} - -// BoolArray adds an array of bool to the event. -func BoolArray(name string, values []bool) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeUint8, outTypeBoolean, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - bool8 := uint8(0) - if v { - bool8 = uint8(1) - } - ed.writeUint8(bool8) - } - } -} - -// StringField adds a single string field to the event. -func StringField(name string, value string) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeANSIString, outTypeUTF8, 0) - ed.writeString(value) - } -} - -// StringArray adds an array of string to the event. 
-func StringArray(name string, values []string) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeANSIString, outTypeUTF8, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - ed.writeString(v) - } - } -} - -// IntField adds a single int field to the event. -func IntField(name string, value int) FieldOpt { - switch unsafe.Sizeof(value) { - case 4: - return Int32Field(name, int32(value)) - case 8: - return Int64Field(name, int64(value)) - default: - panic("Unsupported int size") - } -} - -// IntArray adds an array of int to the event. -func IntArray(name string, values []int) FieldOpt { - inType := inTypeNull - var writeItem func(*eventData, int) - switch unsafe.Sizeof(values[0]) { - case 4: - inType = inTypeInt32 - writeItem = func(ed *eventData, item int) { ed.writeInt32(int32(item)) } - case 8: - inType = inTypeInt64 - writeItem = func(ed *eventData, item int) { ed.writeInt64(int64(item)) } - default: - panic("Unsupported int size") - } - - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inType, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - writeItem(ed, v) - } - } -} - -// Int8Field adds a single int8 field to the event. -func Int8Field(name string, value int8) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeInt8, outTypeDefault, 0) - ed.writeInt8(value) - } -} - -// Int8Array adds an array of int8 to the event. -func Int8Array(name string, values []int8) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeInt8, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - ed.writeInt8(v) - } - } -} - -// Int16Field adds a single int16 field to the event. 
-func Int16Field(name string, value int16) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeInt16, outTypeDefault, 0) - ed.writeInt16(value) - } -} - -// Int16Array adds an array of int16 to the event. -func Int16Array(name string, values []int16) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeInt16, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - ed.writeInt16(v) - } - } -} - -// Int32Field adds a single int32 field to the event. -func Int32Field(name string, value int32) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeInt32, outTypeDefault, 0) - ed.writeInt32(value) - } -} - -// Int32Array adds an array of int32 to the event. -func Int32Array(name string, values []int32) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeInt32, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - ed.writeInt32(v) - } - } -} - -// Int64Field adds a single int64 field to the event. -func Int64Field(name string, value int64) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeInt64, outTypeDefault, 0) - ed.writeInt64(value) - } -} - -// Int64Array adds an array of int64 to the event. -func Int64Array(name string, values []int64) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeInt64, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - ed.writeInt64(v) - } - } -} - -// UintField adds a single uint field to the event. -func UintField(name string, value uint) FieldOpt { - switch unsafe.Sizeof(value) { - case 4: - return Uint32Field(name, uint32(value)) - case 8: - return Uint64Field(name, uint64(value)) - default: - panic("Unsupported uint size") - } -} - -// UintArray adds an array of uint to the event. 
-func UintArray(name string, values []uint) FieldOpt { - inType := inTypeNull - var writeItem func(*eventData, uint) - switch unsafe.Sizeof(values[0]) { - case 4: - inType = inTypeUint32 - writeItem = func(ed *eventData, item uint) { ed.writeUint32(uint32(item)) } - case 8: - inType = inTypeUint64 - writeItem = func(ed *eventData, item uint) { ed.writeUint64(uint64(item)) } - default: - panic("Unsupported uint size") - } - - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inType, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - writeItem(ed, v) - } - } -} - -// Uint8Field adds a single uint8 field to the event. -func Uint8Field(name string, value uint8) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeUint8, outTypeDefault, 0) - ed.writeUint8(value) - } -} - -// Uint8Array adds an array of uint8 to the event. -func Uint8Array(name string, values []uint8) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeUint8, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - ed.writeUint8(v) - } - } -} - -// Uint16Field adds a single uint16 field to the event. -func Uint16Field(name string, value uint16) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeUint16, outTypeDefault, 0) - ed.writeUint16(value) - } -} - -// Uint16Array adds an array of uint16 to the event. -func Uint16Array(name string, values []uint16) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeUint16, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - ed.writeUint16(v) - } - } -} - -// Uint32Field adds a single uint32 field to the event. 
-func Uint32Field(name string, value uint32) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeUint32, outTypeDefault, 0) - ed.writeUint32(value) - } -} - -// Uint32Array adds an array of uint32 to the event. -func Uint32Array(name string, values []uint32) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeUint32, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - ed.writeUint32(v) - } - } -} - -// Uint64Field adds a single uint64 field to the event. -func Uint64Field(name string, value uint64) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeUint64, outTypeDefault, 0) - ed.writeUint64(value) - } -} - -// Uint64Array adds an array of uint64 to the event. -func Uint64Array(name string, values []uint64) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeUint64, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - ed.writeUint64(v) - } - } -} - -// UintptrField adds a single uintptr field to the event. -func UintptrField(name string, value uintptr) FieldOpt { - inType := inTypeNull - var writeItem func(*eventData, uintptr) - switch unsafe.Sizeof(value) { - case 4: - inType = inTypeHexInt32 - writeItem = func(ed *eventData, item uintptr) { ed.writeUint32(uint32(item)) } - case 8: - inType = inTypeHexInt64 - writeItem = func(ed *eventData, item uintptr) { ed.writeUint64(uint64(item)) } - default: - panic("Unsupported uintptr size") - } - - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inType, outTypeDefault, 0) - writeItem(ed, value) - } -} - -// UintptrArray adds an array of uintptr to the event. 
-func UintptrArray(name string, values []uintptr) FieldOpt { - inType := inTypeNull - var writeItem func(*eventData, uintptr) - switch unsafe.Sizeof(values[0]) { - case 4: - inType = inTypeHexInt32 - writeItem = func(ed *eventData, item uintptr) { ed.writeUint32(uint32(item)) } - case 8: - inType = inTypeHexInt64 - writeItem = func(ed *eventData, item uintptr) { ed.writeUint64(uint64(item)) } - default: - panic("Unsupported uintptr size") - } - - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inType, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - writeItem(ed, v) - } - } -} - -// Float32Field adds a single float32 field to the event. -func Float32Field(name string, value float32) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeFloat, outTypeDefault, 0) - ed.writeUint32(math.Float32bits(value)) - } -} - -// Float32Array adds an array of float32 to the event. -func Float32Array(name string, values []float32) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeFloat, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - ed.writeUint32(math.Float32bits(v)) - } - } -} - -// Float64Field adds a single float64 field to the event. -func Float64Field(name string, value float64) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeDouble, outTypeDefault, 0) - ed.writeUint64(math.Float64bits(value)) - } -} - -// Float64Array adds an array of float64 to the event. 
-func Float64Array(name string, values []float64) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeArray(name, inTypeDouble, outTypeDefault, 0) - ed.writeUint16(uint16(len(values))) - for _, v := range values { - ed.writeUint64(math.Float64bits(v)) - } - } -} - -// Struct adds a nested struct to the event, the FieldOpts in the opts argument -// are used to specify the fields of the struct. -func Struct(name string, opts ...FieldOpt) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeStruct(name, uint8(len(opts)), 0) - for _, opt := range opts { - opt(em, ed) - } - } -} - -// Time adds a time to the event. -func Time(name string, value time.Time) FieldOpt { - return func(em *eventMetadata, ed *eventData) { - em.writeField(name, inTypeFileTime, outTypeDateTimeUTC, 0) - ed.writeFiletime(syscall.NsecToFiletime(value.UTC().UnixNano())) - } -} - -// Currently, we support logging basic builtin types (int, string, etc), slices -// of basic builtin types, error, types derived from the basic types (e.g. "type -// foo int"), and structs (recursively logging their fields). We do not support -// slices of derived types (e.g. "[]foo"). -// -// For types that we don't support, the value is formatted via fmt.Sprint, and -// we also log a message that the type is unsupported along with the formatted -// type. The intent of this is to make it easier to see which types are not -// supported in traces, so we can evaluate adding support for more types in the -// future. 
-func SmartField(name string, v interface{}) FieldOpt { - switch v := v.(type) { - case bool: - return BoolField(name, v) - case []bool: - return BoolArray(name, v) - case string: - return StringField(name, v) - case []string: - return StringArray(name, v) - case int: - return IntField(name, v) - case []int: - return IntArray(name, v) - case int8: - return Int8Field(name, v) - case []int8: - return Int8Array(name, v) - case int16: - return Int16Field(name, v) - case []int16: - return Int16Array(name, v) - case int32: - return Int32Field(name, v) - case []int32: - return Int32Array(name, v) - case int64: - return Int64Field(name, v) - case []int64: - return Int64Array(name, v) - case uint: - return UintField(name, v) - case []uint: - return UintArray(name, v) - case uint8: - return Uint8Field(name, v) - case []uint8: - return Uint8Array(name, v) - case uint16: - return Uint16Field(name, v) - case []uint16: - return Uint16Array(name, v) - case uint32: - return Uint32Field(name, v) - case []uint32: - return Uint32Array(name, v) - case uint64: - return Uint64Field(name, v) - case []uint64: - return Uint64Array(name, v) - case uintptr: - return UintptrField(name, v) - case []uintptr: - return UintptrArray(name, v) - case float32: - return Float32Field(name, v) - case []float32: - return Float32Array(name, v) - case float64: - return Float64Field(name, v) - case []float64: - return Float64Array(name, v) - case error: - return StringField(name, v.Error()) - case time.Time: - return Time(name, v) - default: - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Bool: - return SmartField(name, rv.Bool()) - case reflect.Int: - return SmartField(name, int(rv.Int())) - case reflect.Int8: - return SmartField(name, int8(rv.Int())) - case reflect.Int16: - return SmartField(name, int16(rv.Int())) - case reflect.Int32: - return SmartField(name, int32(rv.Int())) - case reflect.Int64: - return SmartField(name, int64(rv.Int())) - case reflect.Uint: - return SmartField(name, 
uint(rv.Uint())) - case reflect.Uint8: - return SmartField(name, uint8(rv.Uint())) - case reflect.Uint16: - return SmartField(name, uint16(rv.Uint())) - case reflect.Uint32: - return SmartField(name, uint32(rv.Uint())) - case reflect.Uint64: - return SmartField(name, uint64(rv.Uint())) - case reflect.Uintptr: - return SmartField(name, uintptr(rv.Uint())) - case reflect.Float32: - return SmartField(name, float32(rv.Float())) - case reflect.Float64: - return SmartField(name, float64(rv.Float())) - case reflect.String: - return SmartField(name, rv.String()) - case reflect.Struct: - fields := make([]FieldOpt, 0, rv.NumField()) - for i := 0; i < rv.NumField(); i++ { - field := rv.Field(i) - if field.CanInterface() { - fields = append(fields, SmartField(name, field.Interface())) - } - } - return Struct(name, fields...) - } - } - - return StringField(name, fmt.Sprintf("(Unsupported: %T) %v", v, v)) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go deleted file mode 100644 index 581ef59..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build windows -// +build amd64 arm64 386 - -package etw - -import ( - "bytes" - "encoding/binary" - "unsafe" - - "github.com/Microsoft/go-winio/pkg/guid" - "golang.org/x/sys/windows" -) - -// NewProviderWithOptions creates and registers a new ETW provider, allowing -// the provider ID and Group to be manually specified. This is most useful when -// there is an existing provider ID that must be used to conform to existing -// diagnostic infrastructure. 
-func NewProviderWithOptions(name string, options ...ProviderOpt) (provider *Provider, err error) { - var opts providerOpts - for _, opt := range options { - opt(&opts) - } - - if opts.id == (guid.GUID{}) { - opts.id = providerIDFromName(name) - } - - providerCallbackOnce.Do(func() { - globalProviderCallback = windows.NewCallback(providerCallbackAdapter) - }) - - provider = providers.newProvider() - defer func(provider *Provider) { - if err != nil { - providers.removeProvider(provider) - } - }(provider) - provider.ID = opts.id - provider.callback = opts.callback - - if err := eventRegister((*windows.GUID)(&provider.ID), globalProviderCallback, uintptr(provider.index), &provider.handle); err != nil { - return nil, err - } - - trait := &bytes.Buffer{} - if opts.group != (guid.GUID{}) { - binary.Write(trait, binary.LittleEndian, uint16(0)) // Write empty size for buffer (update later) - binary.Write(trait, binary.LittleEndian, uint8(1)) // EtwProviderTraitTypeGroup - traitArray := opts.group.ToWindowsArray() // Append group guid - trait.Write(traitArray[:]) - binary.LittleEndian.PutUint16(trait.Bytes(), uint16(trait.Len())) // Update size - } - - metadata := &bytes.Buffer{} - binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later) - metadata.WriteString(name) - metadata.WriteByte(0) // Null terminator for name - trait.WriteTo(metadata) // Add traits if applicable - binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len())) // Update the size at the beginning of the buffer - provider.metadata = metadata.Bytes() - - if err := eventSetInformation( - provider.handle, - eventInfoClassProviderSetTraits, - uintptr(unsafe.Pointer(&provider.metadata[0])), - uint32(len(provider.metadata))); err != nil { - - return nil, err - } - - return provider, nil -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go deleted 
file mode 100644 index 5a05c13..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows -// +build arm - -package etw - -// NewProviderWithID returns a nil provider on unsupported platforms. -func NewProviderWithOptions(name string, options ...ProviderOpt) (provider *Provider, err error) { - return nil, nil -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go deleted file mode 100644 index e912b51..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go +++ /dev/null @@ -1,282 +0,0 @@ -// +build windows - -package etw - -import ( - "crypto/sha1" - "encoding/binary" - "strings" - "unicode/utf16" - - "github.com/Microsoft/go-winio/pkg/guid" - "golang.org/x/sys/windows" -) - -// Provider represents an ETW event provider. It is identified by a provider -// name and ID (GUID), which should always have a 1:1 mapping to each other -// (e.g. don't use multiple provider names with the same ID, or vice versa). -type Provider struct { - ID guid.GUID - handle providerHandle - metadata []byte - callback EnableCallback - index uint - enabled bool - level Level - keywordAny uint64 - keywordAll uint64 -} - -// String returns the `provider`.ID as a string -func (provider *Provider) String() string { - if provider == nil { - return "" - } - - return provider.ID.String() -} - -type providerHandle uint64 - -// ProviderState informs the provider EnableCallback what action is being -// performed. -type ProviderState uint32 - -const ( - // ProviderStateDisable indicates the provider is being disabled. - ProviderStateDisable ProviderState = iota - // ProviderStateEnable indicates the provider is being enabled. - ProviderStateEnable - // ProviderStateCaptureState indicates the provider is having its current - // state snap-shotted. 
- ProviderStateCaptureState -) - -type eventInfoClass uint32 - -const ( - eventInfoClassProviderBinaryTrackInfo eventInfoClass = iota - eventInfoClassProviderSetReserved1 - eventInfoClassProviderSetTraits - eventInfoClassProviderUseDescriptorType -) - -// EnableCallback is the form of the callback function that receives provider -// enable/disable notifications from ETW. -type EnableCallback func(guid.GUID, ProviderState, Level, uint64, uint64, uintptr) - -func providerCallback(sourceID guid.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) { - provider := providers.getProvider(uint(i)) - - switch state { - case ProviderStateDisable: - provider.enabled = false - case ProviderStateEnable: - provider.enabled = true - provider.level = level - provider.keywordAny = matchAnyKeyword - provider.keywordAll = matchAllKeyword - } - - if provider.callback != nil { - provider.callback(sourceID, state, level, matchAnyKeyword, matchAllKeyword, filterData) - } -} - -// providerCallbackAdapter acts as the first-level callback from the C/ETW side -// for provider notifications. Because Go has trouble with callback arguments of -// different size, it has only pointer-sized arguments, which are then cast to -// the appropriate types when calling providerCallback. -func providerCallbackAdapter(sourceID *guid.GUID, state uintptr, level uintptr, matchAnyKeyword uintptr, matchAllKeyword uintptr, filterData uintptr, i uintptr) uintptr { - providerCallback(*sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i) - return 0 -} - -// providerIDFromName generates a provider ID based on the provider name. It -// uses the same algorithm as used by .NET's EventSource class, which is based -// on RFC 4122. 
More information on the algorithm can be found here: -// https://blogs.msdn.microsoft.com/dcook/2015/09/08/etw-provider-names-and-guids/ -// -// The algorithm is roughly the RFC 4122 algorithm for a V5 UUID, but differs in -// the following ways: -// - The input name is first upper-cased, UTF16-encoded, and converted to -// big-endian. -// - No variant is set on the result UUID. -// - The result UUID is treated as being in little-endian format, rather than -// big-endian. -func providerIDFromName(name string) guid.GUID { - buffer := sha1.New() - namespace := guid.GUID{0x482C2DB2, 0xC390, 0x47C8, [8]byte{0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB}} - namespaceBytes := namespace.ToArray() - buffer.Write(namespaceBytes[:]) - binary.Write(buffer, binary.BigEndian, utf16.Encode([]rune(strings.ToUpper(name)))) - - sum := buffer.Sum(nil) - sum[7] = (sum[7] & 0xf) | 0x50 - - a := [16]byte{} - copy(a[:], sum) - return guid.FromWindowsArray(a) -} - -type providerOpts struct { - callback EnableCallback - id guid.GUID - group guid.GUID -} - -// ProviderOpt allows the caller to specify provider options to -// NewProviderWithOptions -type ProviderOpt func(*providerOpts) - -// WithCallback is used to provide a callback option to NewProviderWithOptions -func WithCallback(callback EnableCallback) ProviderOpt { - return func(opts *providerOpts) { - opts.callback = callback - } -} - -// WithID is used to provide a provider ID option to NewProviderWithOptions -func WithID(id guid.GUID) ProviderOpt { - return func(opts *providerOpts) { - opts.id = id - } -} - -// WithGroup is used to provide a provider group option to -// NewProviderWithOptions -func WithGroup(group guid.GUID) ProviderOpt { - return func(opts *providerOpts) { - opts.group = group - } -} - -// NewProviderWithID creates and registers a new ETW provider, allowing the -// provider ID to be manually specified. 
This is most useful when there is an -// existing provider ID that must be used to conform to existing diagnostic -// infrastructure. -func NewProviderWithID(name string, id guid.GUID, callback EnableCallback) (provider *Provider, err error) { - return NewProviderWithOptions(name, WithID(id), WithCallback(callback)) -} - -// NewProvider creates and registers a new ETW provider. The provider ID is -// generated based on the provider name. -func NewProvider(name string, callback EnableCallback) (provider *Provider, err error) { - return NewProviderWithOptions(name, WithCallback(callback)) -} - -// Close unregisters the provider. -func (provider *Provider) Close() error { - if provider == nil { - return nil - } - - providers.removeProvider(provider) - return eventUnregister(provider.handle) -} - -// IsEnabled calls IsEnabledForLevelAndKeywords with LevelAlways and all -// keywords set. -func (provider *Provider) IsEnabled() bool { - return provider.IsEnabledForLevelAndKeywords(LevelAlways, ^uint64(0)) -} - -// IsEnabledForLevel calls IsEnabledForLevelAndKeywords with the specified level -// and all keywords set. -func (provider *Provider) IsEnabledForLevel(level Level) bool { - return provider.IsEnabledForLevelAndKeywords(level, ^uint64(0)) -} - -// IsEnabledForLevelAndKeywords allows event producer code to check if there are -// any event sessions that are interested in an event, based on the event level -// and keywords. Although this check happens automatically in the ETW -// infrastructure, it can be useful to check if an event will actually be -// consumed before doing expensive work to build the event data. -func (provider *Provider) IsEnabledForLevelAndKeywords(level Level, keywords uint64) bool { - if provider == nil { - return false - } - - if !provider.enabled { - return false - } - - // ETW automatically sets the level to 255 if it is specified as 0, so we - // don't need to worry about the level=0 (all events) case. 
- if level > provider.level { - return false - } - - if keywords != 0 && (keywords&provider.keywordAny == 0 || keywords&provider.keywordAll != provider.keywordAll) { - return false - } - - return true -} - -// WriteEvent writes a single ETW event from the provider. The event is -// constructed based on the EventOpt and FieldOpt values that are passed as -// opts. -func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpts []FieldOpt) error { - if provider == nil { - return nil - } - - options := eventOptions{descriptor: newEventDescriptor()} - em := &eventMetadata{} - ed := &eventData{} - - // We need to evaluate the EventOpts first since they might change tags, and - // we write out the tags before evaluating FieldOpts. - for _, opt := range eventOpts { - opt(&options) - } - - if !provider.IsEnabledForLevelAndKeywords(options.descriptor.level, options.descriptor.keyword) { - return nil - } - - em.writeEventHeader(name, options.tags) - - for _, opt := range fieldOpts { - opt(em, ed) - } - - // Don't pass a data blob if there is no event data. There will always be - // event metadata (e.g. for the name) so we don't need to do this check for - // the metadata. - dataBlobs := [][]byte{} - if len(ed.bytes()) > 0 { - dataBlobs = [][]byte{ed.bytes()} - } - - return provider.writeEventRaw(options.descriptor, options.activityID, options.relatedActivityID, [][]byte{em.bytes()}, dataBlobs) -} - -// writeEventRaw writes a single ETW event from the provider. This function is -// less abstracted than WriteEvent, and presents a fairly direct interface to -// the event writing functionality. It expects a series of event metadata and -// event data blobs to be passed in, which must conform to the TraceLogging -// schema. The functions on EventMetadata and EventData can help with creating -// these blobs. The blobs of each type are effectively concatenated together by -// the ETW infrastructure. 
-func (provider *Provider) writeEventRaw( - descriptor *eventDescriptor, - activityID guid.GUID, - relatedActivityID guid.GUID, - metadataBlobs [][]byte, - dataBlobs [][]byte) error { - - dataDescriptorCount := uint32(1 + len(metadataBlobs) + len(dataBlobs)) - dataDescriptors := make([]eventDataDescriptor, 0, dataDescriptorCount) - - dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeProviderMetadata, provider.metadata)) - for _, blob := range metadataBlobs { - dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeEventMetadata, blob)) - } - for _, blob := range dataBlobs { - dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeUserData, blob)) - } - - return eventWriteTransfer(provider.handle, descriptor, (*windows.GUID)(&activityID), (*windows.GUID)(&relatedActivityID), dataDescriptorCount, &dataDescriptors[0]) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/providerglobal.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/providerglobal.go deleted file mode 100644 index ce3d305..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/providerglobal.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build windows - -package etw - -import ( - "sync" -) - -// Because the provider callback function needs to be able to access the -// provider data when it is invoked by ETW, we need to keep provider data stored -// in a global map based on an index. The index is passed as the callback -// context to ETW. 
-type providerMap struct { - m map[uint]*Provider - i uint - lock sync.Mutex - once sync.Once -} - -var providers = providerMap{ - m: make(map[uint]*Provider), -} - -func (p *providerMap) newProvider() *Provider { - p.lock.Lock() - defer p.lock.Unlock() - - i := p.i - p.i++ - - provider := &Provider{ - index: i, - } - - p.m[i] = provider - return provider -} - -func (p *providerMap) removeProvider(provider *Provider) { - p.lock.Lock() - defer p.lock.Unlock() - - delete(p.m, provider.index) -} - -func (p *providerMap) getProvider(index uint) *Provider { - p.lock.Lock() - defer p.lock.Unlock() - - return p.m[index] -} - -var providerCallbackOnce sync.Once -var globalProviderCallback uintptr diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_32.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_32.go deleted file mode 100644 index d1a7612..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_32.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build 386 arm - -package etw - -import ( - "unsafe" -) - -// byteptr64 defines a struct containing a pointer. The struct is guaranteed to -// be 64 bits, regardless of the actual size of a pointer on the platform. This -// is intended for use with certain Windows APIs that expect a pointer as a -// ULONGLONG. -type ptr64 struct { - ptr unsafe.Pointer - _ uint32 -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_64.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_64.go deleted file mode 100644 index b86c8f2..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_64.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build amd64 arm64 - -package etw - -import ( - "unsafe" -) - -// byteptr64 defines a struct containing a pointer. The struct is guaranteed to -// be 64 bits, regardless of the actual size of a pointer on the platform. This -// is intended for use with certain Windows APIs that expect a pointer as a -// ULONGLONG. 
-type ptr64 struct { - ptr unsafe.Pointer -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go deleted file mode 100644 index 5766d4d..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build windows -// +build 386 arm - -package etw - -import ( - "golang.org/x/sys/windows" -) - -func low(v providerHandle) uint32 { - return uint32(v & 0xffffffff) -} - -func high(v providerHandle) uint32 { - return low(v >> 32) -} - -func eventUnregister(providerHandle providerHandle) (win32err error) { - return eventUnregister_32(low(providerHandle), high(providerHandle)) -} - -func eventWriteTransfer( - providerHandle providerHandle, - descriptor *eventDescriptor, - activityID *windows.GUID, - relatedActivityID *windows.GUID, - dataDescriptorCount uint32, - dataDescriptors *eventDataDescriptor) (win32err error) { - - return eventWriteTransfer_32( - low(providerHandle), - high(providerHandle), - descriptor, - activityID, - relatedActivityID, - dataDescriptorCount, - dataDescriptors) -} - -func eventSetInformation( - providerHandle providerHandle, - class eventInfoClass, - information uintptr, - length uint32) (win32err error) { - - return eventSetInformation_32( - low(providerHandle), - high(providerHandle), - class, - information, - length) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go deleted file mode 100644 index c78d8d2..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build windows -// +build amd64 arm64 - -package etw - -import ( - "golang.org/x/sys/windows" -) - -func eventUnregister(providerHandle providerHandle) (win32err error) { - return eventUnregister_64(providerHandle) -} - -func eventWriteTransfer( - providerHandle providerHandle, - descriptor *eventDescriptor, - activityID 
*windows.GUID, - relatedActivityID *windows.GUID, - dataDescriptorCount uint32, - dataDescriptors *eventDataDescriptor) (win32err error) { - - return eventWriteTransfer_64( - providerHandle, - descriptor, - activityID, - relatedActivityID, - dataDescriptorCount, - dataDescriptors) -} - -func eventSetInformation( - providerHandle providerHandle, - class eventInfoClass, - information uintptr, - length uint32) (win32err error) { - - return eventSetInformation_64( - providerHandle, - class, - information, - length) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go deleted file mode 100644 index 719b13d..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go +++ /dev/null @@ -1,103 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package etw - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - - procEventRegister = modadvapi32.NewProc("EventRegister") - procEventSetInformation = modadvapi32.NewProc("EventSetInformation") - procEventUnregister = modadvapi32.NewProc("EventUnregister") - procEventWriteTransfer = modadvapi32.NewProc("EventWriteTransfer") -) - -func eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) { - r0, _, _ := syscall.Syscall6(procEventRegister.Addr(), 4, uintptr(unsafe.Pointer(providerId)), uintptr(callback), uintptr(callbackContext), uintptr(unsafe.Pointer(providerHandle)), 0, 0) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func eventSetInformation_64(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) { - r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 4, uintptr(providerHandle), uintptr(class), uintptr(information), uintptr(length), 0, 0) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func eventSetInformation_32(providerHandle_low uint32, providerHandle_high uint32, class eventInfoClass, information uintptr, length uint32) (win32err error) { - r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 5, uintptr(providerHandle_low), uintptr(providerHandle_high), uintptr(class), uintptr(information), uintptr(length), 0) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func eventUnregister_64(providerHandle providerHandle) (win32err error) { - r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 1, uintptr(providerHandle), 0, 0) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func eventUnregister_32(providerHandle_low uint32, providerHandle_high uint32) (win32err error) { - r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 2, uintptr(providerHandle_low), uintptr(providerHandle_high), 0) - if r0 != 0 { - win32err = 
syscall.Errno(r0) - } - return -} - -func eventWriteTransfer_64(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) { - r0, _, _ := syscall.Syscall6(procEventWriteTransfer.Addr(), 6, uintptr(providerHandle), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors))) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func eventWriteTransfer_32(providerHandle_low uint32, providerHandle_high uint32, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) { - r0, _, _ := syscall.Syscall9(procEventWriteTransfer.Addr(), 7, uintptr(providerHandle_low), uintptr(providerHandle_high), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors)), 0, 0) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/HookTest.wprp b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/HookTest.wprp deleted file mode 100644 index b67fb6a..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/HookTest.wprp +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go deleted file mode 100644 index 4332af5..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go +++ /dev/null @@ -1,107 +0,0 @@ -// +build windows - -package etwlogrus - -import ( - "sort" - - "github.com/Microsoft/go-winio/pkg/etw" - 
"github.com/sirupsen/logrus" -) - -// Hook is a Logrus hook which logs received events to ETW. -type Hook struct { - provider *etw.Provider - closeProvider bool -} - -// NewHook registers a new ETW provider and returns a hook to log from it. The -// provider will be closed when the hook is closed. -func NewHook(providerName string) (*Hook, error) { - provider, err := etw.NewProvider(providerName, nil) - if err != nil { - return nil, err - } - - return &Hook{provider, true}, nil -} - -// NewHookFromProvider creates a new hook based on an existing ETW provider. The -// provider will not be closed when the hook is closed. -func NewHookFromProvider(provider *etw.Provider) (*Hook, error) { - return &Hook{provider, false}, nil -} - -// Levels returns the set of levels that this hook wants to receive log entries -// for. -func (h *Hook) Levels() []logrus.Level { - return logrus.AllLevels -} - -var logrusToETWLevelMap = map[logrus.Level]etw.Level{ - logrus.PanicLevel: etw.LevelAlways, - logrus.FatalLevel: etw.LevelCritical, - logrus.ErrorLevel: etw.LevelError, - logrus.WarnLevel: etw.LevelWarning, - logrus.InfoLevel: etw.LevelInfo, - logrus.DebugLevel: etw.LevelVerbose, - logrus.TraceLevel: etw.LevelVerbose, -} - -// Fire receives each Logrus entry as it is logged, and logs it to ETW. -func (h *Hook) Fire(e *logrus.Entry) error { - // Logrus defines more levels than ETW typically uses, but analysis is - // easiest when using a consistent set of levels across ETW providers, so we - // map the Logrus levels to ETW levels. - level := logrusToETWLevelMap[e.Level] - if !h.provider.IsEnabledForLevel(level) { - return nil - } - - // Sort the fields by name so they are consistent in each instance - // of an event. Otherwise, the fields don't line up in WPA. - names := make([]string, 0, len(e.Data)) - hasError := false - for k := range e.Data { - if k == logrus.ErrorKey { - // Always put the error last because it is optional in some events. 
- hasError = true - } else { - names = append(names, k) - } - } - sort.Strings(names) - - // Reserve extra space for the message and time fields. - fields := make([]etw.FieldOpt, 0, len(e.Data)+2) - fields = append(fields, etw.StringField("Message", e.Message)) - fields = append(fields, etw.Time("Time", e.Time)) - for _, k := range names { - fields = append(fields, etw.SmartField(k, e.Data[k])) - } - if hasError { - fields = append(fields, etw.SmartField(logrus.ErrorKey, e.Data[logrus.ErrorKey])) - } - - // Firing an ETW event is essentially best effort, as the event write can - // fail for reasons completely out of the control of the event writer (such - // as a session listening for the event having no available space in its - // buffers). Therefore, we don't return the error from WriteEvent, as it is - // just noise in many cases. - h.provider.WriteEvent( - "LogrusEntry", - etw.WithEventOpts(etw.WithLevel(level)), - fields) - - return nil -} - -// Close cleans up the hook and closes the ETW provider. If the provder was -// registered by etwlogrus, it will be closed as part of `Close`. If the -// provider was passed in, it will not be closed. -func (h *Hook) Close() error { - if h.closeProvider { - return h.provider.Close() - } - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/fs/fs_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/fs/fs_windows.go deleted file mode 100644 index 53bc797..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/fs/fs_windows.go +++ /dev/null @@ -1,31 +0,0 @@ -package fs - -import ( - "errors" - "path/filepath" - - "golang.org/x/sys/windows" -) - -var ( - // ErrInvalidPath is returned when the location of a file path doesn't begin with a driver letter. - ErrInvalidPath = errors.New("the path provided to GetFileSystemType must start with a drive letter") -) - -// GetFileSystemType obtains the type of a file system through GetVolumeInformation. 
-// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx -func GetFileSystemType(path string) (fsType string, err error) { - drive := filepath.VolumeName(path) - if len(drive) != 2 { - return "", ErrInvalidPath - } - - var ( - buf = make([]uint16, 255) - size = uint32(windows.MAX_PATH + 1) - ) - drive += `\` - err = windows.GetVolumeInformation(windows.StringToUTF16Ptr(drive), nil, 0, nil, nil, nil, &buf[0], size) - fsType = windows.UTF16ToString(buf) - return -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go deleted file mode 100644 index f497c0e..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ /dev/null @@ -1,237 +0,0 @@ -// +build windows - -// Package guid provides a GUID type. The backing structure for a GUID is -// identical to that used by the golang.org/x/sys/windows GUID type. -// There are two main binary encodings used for a GUID, the big-endian encoding, -// and the Windows (mixed-endian) encoding. See here for details: -// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding -package guid - -import ( - "crypto/rand" - "crypto/sha1" - "encoding" - "encoding/binary" - "fmt" - "strconv" - - "golang.org/x/sys/windows" -) - -// Variant specifies which GUID variant (or "type") of the GUID. It determines -// how the entirety of the rest of the GUID is interpreted. -type Variant uint8 - -// The variants specified by RFC 4122. -const ( - // VariantUnknown specifies a GUID variant which does not conform to one of - // the variant encodings specified in RFC 4122. - VariantUnknown Variant = iota - VariantNCS - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// Version specifies how the bits in the GUID were generated. For instance, a -// version 4 GUID is randomly generated, and a version 5 is generated from the -// hash of an input string. 
-type Version uint8 - -var _ = (encoding.TextMarshaler)(GUID{}) -var _ = (encoding.TextUnmarshaler)(&GUID{}) - -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID - -// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. -func NewV4() (GUID, error) { - var b [16]byte - if _, err := rand.Read(b[:]); err != nil { - return GUID{}, err - } - - g := FromArray(b) - g.setVersion(4) // Version 4 means randomly generated. - g.setVariant(VariantRFC4122) - - return g, nil -} - -// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) -// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, -// and the sample code treats it as a series of bytes, so we do the same here. -// -// Some implementations, such as those found on Windows, treat the name as a -// big-endian UTF16 stream of bytes. If that is desired, the string can be -// encoded as such before being passed to this function. -func NewV5(namespace GUID, name []byte) (GUID, error) { - b := sha1.New() - namespaceBytes := namespace.ToArray() - b.Write(namespaceBytes[:]) - b.Write(name) - - a := [16]byte{} - copy(a[:], b.Sum(nil)) - - g := FromArray(a) - g.setVersion(5) // Version 5 means generated from a string. 
- g.setVariant(VariantRFC4122) - - return g, nil -} - -func fromArray(b [16]byte, order binary.ByteOrder) GUID { - var g GUID - g.Data1 = order.Uint32(b[0:4]) - g.Data2 = order.Uint16(b[4:6]) - g.Data3 = order.Uint16(b[6:8]) - copy(g.Data4[:], b[8:16]) - return g -} - -func (g GUID) toArray(order binary.ByteOrder) [16]byte { - b := [16]byte{} - order.PutUint32(b[0:4], g.Data1) - order.PutUint16(b[4:6], g.Data2) - order.PutUint16(b[6:8], g.Data3) - copy(b[8:16], g.Data4[:]) - return b -} - -// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. -func FromArray(b [16]byte) GUID { - return fromArray(b, binary.BigEndian) -} - -// ToArray returns an array of 16 bytes representing the GUID in big-endian -// encoding. -func (g GUID) ToArray() [16]byte { - return g.toArray(binary.BigEndian) -} - -// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. -func FromWindowsArray(b [16]byte) GUID { - return fromArray(b, binary.LittleEndian) -} - -// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows -// encoding. -func (g GUID) ToWindowsArray() [16]byte { - return g.toArray(binary.LittleEndian) -} - -func (g GUID) String() string { - return fmt.Sprintf( - "%08x-%04x-%04x-%04x-%012x", - g.Data1, - g.Data2, - g.Data3, - g.Data4[:2], - g.Data4[2:]) -} - -// FromString parses a string containing a GUID and returns the GUID. The only -// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -// format. 
-func FromString(s string) (GUID, error) { - if len(s) != 36 { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - - var g GUID - - data1, err := strconv.ParseUint(s[0:8], 16, 32) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data1 = uint32(data1) - - data2, err := strconv.ParseUint(s[9:13], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data2 = uint16(data2) - - data3, err := strconv.ParseUint(s[14:18], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data3 = uint16(data3) - - for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { - v, err := strconv.ParseUint(s[x:x+2], 16, 8) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data4[i] = uint8(v) - } - - return g, nil -} - -func (g *GUID) setVariant(v Variant) { - d := g.Data4[0] - switch v { - case VariantNCS: - d = (d & 0x7f) - case VariantRFC4122: - d = (d & 0x3f) | 0x80 - case VariantMicrosoft: - d = (d & 0x1f) | 0xc0 - case VariantFuture: - d = (d & 0x0f) | 0xe0 - case VariantUnknown: - fallthrough - default: - panic(fmt.Sprintf("invalid variant: %d", v)) - } - g.Data4[0] = d -} - -// Variant returns the GUID variant, as defined in RFC 4122. -func (g GUID) Variant() Variant { - b := g.Data4[0] - if b&0x80 == 0 { - return VariantNCS - } else if b&0xc0 == 0x80 { - return VariantRFC4122 - } else if b&0xe0 == 0xc0 { - return VariantMicrosoft - } else if b&0xe0 == 0xe0 { - return VariantFuture - } - return VariantUnknown -} - -func (g *GUID) setVersion(v Version) { - g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) -} - -// Version returns the GUID version, as defined in RFC 4122. -func (g GUID) Version() Version { - return Version((g.Data3 & 0xF000) >> 12) -} - -// MarshalText returns the textual representation of the GUID. 
-func (g GUID) MarshalText() ([]byte, error) { - return []byte(g.String()), nil -} - -// UnmarshalText takes the textual representation of a GUID, and unmarhals it -// into this GUID. -func (g *GUID) UnmarshalText(text []byte) error { - g2, err := FromString(string(text)) - if err != nil { - return err - } - *g = g2 - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go deleted file mode 100644 index fca2415..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go +++ /dev/null @@ -1,161 +0,0 @@ -// +build windows - -package security - -import ( - "os" - "syscall" - "unsafe" - - "github.com/pkg/errors" -) - -type ( - accessMask uint32 - accessMode uint32 - desiredAccess uint32 - inheritMode uint32 - objectType uint32 - shareMode uint32 - securityInformation uint32 - trusteeForm uint32 - trusteeType uint32 - - explicitAccess struct { - accessPermissions accessMask - accessMode accessMode - inheritance inheritMode - trustee trustee - } - - trustee struct { - multipleTrustee *trustee - multipleTrusteeOperation int32 - trusteeForm trusteeForm - trusteeType trusteeType - name uintptr - } -) - -const ( - accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ - - accessModeGrant accessMode = 1 - - desiredAccessReadControl desiredAccess = 0x20000 - desiredAccessWriteDac desiredAccess = 0x40000 - - gvmga = "GrantVmGroupAccess:" - - inheritModeNoInheritance inheritMode = 0x0 - inheritModeSubContainersAndObjectsInherit inheritMode = 0x3 - - objectTypeFileObject objectType = 0x1 - - securityInformationDACL securityInformation = 0x4 - - shareModeRead shareMode = 0x1 - shareModeWrite shareMode = 0x2 - - sidVmGroup = "S-1-5-83-0" - - trusteeFormIsSid trusteeForm = 0 - - trusteeTypeWellKnownGroup trusteeType = 5 -) - -// GrantVMGroupAccess sets the DACL for a specified file or directory to -// include Grant ACE entries for 
the VM Group SID. This is a golang re- -// implementation of the same function in vmcompute, just not exported in -// RS5. Which kind of sucks. Sucks a lot :/ -func GrantVmGroupAccess(name string) error { - // Stat (to determine if `name` is a directory). - s, err := os.Stat(name) - if err != nil { - return errors.Wrapf(err, "%s os.Stat %s", gvmga, name) - } - - // Get a handle to the file/directory. Must defer Close on success. - fd, err := createFile(name, s.IsDir()) - if err != nil { - return err // Already wrapped - } - defer syscall.CloseHandle(fd) - - // Get the current DACL and Security Descriptor. Must defer LocalFree on success. - ot := objectTypeFileObject - si := securityInformationDACL - sd := uintptr(0) - origDACL := uintptr(0) - if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { - return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name) - } - defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) - - // Generate a new DACL which is the current DACL with the required ACEs added. - // Must defer LocalFree on success. - newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL) - if err != nil { - return err // Already wrapped - } - defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) - - // And finally use SetSecurityInfo to apply the updated DACL. - if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { - return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name) - } - - return nil -} - -// createFile is a helper function to call [Nt]CreateFile to get a handle to -// the file or directory. 
-func createFile(name string, isDir bool) (syscall.Handle, error) { - namep := syscall.StringToUTF16(name) - da := uint32(desiredAccessReadControl | desiredAccessWriteDac) - sm := uint32(shareModeRead | shareModeWrite) - fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL) - if isDir { - fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS) - } - fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) - if err != nil { - return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name) - } - return fd, nil -} - -// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added. -// The caller is responsible for LocalFree of the returned DACL on success. -func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) { - // Generate pointers to the SIDs based on the string SIDs - sid, err := syscall.StringToSid(sidVmGroup) - if err != nil { - return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup) - } - - inheritance := inheritModeNoInheritance - if isDir { - inheritance = inheritModeSubContainersAndObjectsInherit - } - - eaArray := []explicitAccess{ - explicitAccess{ - accessPermissions: accessMaskDesiredPermission, - accessMode: accessModeGrant, - inheritance: inheritance, - trustee: trustee{ - trusteeForm: trusteeFormIsSid, - trusteeType: trusteeTypeWellKnownGroup, - name: uintptr(unsafe.Pointer(sid)), - }, - }, - } - - modifiedDACL := uintptr(0) - if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { - return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name) - } - - return modifiedDACL, nil -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go deleted file mode 100644 index c40c273..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go +++ /dev/null @@ -1,7 +0,0 @@ 
-package security - -//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go - -//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) [failretval!=0] = advapi32.GetSecurityInfo -//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) [failretval!=0] = advapi32.SetSecurityInfo -//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) [failretval!=0] = advapi32.SetEntriesInAclW diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go deleted file mode 100644 index 4a90cb3..0000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package security - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - - procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") - procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") - procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") -) - -func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) - if r1 != 0 { - err = errnoErr(e1) - } - return -} - -func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) - if r1 != 0 { - err = errnoErr(e1) - } - return -} - -func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) - if r1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go deleted file mode 100644 index 9c83d36..0000000 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ /dev/null @@ -1,202 +0,0 @@ -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "runtime" - "sync" - "syscall" - "unicode/utf16" - - "golang.org/x/sys/windows" -) - -//sys 
adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges -//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf -//sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread -//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW -//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW -//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW - -const ( - SE_PRIVILEGE_ENABLED = 2 - - ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 - - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" -) - -const ( - securityAnonymous = iota - securityIdentification - securityImpersonation - securityDelegation -) - -var ( - privNames = make(map[string]uint64) - privNameMutex sync.Mutex -) - -// PrivilegeError represents an error enabling privileges. -type PrivilegeError struct { - privileges []uint64 -} - -func (e *PrivilegeError) Error() string { - s := "" - if len(e.privileges) > 1 { - s = "Could not enable privileges " - } else { - s = "Could not enable privilege " - } - for i, p := range e.privileges { - if i != 0 { - s += ", " - } - s += `"` - s += getPrivilegeName(p) - s += `"` - } - return s -} - -// RunWithPrivilege enables a single privilege for a function call. -func RunWithPrivilege(name string, fn func() error) error { - return RunWithPrivileges([]string{name}, fn) -} - -// RunWithPrivileges enables privileges for a function call. 
-func RunWithPrivileges(names []string, fn func() error) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - runtime.LockOSThread() - defer runtime.UnlockOSThread() - token, err := newThreadToken() - if err != nil { - return err - } - defer releaseThreadToken(token) - err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) - if err != nil { - return err - } - return fn() -} - -func mapPrivileges(names []string) ([]uint64, error) { - var privileges []uint64 - privNameMutex.Lock() - defer privNameMutex.Unlock() - for _, name := range names { - p, ok := privNames[name] - if !ok { - err := lookupPrivilegeValue("", name, &p) - if err != nil { - return nil, err - } - privNames[name] = p - } - privileges = append(privileges, p) - } - return privileges, nil -} - -// EnableProcessPrivileges enables privileges globally for the process. -func EnableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) -} - -// DisableProcessPrivileges disables privileges globally for the process. 
-func DisableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, 0) -} - -func enableDisableProcessPrivilege(names []string, action uint32) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - - p, _ := windows.GetCurrentProcess() - var token windows.Token - err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) - if err != nil { - return err - } - - defer token.Close() - return adjustPrivileges(token, privileges, action) -} - -func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { - var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) - for _, p := range privileges { - binary.Write(&b, binary.LittleEndian, p) - binary.Write(&b, binary.LittleEndian, action) - } - prevState := make([]byte, b.Len()) - reqSize := uint32(0) - success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) - if !success { - return err - } - if err == ERROR_NOT_ALL_ASSIGNED { - return &PrivilegeError{privileges} - } - return nil -} - -func getPrivilegeName(luid uint64) string { - var nameBuffer [256]uint16 - bufSize := uint32(len(nameBuffer)) - err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) - if err != nil { - return fmt.Sprintf("", luid) - } - - var displayNameBuffer [256]uint16 - displayBufSize := uint32(len(displayNameBuffer)) - var langID uint32 - err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) - if err != nil { - return fmt.Sprintf("", string(utf16.Decode(nameBuffer[:bufSize]))) - } - - return string(utf16.Decode(displayNameBuffer[:displayBufSize])) -} - -func newThreadToken() (windows.Token, error) { - err := impersonateSelf(securityImpersonation) - if err != nil { - return 0, err - } - - var token windows.Token - err = openThreadToken(getCurrentThread(), 
syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) - if err != nil { - rerr := revertToSelf() - if rerr != nil { - panic(rerr) - } - return 0, err - } - return token, nil -} - -func releaseThreadToken(h windows.Token) { - err := revertToSelf() - if err != nil { - panic(err) - } - h.Close() -} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go deleted file mode 100644 index fc1ee4d..0000000 --- a/vendor/github.com/Microsoft/go-winio/reparse.go +++ /dev/null @@ -1,128 +0,0 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "strings" - "unicode/utf16" - "unsafe" -) - -const ( - reparseTagMountPoint = 0xA0000003 - reparseTagSymlink = 0xA000000C -) - -type reparseDataBuffer struct { - ReparseTag uint32 - ReparseDataLength uint16 - Reserved uint16 - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 -} - -// ReparsePoint describes a Win32 symlink or mount point. -type ReparsePoint struct { - Target string - IsMountPoint bool -} - -// UnsupportedReparsePointError is returned when trying to decode a non-symlink or -// mount point reparse point. -type UnsupportedReparsePointError struct { - Tag uint32 -} - -func (e *UnsupportedReparsePointError) Error() string { - return fmt.Sprintf("unsupported reparse point %x", e.Tag) -} - -// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink -// or a mount point. 
-func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { - tag := binary.LittleEndian.Uint32(b[0:4]) - return DecodeReparsePointData(tag, b[8:]) -} - -func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { - isMountPoint := false - switch tag { - case reparseTagMountPoint: - isMountPoint = true - case reparseTagSymlink: - default: - return nil, &UnsupportedReparsePointError{tag} - } - nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) - if !isMountPoint { - nameOffset += 4 - } - nameLength := binary.LittleEndian.Uint16(b[6:8]) - name := make([]uint16, nameLength/2) - err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) - if err != nil { - return nil, err - } - return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil -} - -func isDriveLetter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or -// mount point. -func EncodeReparsePoint(rp *ReparsePoint) []byte { - // Generate an NT path and determine if this is a relative path. - var ntTarget string - relative := false - if strings.HasPrefix(rp.Target, `\\?\`) { - ntTarget = `\??\` + rp.Target[4:] - } else if strings.HasPrefix(rp.Target, `\\`) { - ntTarget = `\??\UNC\` + rp.Target[2:] - } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { - ntTarget = `\??\` + rp.Target - } else { - ntTarget = rp.Target - relative = true - } - - // The paths must be NUL-terminated even though they are counted strings. 
- target16 := utf16.Encode([]rune(rp.Target + "\x00")) - ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) - - size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 - size += len(ntTarget16)*2 + len(target16)*2 - - tag := uint32(reparseTagMountPoint) - if !rp.IsMountPoint { - tag = reparseTagSymlink - size += 4 // Add room for symlink flags - } - - data := reparseDataBuffer{ - ReparseTag: tag, - ReparseDataLength: uint16(size), - SubstituteNameOffset: 0, - SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), - PrintNameOffset: uint16(len(ntTarget16) * 2), - PrintNameLength: uint16((len(target16) - 1) * 2), - } - - var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, &data) - if !rp.IsMountPoint { - flags := uint32(0) - if relative { - flags |= 1 - } - binary.Write(&b, binary.LittleEndian, flags) - } - - binary.Write(&b, binary.LittleEndian, ntTarget16) - binary.Write(&b, binary.LittleEndian, target16) - return b.Bytes() -} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go deleted file mode 100644 index db1b370..0000000 --- a/vendor/github.com/Microsoft/go-winio/sd.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build windows - -package winio - -import ( - "syscall" - "unsafe" -) - -//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW -//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW -//sys localFree(mem uintptr) = LocalFree 
-//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength - -const ( - cERROR_NONE_MAPPED = syscall.Errno(1332) -) - -type AccountLookupError struct { - Name string - Err error -} - -func (e *AccountLookupError) Error() string { - if e.Name == "" { - return "lookup account: empty account name specified" - } - var s string - switch e.Err { - case cERROR_NONE_MAPPED: - s = "not found" - default: - s = e.Err.Error() - } - return "lookup account " + e.Name + ": " + s -} - -type SddlConversionError struct { - Sddl string - Err error -} - -func (e *SddlConversionError) Error() string { - return "convert " + e.Sddl + ": " + e.Err.Error() -} - -// LookupSidByName looks up the SID of an account by name -func LookupSidByName(name string) (sid string, err error) { - if name == "" { - return "", &AccountLookupError{name, cERROR_NONE_MAPPED} - } - - var sidSize, sidNameUse, refDomainSize uint32 - err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { - return "", &AccountLookupError{name, err} - } - sidBuffer := make([]byte, sidSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{name, err} - } - var strBuffer *uint16 - err = convertSidToStringSid(&sidBuffer[0], &strBuffer) - if err != nil { - return "", &AccountLookupError{name, err} - } - sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) - localFree(uintptr(unsafe.Pointer(strBuffer))) - return sid, nil -} - -func SddlToSecurityDescriptor(sddl string) ([]byte, error) { - var sdBuffer uintptr - err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) - if err != nil { - return nil, &SddlConversionError{sddl, err} - } - defer localFree(sdBuffer) - sd := make([]byte, 
getSecurityDescriptorLength(sdBuffer)) - copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) - return sd, nil -} - -func SecurityDescriptorToSddl(sd []byte) (string, error) { - var sddl *uint16 - // The returned string length seems to including an aribtrary number of terminating NULs. - // Don't use it. - err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) - if err != nil { - return "", err - } - defer localFree(uintptr(unsafe.Pointer(sddl))) - return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go deleted file mode 100644 index 5955c99..0000000 --- a/vendor/github.com/Microsoft/go-winio/syscall.go +++ /dev/null @@ -1,3 +0,0 @@ -package winio - -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go deleted file mode 100644 index b03b789..0000000 --- a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go +++ /dev/null @@ -1,323 +0,0 @@ -// +build windows - -package vhd - -import ( - "fmt" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go - -//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.CreateVirtualDisk -//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters 
*OpenVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk -//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk -//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = virtdisk.DetachVirtualDisk -//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) [failretval != 0] = virtdisk.GetVirtualDiskPhysicalPath - -type ( - CreateVirtualDiskFlag uint32 - VirtualDiskFlag uint32 - AttachVirtualDiskFlag uint32 - DetachVirtualDiskFlag uint32 - VirtualDiskAccessMask uint32 -) - -type VirtualStorageType struct { - DeviceID uint32 - VendorID guid.GUID -} - -type CreateVersion2 struct { - UniqueID guid.GUID - MaximumSize uint64 - BlockSizeInBytes uint32 - SectorSizeInBytes uint32 - PhysicalSectorSizeInByte uint32 - ParentPath *uint16 // string - SourcePath *uint16 // string - OpenFlags uint32 - ParentVirtualStorageType VirtualStorageType - SourceVirtualStorageType VirtualStorageType - ResiliencyGUID guid.GUID -} - -type CreateVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 - Version2 CreateVersion2 -} - -type OpenVersion2 struct { - GetInfoOnly bool - ReadOnly bool - ResiliencyGUID guid.GUID -} - -type OpenVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 - Version2 OpenVersion2 -} - -type AttachVersion2 struct { - RestrictedOffset uint64 - RestrictedLength uint64 -} - -type AttachVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 - Version2 AttachVersion2 -} - -const ( - VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 - - // Access Mask for opening a VHD - VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 
- VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 - VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 - VirtualDiskAccessDetach VirtualDiskAccessMask = 0x00040000 - VirtualDiskAccessGetInfo VirtualDiskAccessMask = 0x00080000 - VirtualDiskAccessCreate VirtualDiskAccessMask = 0x00100000 - VirtualDiskAccessMetaOps VirtualDiskAccessMask = 0x00200000 - VirtualDiskAccessRead VirtualDiskAccessMask = 0x000d0000 - VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 - VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 - - // Flags for creating a VHD - CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 - CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 - CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 - CreateVirtualDiskFlagDoNotCopyMetadataFromParent CreateVirtualDiskFlag = 0x4 - CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 - CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 - CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 - CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 - CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 - CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 - CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 - - // Flags for opening a VHD - OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000 - OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 - OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 - OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x00000004 - OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x00000008 - OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x00000010 - OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x00000020 - OpenVirtualDiskFlagVhdsetFileOnly VirtualDiskFlag = 0x00000040 - OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 
0x00000080 - OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 - OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 - - // Flags for attaching a VHD - AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 - AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 - AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 - AttachVirtualDiskFlagPermanentLifetime AttachVirtualDiskFlag = 0x00000004 - AttachVirtualDiskFlagNoLocalHost AttachVirtualDiskFlag = 0x00000008 - AttachVirtualDiskFlagNoSecurityDescriptor AttachVirtualDiskFlag = 0x00000010 - AttachVirtualDiskFlagBypassDefaultEncryptionPolicy AttachVirtualDiskFlag = 0x00000020 - AttachVirtualDiskFlagNonPnp AttachVirtualDiskFlag = 0x00000040 - AttachVirtualDiskFlagRestrictedRange AttachVirtualDiskFlag = 0x00000080 - AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 - AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 - - // Flags for detaching a VHD - DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 -) - -// CreateVhdx is a helper function to create a simple vhdx file at the given path using -// default values. -func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { - params := CreateVirtualDiskParameters{ - Version: 2, - Version2: CreateVersion2{ - MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024, - BlockSizeInBytes: blockSizeInMb * 1024 * 1024, - }, - } - - handle, err := CreateVirtualDisk(path, VirtualDiskAccessNone, CreateVirtualDiskFlagNone, ¶ms) - if err != nil { - return err - } - - if err := syscall.CloseHandle(handle); err != nil { - return err - } - return nil -} - -// DetachVirtualDisk detaches a virtual hard disk by handle. -func DetachVirtualDisk(handle syscall.Handle) (err error) { - if err := detachVirtualDisk(handle, 0, 0); err != nil { - return errors.Wrap(err, "failed to detach virtual disk") - } - return nil -} - -// DetachVhd detaches a vhd found at `path`. 
-func DetachVhd(path string) error { - handle, err := OpenVirtualDisk( - path, - VirtualDiskAccessNone, - OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, - ) - if err != nil { - return err - } - defer syscall.CloseHandle(handle) - return DetachVirtualDisk(handle) -} - -// AttachVirtualDisk attaches a virtual hard disk for use. -func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) { - // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. - if err := attachVirtualDisk( - handle, - nil, - uint32(attachVirtualDiskFlag), - 0, - parameters, - nil, - ); err != nil { - return errors.Wrap(err, "failed to attach virtual disk") - } - return nil -} - -// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 -// of the ATTACH_VIRTUAL_DISK_PARAMETERS. -func AttachVhd(path string) (err error) { - handle, err := OpenVirtualDisk( - path, - VirtualDiskAccessNone, - OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, - ) - if err != nil { - return err - } - - defer syscall.CloseHandle(handle) - params := AttachVirtualDiskParameters{Version: 2} - if err := AttachVirtualDisk( - handle, - AttachVirtualDiskFlagNone, - ¶ms, - ); err != nil { - return errors.Wrap(err, "failed to attach virtual disk") - } - return nil -} - -// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. 
-func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) { - parameters := OpenVirtualDiskParameters{Version: 2} - handle, err := OpenVirtualDiskWithParameters( - vhdPath, - virtualDiskAccessMask, - openVirtualDiskFlags, - ¶meters, - ) - if err != nil { - return 0, err - } - return handle, nil -} - -// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. -func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) { - var ( - handle syscall.Handle - defaultType VirtualStorageType - ) - if parameters.Version != 2 { - return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) - } - if err := openVirtualDisk( - &defaultType, - vhdPath, - uint32(virtualDiskAccessMask), - uint32(openVirtualDiskFlags), - parameters, - &handle, - ); err != nil { - return 0, errors.Wrap(err, "failed to open virtual disk") - } - return handle, nil -} - -// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk. 
-func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) { - var ( - handle syscall.Handle - defaultType VirtualStorageType - ) - if parameters.Version != 2 { - return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) - } - - if err := createVirtualDisk( - &defaultType, - path, - uint32(virtualDiskAccessMask), - nil, - uint32(createVirtualDiskFlags), - 0, - parameters, - nil, - &handle, - ); err != nil { - return handle, errors.Wrap(err, "failed to create virtual disk") - } - return handle, nil -} - -// GetVirtualDiskPhysicalPath takes a handle to a virtual hard disk and returns the physical -// path of the disk on the machine. This path is in the form \\.\PhysicalDriveX where X is an integer -// that represents the particular enumeration of the physical disk on the caller's system. -func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { - var ( - diskPathSizeInBytes uint32 = 256 * 2 // max path length 256 wide chars - diskPhysicalPathBuf [256]uint16 - ) - if err := getVirtualDiskPhysicalPath( - handle, - &diskPathSizeInBytes, - &diskPhysicalPathBuf[0], - ); err != nil { - return "", errors.Wrap(err, "failed to get disk physical path") - } - return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil -} - -// CreateDiffVhd is a helper function to create a differencing virtual disk. -func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { - // Setting `ParentPath` is how to signal to create a differencing disk. 
- createParams := &CreateVirtualDiskParameters{ - Version: 2, - Version2: CreateVersion2{ - ParentPath: windows.StringToUTF16Ptr(baseVhdPath), - BlockSizeInBytes: blockSizeInMB * 1024 * 1024, - OpenFlags: uint32(OpenVirtualDiskFlagCachedIO), - }, - } - - vhdHandle, err := CreateVirtualDisk( - diffVhdPath, - VirtualDiskAccessNone, - CreateVirtualDiskFlagNone, - createParams, - ) - if err != nil { - return fmt.Errorf("failed to create differencing vhd: %s", err) - } - if err := syscall.CloseHandle(vhdHandle); err != nil { - return fmt.Errorf("failed to close differencing vhd handle: %s", err) - } - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go deleted file mode 100644 index 572f7b4..0000000 --- a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go +++ /dev/null @@ -1,106 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package vhd - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") - - procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") - procCreateVirtualDisk = modvirtdisk.NewProc("CreateVirtualDisk") - procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk") - procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath") - procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") -) - -func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) - if r1 != 0 { - err = errnoErr(e1) - } - return -} - -func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(path) - if err != nil { - return - } - return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle) -} - -func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) { - r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), 
uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) - if r1 != 0 { - err = errnoErr(e1) - } - return -} - -func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) - if r1 != 0 { - err = errnoErr(e1) - } - return -} - -func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) - if r1 != 0 { - err = errnoErr(e1) - } - return -} - -func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(path) - if err != nil { - return - } - return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) -} - -func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) { - r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) - if r1 != 0 { - err = errnoErr(e1) - } - return -} diff --git 
a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go deleted file mode 100644 index 176ff75..0000000 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ /dev/null @@ -1,427 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package winio - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateFileW = modkernel32.NewProc("CreateFileW") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procLocalAlloc = 
modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") - procbind = modws2_32.NewProc("bind") -) - -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - err = errnoErr(e1) - } - return -} - -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if 
err != nil { - return - } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) -} - -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) - return -} - -func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(accountName) - if err != nil { - return - } - return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) -} - -func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - var _p0 *uint16 - _p0, err = 
syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) -} - -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) -} - -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) -} - -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { - var _p0 uint32 - if 
openAsSelf { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func 
createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) -} - -func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = errnoErr(e1) - } - return -} - -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) - if newport == 0 { - err = errnoErr(e1) - } - return -} - -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) -} - -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), 
uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = errnoErr(e1) - } - return -} - -func getCurrentThread() (h syscall.Handle) { - r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) - return -} - -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) - ptr = uintptr(r0) - return -} - -func localFree(mem uintptr) { - 
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) - return -} - -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) - status = ntstatus(r0) - return -} - -func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) - status = ntstatus(r0) - return -} - -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) - status = ntstatus(r0) - return -} - -func rtlNtStatusToDosError(status ntstatus) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} - -func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } - 
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socketError { - err = errnoErr(e1) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/.gitattributes b/vendor/github.com/Microsoft/hcsshim/.gitattributes deleted file mode 100644 index 94f480d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -* text=auto eol=lf \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore deleted file mode 100644 index aec9bd4..0000000 --- a/vendor/github.com/Microsoft/hcsshim/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.exe -.idea -.vscode diff --git a/vendor/github.com/Microsoft/hcsshim/CODEOWNERS b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS deleted file mode 100644 index f4c5a07..0000000 --- a/vendor/github.com/Microsoft/hcsshim/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @microsoft/containerplat \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE deleted file mode 100644 index 49d2166..0000000 --- a/vendor/github.com/Microsoft/hcsshim/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the 
Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md deleted file mode 100644 index 95c3003..0000000 --- a/vendor/github.com/Microsoft/hcsshim/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# hcsshim - -[![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster) - -This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS). - -It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well. - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a -Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. 
For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide -a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions -provided by the bot. You will only need to do this once across all repos using our CLA. - -We also ask that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to certify they either authored the work themselves or otherwise have permission to use it in this project. - - -## Code of Conduct - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - -## Dependencies - -This project requires Golang 1.9 or newer to build. - -For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements). - -## Reporting Security Issues - -Security issues and bugs should be reported privately, via email, to the Microsoft Security -Response Center (MSRC) at [secure@microsoft.com](mailto:secure@microsoft.com). You should -receive a response within 24 hours. If for some reason you do not, please follow up via -email to ensure we received your original message. Further information, including the -[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in -the [Security TechCenter](https://technet.microsoft.com/en-us/security/default). - -For additional details, see [Report a Computer Security Vulnerability](https://technet.microsoft.com/en-us/security/ff852094.aspx) on Technet - ---------------- -Copyright (c) 2018 Microsoft Corp. 
All rights reserved. diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/doc.go b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/doc.go deleted file mode 100644 index 0684d05..0000000 --- a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/doc.go +++ /dev/null @@ -1 +0,0 @@ -package options diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/next.pb.txt b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/next.pb.txt deleted file mode 100644 index e69de29..0000000 diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go deleted file mode 100644 index 89aff37..0000000 --- a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go +++ /dev/null @@ -1,1542 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto - -package options - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - _ "github.com/gogo/protobuf/types" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - io "io" - math "math" - reflect "reflect" - strings "strings" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type Options_DebugType int32 - -const ( - Options_NPIPE Options_DebugType = 0 - Options_FILE Options_DebugType = 1 - Options_ETW Options_DebugType = 2 -) - -var Options_DebugType_name = map[int32]string{ - 0: "NPIPE", - 1: "FILE", - 2: "ETW", -} - -var Options_DebugType_value = map[string]int32{ - "NPIPE": 0, - "FILE": 1, - "ETW": 2, -} - -func (x Options_DebugType) String() string { - return proto.EnumName(Options_DebugType_name, int32(x)) -} - -func (Options_DebugType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b643df6839c75082, []int{0, 0} -} - -type Options_SandboxIsolation int32 - -const ( - Options_PROCESS Options_SandboxIsolation = 0 - Options_HYPERVISOR Options_SandboxIsolation = 1 -) - -var Options_SandboxIsolation_name = map[int32]string{ - 0: "PROCESS", - 1: "HYPERVISOR", -} - -var Options_SandboxIsolation_value = map[string]int32{ - "PROCESS": 0, - "HYPERVISOR": 1, -} - -func (x Options_SandboxIsolation) String() string { - return proto.EnumName(Options_SandboxIsolation_name, int32(x)) -} - -func (Options_SandboxIsolation) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b643df6839c75082, []int{0, 1} -} - -// Options are the set of customizations that can be passed at Create time. -type Options struct { - // Enable debug tracing (sets the logrus log level to debug). This may be deprecated in the future, prefer - // log_level as this will override debug if both of them are set. 
- Debug bool `protobuf:"varint,1,opt,name=debug,proto3" json:"debug,omitempty"` - // debug tracing output type - DebugType Options_DebugType `protobuf:"varint,2,opt,name=debug_type,json=debugType,proto3,enum=containerd.runhcs.v1.Options_DebugType" json:"debug_type,omitempty"` - // registry key root for storage of the runhcs container state - RegistryRoot string `protobuf:"bytes,3,opt,name=registry_root,json=registryRoot,proto3" json:"registry_root,omitempty"` - // sandbox_image is the image to use for the sandbox that matches the - // sandbox_platform. - SandboxImage string `protobuf:"bytes,4,opt,name=sandbox_image,json=sandboxImage,proto3" json:"sandbox_image,omitempty"` - // sandbox_platform is a CRI setting that specifies the platform - // architecture for all sandbox's in this runtime. Values are - // 'windows/amd64' and 'linux/amd64'. - SandboxPlatform string `protobuf:"bytes,5,opt,name=sandbox_platform,json=sandboxPlatform,proto3" json:"sandbox_platform,omitempty"` - // sandbox_isolation is a CRI setting that specifies the isolation level of - // the sandbox. For Windows runtime PROCESS and HYPERVISOR are valid. For - // LCOW only HYPERVISOR is valid and default if omitted. - SandboxIsolation Options_SandboxIsolation `protobuf:"varint,6,opt,name=sandbox_isolation,json=sandboxIsolation,proto3,enum=containerd.runhcs.v1.Options_SandboxIsolation" json:"sandbox_isolation,omitempty"` - // boot_files_root_path is the path to the directory containing the LCOW - // kernel and root FS files. - BootFilesRootPath string `protobuf:"bytes,7,opt,name=boot_files_root_path,json=bootFilesRootPath,proto3" json:"boot_files_root_path,omitempty"` - // vm_processor_count is the default number of processors to create for the - // hypervisor isolated utility vm. - // - // The platform default if omitted is 2, unless the host only has a single - // core in which case it is 1. 
- VmProcessorCount int32 `protobuf:"varint,8,opt,name=vm_processor_count,json=vmProcessorCount,proto3" json:"vm_processor_count,omitempty"` - // vm_memory_size_in_mb is the default amount of memory to assign to the - // hypervisor isolated utility vm. - // - // The platform default is 1024MB if omitted. - VmMemorySizeInMb int32 `protobuf:"varint,9,opt,name=vm_memory_size_in_mb,json=vmMemorySizeInMb,proto3" json:"vm_memory_size_in_mb,omitempty"` - // GPUVHDPath is the path to the gpu vhd to add to the uvm - // when a container requests a gpu - GPUVHDPath string `protobuf:"bytes,10,opt,name=GPUVHDPath,proto3" json:"GPUVHDPath,omitempty"` - // scale_cpu_limits_to_sandbox indicates that container CPU limits should - // be adjusted to account for the difference in number of cores between the - // host and UVM. - ScaleCpuLimitsToSandbox bool `protobuf:"varint,11,opt,name=scale_cpu_limits_to_sandbox,json=scaleCpuLimitsToSandbox,proto3" json:"scale_cpu_limits_to_sandbox,omitempty"` - // default_container_scratch_size_in_gb is the default scratch size (sandbox.vhdx) - // to be used for containers. Every container will get a sandbox of `size_in_gb` assigned - // instead of the default of 20GB. - DefaultContainerScratchSizeInGb int32 `protobuf:"varint,12,opt,name=default_container_scratch_size_in_gb,json=defaultContainerScratchSizeInGb,proto3" json:"default_container_scratch_size_in_gb,omitempty"` - // default_vm_scratch_size_in_gb is the default scratch size (sandbox.vhdx) - // to be used for the UVM. This only applies to WCOW as LCOW doesn't mount a scratch - // specifically for the UVM. - DefaultVmScratchSizeInGb int32 `protobuf:"varint,13,opt,name=default_vm_scratch_size_in_gb,json=defaultVmScratchSizeInGb,proto3" json:"default_vm_scratch_size_in_gb,omitempty"` - // share_scratch specifies if we'd like to reuse scratch space between multiple containers. - // This currently only affects LCOW. 
The sandbox containers scratch space is re-used for all - // subsequent containers launched in the pod. - ShareScratch bool `protobuf:"varint,14,opt,name=share_scratch,json=shareScratch,proto3" json:"share_scratch,omitempty"` - // NCProxyAddr is the address of the network configuration proxy service. If omitted - // the network is setup locally. - NCProxyAddr string `protobuf:"bytes,15,opt,name=NCProxyAddr,proto3" json:"NCProxyAddr,omitempty"` - // log_level specifies the logrus log level for the shim. Supported values are a string representation of the - // logrus log levels: "trace", "debug", "info", "warn", "error", "fatal", "panic". This setting will override - // the `debug` field if both are specified, unless the level specified is also "debug", as these are equivalent. - LogLevel string `protobuf:"bytes,16,opt,name=log_level,json=logLevel,proto3" json:"log_level,omitempty"` - // io_retry_timeout_in_sec is the timeout in seconds for how long to try and reconnect to an upstream IO provider if a connection is lost. - // The typical example is if Containerd has restarted but is expected to come back online. A 0 for this field is interpreted as an infinite - // timeout. 
- IoRetryTimeoutInSec int32 `protobuf:"varint,17,opt,name=io_retry_timeout_in_sec,json=ioRetryTimeoutInSec,proto3" json:"io_retry_timeout_in_sec,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Options) Reset() { *m = Options{} } -func (*Options) ProtoMessage() {} -func (*Options) Descriptor() ([]byte, []int) { - return fileDescriptor_b643df6839c75082, []int{0} -} -func (m *Options) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Options.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Options) XXX_Merge(src proto.Message) { - xxx_messageInfo_Options.Merge(m, src) -} -func (m *Options) XXX_Size() int { - return m.Size() -} -func (m *Options) XXX_DiscardUnknown() { - xxx_messageInfo_Options.DiscardUnknown(m) -} - -var xxx_messageInfo_Options proto.InternalMessageInfo - -// ProcessDetails contains additional information about a process. This is the additional -// info returned in the Pids query. 
-type ProcessDetails struct { - ImageName string `protobuf:"bytes,1,opt,name=image_name,json=imageName,proto3" json:"image_name,omitempty"` - CreatedAt time.Time `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` - KernelTime_100Ns uint64 `protobuf:"varint,3,opt,name=kernel_time_100_ns,json=kernelTime100Ns,proto3" json:"kernel_time_100_ns,omitempty"` - MemoryCommitBytes uint64 `protobuf:"varint,4,opt,name=memory_commit_bytes,json=memoryCommitBytes,proto3" json:"memory_commit_bytes,omitempty"` - MemoryWorkingSetPrivateBytes uint64 `protobuf:"varint,5,opt,name=memory_working_set_private_bytes,json=memoryWorkingSetPrivateBytes,proto3" json:"memory_working_set_private_bytes,omitempty"` - MemoryWorkingSetSharedBytes uint64 `protobuf:"varint,6,opt,name=memory_working_set_shared_bytes,json=memoryWorkingSetSharedBytes,proto3" json:"memory_working_set_shared_bytes,omitempty"` - ProcessID uint32 `protobuf:"varint,7,opt,name=process_id,json=processId,proto3" json:"process_id,omitempty"` - UserTime_100Ns uint64 `protobuf:"varint,8,opt,name=user_time_100_ns,json=userTime100Ns,proto3" json:"user_time_100_ns,omitempty"` - ExecID string `protobuf:"bytes,9,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProcessDetails) Reset() { *m = ProcessDetails{} } -func (*ProcessDetails) ProtoMessage() {} -func (*ProcessDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_b643df6839c75082, []int{1} -} -func (m *ProcessDetails) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProcessDetails) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_ProcessDetails.Merge(m, src) -} -func (m *ProcessDetails) XXX_Size() int { - return m.Size() -} -func (m *ProcessDetails) XXX_DiscardUnknown() { - xxx_messageInfo_ProcessDetails.DiscardUnknown(m) -} - -var xxx_messageInfo_ProcessDetails proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("containerd.runhcs.v1.Options_DebugType", Options_DebugType_name, Options_DebugType_value) - proto.RegisterEnum("containerd.runhcs.v1.Options_SandboxIsolation", Options_SandboxIsolation_name, Options_SandboxIsolation_value) - proto.RegisterType((*Options)(nil), "containerd.runhcs.v1.Options") - proto.RegisterType((*ProcessDetails)(nil), "containerd.runhcs.v1.ProcessDetails") -} - -func init() { - proto.RegisterFile("github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto", fileDescriptor_b643df6839c75082) -} - -var fileDescriptor_b643df6839c75082 = []byte{ - // 953 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5d, 0x6f, 0xdb, 0x36, - 0x17, 0xb6, 0xda, 0x24, 0xb6, 0x4e, 0xbe, 0x1c, 0x36, 0x40, 0x85, 0xe4, 0xad, 0x6d, 0xa4, 0x2f, - 0xd0, 0x14, 0x6b, 0xa4, 0xa4, 0xdb, 0xdd, 0x06, 0x0c, 0x8d, 0xed, 0xb4, 0x1e, 0xf2, 0x61, 0xc8, - 0x59, 0xba, 0x8f, 0x0b, 0x42, 0x1f, 0x8c, 0x4c, 0x54, 0x12, 0x05, 0x92, 0xf6, 0xe2, 0x5e, 0xed, - 0x27, 0xec, 0x87, 0xec, 0x87, 0xe4, 0x72, 0x97, 0x03, 0x06, 0x64, 0xab, 0x7f, 0xc9, 0x40, 0x8a, - 0x4a, 0xbb, 0x20, 0xd8, 0xcd, 0xae, 0x4c, 0x3e, 0xcf, 0xc3, 0x87, 0xe7, 0x1c, 0x9d, 0x43, 0xc3, - 0x59, 0x42, 0xe5, 0x78, 0x12, 0xba, 0x11, 0xcb, 0xbc, 0x13, 0x1a, 0x71, 0x26, 0xd8, 0xa5, 0xf4, - 0xc6, 0x91, 0x10, 0x63, 0x9a, 0x79, 0x51, 0x16, 0x7b, 0x11, 0xcb, 0x65, 0x40, 0x73, 0xc2, 0xe3, - 0x3d, 0x85, 0xed, 0xf1, 0x49, 0x3e, 0x8e, 0xc4, 0xde, 0xf4, 0xc0, 0x63, 0x85, 0xa4, 0x2c, 0x17, - 0x5e, 0x89, 0xb8, 0x05, 0x67, 0x92, 0xa1, 0xcd, 0x8f, 0x7a, 0xd7, 0x10, 0xd3, 0x83, 0xad, 0xcd, - 0x84, 0x25, 0x4c, 0x0b, 0x3c, 
0xb5, 0x2a, 0xb5, 0x5b, 0xed, 0x84, 0xb1, 0x24, 0x25, 0x9e, 0xde, - 0x85, 0x93, 0x4b, 0x4f, 0xd2, 0x8c, 0x08, 0x19, 0x64, 0x45, 0x29, 0xd8, 0xf9, 0xb5, 0x0e, 0xf5, - 0xb3, 0xf2, 0x16, 0xb4, 0x09, 0x8b, 0x31, 0x09, 0x27, 0x89, 0x63, 0x75, 0xac, 0xdd, 0x86, 0x5f, - 0x6e, 0xd0, 0x11, 0x80, 0x5e, 0x60, 0x39, 0x2b, 0x88, 0xf3, 0xa0, 0x63, 0xed, 0xae, 0xbd, 0x7c, - 0xe6, 0xde, 0x17, 0x83, 0x6b, 0x8c, 0xdc, 0x9e, 0xd2, 0x9f, 0xcf, 0x0a, 0xe2, 0xdb, 0x71, 0xb5, - 0x44, 0x4f, 0x61, 0x95, 0x93, 0x84, 0x0a, 0xc9, 0x67, 0x98, 0x33, 0x26, 0x9d, 0x87, 0x1d, 0x6b, - 0xd7, 0xf6, 0x57, 0x2a, 0xd0, 0x67, 0x4c, 0x2a, 0x91, 0x08, 0xf2, 0x38, 0x64, 0x57, 0x98, 0x66, - 0x41, 0x42, 0x9c, 0x85, 0x52, 0x64, 0xc0, 0x81, 0xc2, 0xd0, 0x73, 0x68, 0x56, 0xa2, 0x22, 0x0d, - 0xe4, 0x25, 0xe3, 0x99, 0xb3, 0xa8, 0x75, 0xeb, 0x06, 0x1f, 0x1a, 0x18, 0xfd, 0x08, 0x1b, 0xb7, - 0x7e, 0x82, 0xa5, 0x81, 0x8a, 0xcf, 0x59, 0xd2, 0x39, 0xb8, 0xff, 0x9e, 0xc3, 0xc8, 0xdc, 0x58, - 0x9d, 0xf2, 0xab, 0x3b, 0x6f, 0x11, 0xe4, 0xc1, 0x66, 0xc8, 0x98, 0xc4, 0x97, 0x34, 0x25, 0x42, - 0xe7, 0x84, 0x8b, 0x40, 0x8e, 0x9d, 0xba, 0x8e, 0x65, 0x43, 0x71, 0x47, 0x8a, 0x52, 0x99, 0x0d, - 0x03, 0x39, 0x46, 0x2f, 0x00, 0x4d, 0x33, 0x5c, 0x70, 0x16, 0x11, 0x21, 0x18, 0xc7, 0x11, 0x9b, - 0xe4, 0xd2, 0x69, 0x74, 0xac, 0xdd, 0x45, 0xbf, 0x39, 0xcd, 0x86, 0x15, 0xd1, 0x55, 0x38, 0x72, - 0x61, 0x73, 0x9a, 0xe1, 0x8c, 0x64, 0x8c, 0xcf, 0xb0, 0xa0, 0xef, 0x09, 0xa6, 0x39, 0xce, 0x42, - 0xc7, 0xae, 0xf4, 0x27, 0x9a, 0x1a, 0xd1, 0xf7, 0x64, 0x90, 0x9f, 0x84, 0xa8, 0x05, 0xf0, 0x7a, - 0xf8, 0xed, 0xc5, 0x9b, 0x9e, 0xba, 0xcb, 0x01, 0x1d, 0xc4, 0x27, 0x08, 0xfa, 0x0a, 0xb6, 0x45, - 0x14, 0xa4, 0x04, 0x47, 0xc5, 0x04, 0xa7, 0x34, 0xa3, 0x52, 0x60, 0xc9, 0xb0, 0x49, 0xcb, 0x59, - 0xd6, 0x1f, 0xfd, 0xb1, 0x96, 0x74, 0x8b, 0xc9, 0xb1, 0x16, 0x9c, 0x33, 0x53, 0x07, 0x74, 0x02, - 0xff, 0x8f, 0xc9, 0x65, 0x30, 0x49, 0x25, 0xbe, 0xad, 0x1b, 0x16, 0x11, 0x0f, 0x64, 0x34, 0xbe, - 0x8d, 0x2e, 0x09, 0x9d, 0x15, 0x1d, 0x5d, 0xdb, 0x68, 0xbb, 0x95, 
0x74, 0x54, 0x2a, 0xcb, 0x60, - 0x5f, 0x87, 0xe8, 0x6b, 0x78, 0x52, 0xd9, 0x4d, 0xb3, 0xfb, 0x7c, 0x56, 0xb5, 0x8f, 0x63, 0x44, - 0x17, 0xd9, 0x5d, 0x03, 0xd5, 0x29, 0xe3, 0x80, 0x93, 0xea, 0xac, 0xb3, 0xa6, 0xe3, 0x5f, 0xd1, - 0xa0, 0x11, 0xa3, 0x0e, 0x2c, 0x9f, 0x76, 0x87, 0x9c, 0x5d, 0xcd, 0x5e, 0xc5, 0x31, 0x77, 0xd6, - 0x75, 0x4d, 0x3e, 0x85, 0xd0, 0x36, 0xd8, 0x29, 0x4b, 0x70, 0x4a, 0xa6, 0x24, 0x75, 0x9a, 0x9a, - 0x6f, 0xa4, 0x2c, 0x39, 0x56, 0x7b, 0xf4, 0x05, 0x3c, 0xa6, 0x0c, 0x73, 0xa2, 0x5a, 0x56, 0x0d, - 0x0e, 0x9b, 0x48, 0x15, 0x9d, 0x20, 0x91, 0xb3, 0xa1, 0xc3, 0x7b, 0x44, 0x99, 0xaf, 0xd8, 0xf3, - 0x92, 0x1c, 0xe4, 0x23, 0x12, 0xed, 0x3c, 0x07, 0xfb, 0x76, 0x00, 0x90, 0x0d, 0x8b, 0xa7, 0xc3, - 0xc1, 0xb0, 0xdf, 0xac, 0xa1, 0x06, 0x2c, 0x1c, 0x0d, 0x8e, 0xfb, 0x4d, 0x0b, 0xd5, 0xe1, 0x61, - 0xff, 0xfc, 0x6d, 0xf3, 0xc1, 0x8e, 0x07, 0xcd, 0xbb, 0x7d, 0x86, 0x96, 0xa1, 0x3e, 0xf4, 0xcf, - 0xba, 0xfd, 0xd1, 0xa8, 0x59, 0x43, 0x6b, 0x00, 0x6f, 0xbe, 0x1f, 0xf6, 0xfd, 0x8b, 0xc1, 0xe8, - 0xcc, 0x6f, 0x5a, 0x3b, 0x7f, 0x3c, 0x84, 0x35, 0xd3, 0x26, 0x3d, 0x22, 0x03, 0x9a, 0x0a, 0xf4, - 0x04, 0x40, 0x8f, 0x0a, 0xce, 0x83, 0x8c, 0xe8, 0xd1, 0xb5, 0x7d, 0x5b, 0x23, 0xa7, 0x41, 0x46, - 0x50, 0x17, 0x20, 0xe2, 0x24, 0x90, 0x24, 0xc6, 0x81, 0xd4, 0xe3, 0xbb, 0xfc, 0x72, 0xcb, 0x2d, - 0x9f, 0x05, 0xb7, 0x7a, 0x16, 0xdc, 0xf3, 0xea, 0x59, 0x38, 0x6c, 0x5c, 0xdf, 0xb4, 0x6b, 0xbf, - 0xfc, 0xd9, 0xb6, 0x7c, 0xdb, 0x9c, 0x7b, 0x25, 0xd1, 0x67, 0x80, 0xde, 0x11, 0x9e, 0x93, 0x54, - 0x97, 0x01, 0x1f, 0xec, 0xef, 0xe3, 0x5c, 0xe8, 0x01, 0x5e, 0xf0, 0xd7, 0x4b, 0x46, 0x39, 0x1c, - 0xec, 0xef, 0x9f, 0x0a, 0xe4, 0xc2, 0x23, 0xd3, 0xb4, 0x11, 0xcb, 0x32, 0x2a, 0x71, 0x38, 0x93, - 0x44, 0xe8, 0x49, 0x5e, 0xf0, 0x37, 0x4a, 0xaa, 0xab, 0x99, 0x43, 0x45, 0xa0, 0x23, 0xe8, 0x18, - 0xfd, 0x4f, 0x8c, 0xbf, 0xa3, 0x79, 0x82, 0x05, 0x91, 0xb8, 0xe0, 0x74, 0x1a, 0x48, 0x62, 0x0e, - 0x2f, 0xea, 0xc3, 0xff, 0x2b, 0x75, 0x6f, 0x4b, 0xd9, 0x88, 0xc8, 0x61, 0x29, 0x2a, 0x7d, 0x7a, - 0xd0, 
0xbe, 0xc7, 0x47, 0xf7, 0x43, 0x6c, 0x6c, 0x96, 0xb4, 0xcd, 0xf6, 0x5d, 0x9b, 0x91, 0xd6, - 0x94, 0x2e, 0x2f, 0x00, 0xcc, 0x80, 0x62, 0x1a, 0xeb, 0x51, 0x5e, 0x3d, 0x5c, 0x9d, 0xdf, 0xb4, - 0x6d, 0x53, 0xf6, 0x41, 0xcf, 0xb7, 0x8d, 0x60, 0x10, 0xa3, 0x67, 0xd0, 0x9c, 0x08, 0xc2, 0xff, - 0x51, 0x96, 0x86, 0xbe, 0x64, 0x55, 0xe1, 0x1f, 0x8b, 0xf2, 0x14, 0xea, 0xe4, 0x8a, 0x44, 0xca, - 0x53, 0xcd, 0xaf, 0x7d, 0x08, 0xf3, 0x9b, 0xf6, 0x52, 0xff, 0x8a, 0x44, 0x83, 0x9e, 0xbf, 0xa4, - 0xa8, 0x41, 0x7c, 0x18, 0x5f, 0x7f, 0x68, 0xd5, 0x7e, 0xff, 0xd0, 0xaa, 0xfd, 0x3c, 0x6f, 0x59, - 0xd7, 0xf3, 0x96, 0xf5, 0xdb, 0xbc, 0x65, 0xfd, 0x35, 0x6f, 0x59, 0x3f, 0x7c, 0xf3, 0xdf, 0xff, - 0x44, 0xbe, 0x34, 0xbf, 0xdf, 0xd5, 0xc2, 0x25, 0xfd, 0xdd, 0x3f, 0xff, 0x3b, 0x00, 0x00, 0xff, - 0xff, 0x6b, 0x83, 0xa6, 0x5f, 0x9b, 0x06, 0x00, 0x00, -} - -func (m *Options) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Options) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Debug { - dAtA[i] = 0x8 - i++ - if m.Debug { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.DebugType != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.DebugType)) - } - if len(m.RegistryRoot) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.RegistryRoot))) - i += copy(dAtA[i:], m.RegistryRoot) - } - if len(m.SandboxImage) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.SandboxImage))) - i += copy(dAtA[i:], m.SandboxImage) - } - if len(m.SandboxPlatform) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.SandboxPlatform))) - i += copy(dAtA[i:], m.SandboxPlatform) - } - if m.SandboxIsolation != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.SandboxIsolation)) - } - if len(m.BootFilesRootPath) > 0 { - dAtA[i] = 
0x3a - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.BootFilesRootPath))) - i += copy(dAtA[i:], m.BootFilesRootPath) - } - if m.VmProcessorCount != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.VmProcessorCount)) - } - if m.VmMemorySizeInMb != 0 { - dAtA[i] = 0x48 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.VmMemorySizeInMb)) - } - if len(m.GPUVHDPath) > 0 { - dAtA[i] = 0x52 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.GPUVHDPath))) - i += copy(dAtA[i:], m.GPUVHDPath) - } - if m.ScaleCpuLimitsToSandbox { - dAtA[i] = 0x58 - i++ - if m.ScaleCpuLimitsToSandbox { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.DefaultContainerScratchSizeInGb != 0 { - dAtA[i] = 0x60 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.DefaultContainerScratchSizeInGb)) - } - if m.DefaultVmScratchSizeInGb != 0 { - dAtA[i] = 0x68 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.DefaultVmScratchSizeInGb)) - } - if m.ShareScratch { - dAtA[i] = 0x70 - i++ - if m.ShareScratch { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.NCProxyAddr) > 0 { - dAtA[i] = 0x7a - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.NCProxyAddr))) - i += copy(dAtA[i:], m.NCProxyAddr) - } - if len(m.LogLevel) > 0 { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.LogLevel))) - i += copy(dAtA[i:], m.LogLevel) - } - if m.IoRetryTimeoutInSec != 0 { - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.IoRetryTimeoutInSec)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ProcessDetails) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ImageName) > 0 { - dAtA[i] = 0xa - 
i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.ImageName))) - i += copy(dAtA[i:], m.ImageName) - } - dAtA[i] = 0x12 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt))) - n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if m.KernelTime_100Ns != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.KernelTime_100Ns)) - } - if m.MemoryCommitBytes != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryCommitBytes)) - } - if m.MemoryWorkingSetPrivateBytes != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryWorkingSetPrivateBytes)) - } - if m.MemoryWorkingSetSharedBytes != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryWorkingSetSharedBytes)) - } - if m.ProcessID != 0 { - dAtA[i] = 0x38 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.ProcessID)) - } - if m.UserTime_100Ns != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.UserTime_100Ns)) - } - if len(m.ExecID) > 0 { - dAtA[i] = 0x4a - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.ExecID))) - i += copy(dAtA[i:], m.ExecID) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeVarintRunhcs(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Options) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Debug { - n += 2 - } - if m.DebugType != 0 { - n += 1 + sovRunhcs(uint64(m.DebugType)) - } - l = len(m.RegistryRoot) - if l > 0 { - n += 1 + l + sovRunhcs(uint64(l)) - } - l = len(m.SandboxImage) - if l > 0 { - n += 1 + l + sovRunhcs(uint64(l)) - } - l = len(m.SandboxPlatform) - if l > 0 { - n += 1 + l + sovRunhcs(uint64(l)) - } - if m.SandboxIsolation != 0 
{ - n += 1 + sovRunhcs(uint64(m.SandboxIsolation)) - } - l = len(m.BootFilesRootPath) - if l > 0 { - n += 1 + l + sovRunhcs(uint64(l)) - } - if m.VmProcessorCount != 0 { - n += 1 + sovRunhcs(uint64(m.VmProcessorCount)) - } - if m.VmMemorySizeInMb != 0 { - n += 1 + sovRunhcs(uint64(m.VmMemorySizeInMb)) - } - l = len(m.GPUVHDPath) - if l > 0 { - n += 1 + l + sovRunhcs(uint64(l)) - } - if m.ScaleCpuLimitsToSandbox { - n += 2 - } - if m.DefaultContainerScratchSizeInGb != 0 { - n += 1 + sovRunhcs(uint64(m.DefaultContainerScratchSizeInGb)) - } - if m.DefaultVmScratchSizeInGb != 0 { - n += 1 + sovRunhcs(uint64(m.DefaultVmScratchSizeInGb)) - } - if m.ShareScratch { - n += 2 - } - l = len(m.NCProxyAddr) - if l > 0 { - n += 1 + l + sovRunhcs(uint64(l)) - } - l = len(m.LogLevel) - if l > 0 { - n += 2 + l + sovRunhcs(uint64(l)) - } - if m.IoRetryTimeoutInSec != 0 { - n += 2 + sovRunhcs(uint64(m.IoRetryTimeoutInSec)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ProcessDetails) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ImageName) - if l > 0 { - n += 1 + l + sovRunhcs(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovRunhcs(uint64(l)) - if m.KernelTime_100Ns != 0 { - n += 1 + sovRunhcs(uint64(m.KernelTime_100Ns)) - } - if m.MemoryCommitBytes != 0 { - n += 1 + sovRunhcs(uint64(m.MemoryCommitBytes)) - } - if m.MemoryWorkingSetPrivateBytes != 0 { - n += 1 + sovRunhcs(uint64(m.MemoryWorkingSetPrivateBytes)) - } - if m.MemoryWorkingSetSharedBytes != 0 { - n += 1 + sovRunhcs(uint64(m.MemoryWorkingSetSharedBytes)) - } - if m.ProcessID != 0 { - n += 1 + sovRunhcs(uint64(m.ProcessID)) - } - if m.UserTime_100Ns != 0 { - n += 1 + sovRunhcs(uint64(m.UserTime_100Ns)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovRunhcs(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRunhcs(x uint64) (n 
int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRunhcs(x uint64) (n int) { - return sovRunhcs(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Options) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Options{`, - `Debug:` + fmt.Sprintf("%v", this.Debug) + `,`, - `DebugType:` + fmt.Sprintf("%v", this.DebugType) + `,`, - `RegistryRoot:` + fmt.Sprintf("%v", this.RegistryRoot) + `,`, - `SandboxImage:` + fmt.Sprintf("%v", this.SandboxImage) + `,`, - `SandboxPlatform:` + fmt.Sprintf("%v", this.SandboxPlatform) + `,`, - `SandboxIsolation:` + fmt.Sprintf("%v", this.SandboxIsolation) + `,`, - `BootFilesRootPath:` + fmt.Sprintf("%v", this.BootFilesRootPath) + `,`, - `VmProcessorCount:` + fmt.Sprintf("%v", this.VmProcessorCount) + `,`, - `VmMemorySizeInMb:` + fmt.Sprintf("%v", this.VmMemorySizeInMb) + `,`, - `GPUVHDPath:` + fmt.Sprintf("%v", this.GPUVHDPath) + `,`, - `ScaleCpuLimitsToSandbox:` + fmt.Sprintf("%v", this.ScaleCpuLimitsToSandbox) + `,`, - `DefaultContainerScratchSizeInGb:` + fmt.Sprintf("%v", this.DefaultContainerScratchSizeInGb) + `,`, - `DefaultVmScratchSizeInGb:` + fmt.Sprintf("%v", this.DefaultVmScratchSizeInGb) + `,`, - `ShareScratch:` + fmt.Sprintf("%v", this.ShareScratch) + `,`, - `NCProxyAddr:` + fmt.Sprintf("%v", this.NCProxyAddr) + `,`, - `LogLevel:` + fmt.Sprintf("%v", this.LogLevel) + `,`, - `IoRetryTimeoutInSec:` + fmt.Sprintf("%v", this.IoRetryTimeoutInSec) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ProcessDetails) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ProcessDetails{`, - `ImageName:` + fmt.Sprintf("%v", this.ImageName) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `KernelTime_100Ns:` + fmt.Sprintf("%v", this.KernelTime_100Ns) + `,`, - 
`MemoryCommitBytes:` + fmt.Sprintf("%v", this.MemoryCommitBytes) + `,`, - `MemoryWorkingSetPrivateBytes:` + fmt.Sprintf("%v", this.MemoryWorkingSetPrivateBytes) + `,`, - `MemoryWorkingSetSharedBytes:` + fmt.Sprintf("%v", this.MemoryWorkingSetSharedBytes) + `,`, - `ProcessID:` + fmt.Sprintf("%v", this.ProcessID) + `,`, - `UserTime_100Ns:` + fmt.Sprintf("%v", this.UserTime_100Ns) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringRunhcs(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Options) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Options: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Options: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Debug", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Debug = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DebugType", wireType) - } - m.DebugType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DebugType |= Options_DebugType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RegistryRoot", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunhcs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunhcs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RegistryRoot = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SandboxImage", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunhcs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunhcs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SandboxImage = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SandboxPlatform", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthRunhcs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunhcs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SandboxPlatform = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SandboxIsolation", wireType) - } - m.SandboxIsolation = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SandboxIsolation |= Options_SandboxIsolation(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BootFilesRootPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunhcs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunhcs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BootFilesRootPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VmProcessorCount", wireType) - } - m.VmProcessorCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.VmProcessorCount |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VmMemorySizeInMb", wireType) - } - m.VmMemorySizeInMb = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.VmMemorySizeInMb |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GPUVHDPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunhcs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunhcs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GPUVHDPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleCpuLimitsToSandbox", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ScaleCpuLimitsToSandbox = bool(v != 0) - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultContainerScratchSizeInGb", wireType) - } - m.DefaultContainerScratchSizeInGb = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DefaultContainerScratchSizeInGb |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultVmScratchSizeInGb", wireType) - } - m.DefaultVmScratchSizeInGb = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DefaultVmScratchSizeInGb |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareScratch", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ShareScratch = bool(v != 0) - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NCProxyAddr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunhcs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunhcs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NCProxyAddr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LogLevel", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunhcs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunhcs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LogLevel = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 17: - if wireType != 0 { - return fmt.Errorf("proto: 
wrong wireType = %d for field IoRetryTimeoutInSec", wireType) - } - m.IoRetryTimeoutInSec = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IoRetryTimeoutInSec |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRunhcs(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRunhcs - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRunhcs - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProcessDetails) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProcessDetails: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProcessDetails: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunhcs - } - postIndex := iNdEx + intStringLen 
- if postIndex < 0 { - return ErrInvalidLengthRunhcs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImageName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRunhcs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRunhcs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KernelTime_100Ns", wireType) - } - m.KernelTime_100Ns = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KernelTime_100Ns |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryCommitBytes", wireType) - } - m.MemoryCommitBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryCommitBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryWorkingSetPrivateBytes", wireType) - } - m.MemoryWorkingSetPrivateBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF 
- } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryWorkingSetPrivateBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryWorkingSetSharedBytes", wireType) - } - m.MemoryWorkingSetSharedBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryWorkingSetSharedBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProcessID", wireType) - } - m.ProcessID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ProcessID |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UserTime_100Ns", wireType) - } - m.UserTime_100Ns = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UserTime_100Ns |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunhcs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunhcs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunhcs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipRunhcs(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRunhcs - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRunhcs - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRunhcs(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRunhcs - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRunhcs - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRunhcs - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthRunhcs - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthRunhcs - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRunhcs - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := 
skipRunhcs(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthRunhcs - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRunhcs = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRunhcs = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto deleted file mode 100644 index 60c89ad..0000000 --- a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto +++ /dev/null @@ -1,115 +0,0 @@ -syntax = "proto3"; - -package containerd.runhcs.v1; - -import weak "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options;options"; - -// Options are the set of customizations that can be passed at Create time. -message Options { - // Enable debug tracing (sets the logrus log level to debug). This may be deprecated in the future, prefer - // log_level as this will override debug if both of them are set. - bool debug = 1; - - enum DebugType { - NPIPE = 0; - FILE = 1; - ETW = 2; - } - - // debug tracing output type - DebugType debug_type = 2; - - // registry key root for storage of the runhcs container state - string registry_root = 3; - - // sandbox_image is the image to use for the sandbox that matches the - // sandbox_platform. - string sandbox_image = 4; - - // sandbox_platform is a CRI setting that specifies the platform - // architecture for all sandbox's in this runtime. Values are - // 'windows/amd64' and 'linux/amd64'. 
- string sandbox_platform = 5; - - enum SandboxIsolation { - PROCESS = 0; - HYPERVISOR = 1; - } - - // sandbox_isolation is a CRI setting that specifies the isolation level of - // the sandbox. For Windows runtime PROCESS and HYPERVISOR are valid. For - // LCOW only HYPERVISOR is valid and default if omitted. - SandboxIsolation sandbox_isolation = 6; - - // boot_files_root_path is the path to the directory containing the LCOW - // kernel and root FS files. - string boot_files_root_path = 7; - - // vm_processor_count is the default number of processors to create for the - // hypervisor isolated utility vm. - // - // The platform default if omitted is 2, unless the host only has a single - // core in which case it is 1. - int32 vm_processor_count = 8; - - // vm_memory_size_in_mb is the default amount of memory to assign to the - // hypervisor isolated utility vm. - // - // The platform default is 1024MB if omitted. - int32 vm_memory_size_in_mb = 9; - - // GPUVHDPath is the path to the gpu vhd to add to the uvm - // when a container requests a gpu - string GPUVHDPath = 10; - - // scale_cpu_limits_to_sandbox indicates that container CPU limits should - // be adjusted to account for the difference in number of cores between the - // host and UVM. - bool scale_cpu_limits_to_sandbox = 11; - - // default_container_scratch_size_in_gb is the default scratch size (sandbox.vhdx) - // to be used for containers. Every container will get a sandbox of `size_in_gb` assigned - // instead of the default of 20GB. - int32 default_container_scratch_size_in_gb = 12; - - // default_vm_scratch_size_in_gb is the default scratch size (sandbox.vhdx) - // to be used for the UVM. This only applies to WCOW as LCOW doesn't mount a scratch - // specifically for the UVM. - int32 default_vm_scratch_size_in_gb = 13; - - // share_scratch specifies if we'd like to reuse scratch space between multiple containers. - // This currently only affects LCOW. 
The sandbox containers scratch space is re-used for all - // subsequent containers launched in the pod. - bool share_scratch = 14; - - // NCProxyAddr is the address of the network configuration proxy service. If omitted - // the network is setup locally. - string NCProxyAddr = 15; - - // log_level specifies the logrus log level for the shim. Supported values are a string representation of the - // logrus log levels: "trace", "debug", "info", "warn", "error", "fatal", "panic". This setting will override - // the `debug` field if both are specified, unless the level specified is also "debug", as these are equivalent. - string log_level = 16; - - // io_retry_timeout_in_sec is the timeout in seconds for how long to try and reconnect to an upstream IO provider if a connection is lost. - // The typical example is if Containerd has restarted but is expected to come back online. A 0 for this field is interpreted as an infinite - // timeout. - int32 io_retry_timeout_in_sec = 17; -} - -// ProcessDetails contains additional information about a process. This is the additional -// info returned in the Pids query. 
-message ProcessDetails { - string image_name = 1; - google.protobuf.Timestamp created_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - uint64 kernel_time_100_ns = 3; - uint64 memory_commit_bytes = 4; - uint64 memory_working_set_private_bytes = 5; - uint64 memory_working_set_shared_bytes = 6; - uint32 process_id = 7; - uint64 user_time_100_ns = 8; - string exec_id = 9; -} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/doc.go b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/doc.go deleted file mode 100644 index 26b4d6b..0000000 --- a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -package stats - -import ( - // go mod will not vendor without an import for metrics.proto - _ "github.com/containerd/cgroups/stats/v1" -) diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/next.pb.txt b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/next.pb.txt deleted file mode 100644 index e69de29..0000000 diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go deleted file mode 100644 index 0b41b11..0000000 --- a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go +++ /dev/null @@ -1,2819 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto - -package stats - -import ( - fmt "fmt" - v1 "github.com/containerd/cgroups/stats/v1" - proto "github.com/gogo/protobuf/proto" - _ "github.com/gogo/protobuf/types" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - io "io" - math "math" - reflect "reflect" - strings "strings" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type Statistics struct { - // Types that are valid to be assigned to Container: - // *Statistics_Windows - // *Statistics_Linux - Container isStatistics_Container `protobuf_oneof:"container"` - VM *VirtualMachineStatistics `protobuf:"bytes,3,opt,name=vm,proto3" json:"vm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Statistics) Reset() { *m = Statistics{} } -func (*Statistics) ProtoMessage() {} -func (*Statistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{0} -} -func (m *Statistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Statistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Statistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Statistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_Statistics.Merge(m, src) -} -func (m *Statistics) XXX_Size() int { - return m.Size() -} -func (m *Statistics) XXX_DiscardUnknown() { - xxx_messageInfo_Statistics.DiscardUnknown(m) -} - -var xxx_messageInfo_Statistics proto.InternalMessageInfo - -type isStatistics_Container interface { - isStatistics_Container() - MarshalTo([]byte) (int, error) - Size() int -} - -type Statistics_Windows struct { - Windows *WindowsContainerStatistics `protobuf:"bytes,1,opt,name=windows,proto3,oneof"` -} -type Statistics_Linux struct { - Linux *v1.Metrics 
`protobuf:"bytes,2,opt,name=linux,proto3,oneof"` -} - -func (*Statistics_Windows) isStatistics_Container() {} -func (*Statistics_Linux) isStatistics_Container() {} - -func (m *Statistics) GetContainer() isStatistics_Container { - if m != nil { - return m.Container - } - return nil -} - -func (m *Statistics) GetWindows() *WindowsContainerStatistics { - if x, ok := m.GetContainer().(*Statistics_Windows); ok { - return x.Windows - } - return nil -} - -func (m *Statistics) GetLinux() *v1.Metrics { - if x, ok := m.GetContainer().(*Statistics_Linux); ok { - return x.Linux - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Statistics) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Statistics_OneofMarshaler, _Statistics_OneofUnmarshaler, _Statistics_OneofSizer, []interface{}{ - (*Statistics_Windows)(nil), - (*Statistics_Linux)(nil), - } -} - -func _Statistics_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Statistics) - // container - switch x := m.Container.(type) { - case *Statistics_Windows: - _ = b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Windows); err != nil { - return err - } - case *Statistics_Linux: - _ = b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Linux); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Statistics.Container has unexpected type %T", x) - } - return nil -} - -func _Statistics_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Statistics) - switch tag { - case 1: // container.windows - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(WindowsContainerStatistics) - err := b.DecodeMessage(msg) - m.Container = &Statistics_Windows{msg} - return true, err - case 2: // 
container.linux - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(v1.Metrics) - err := b.DecodeMessage(msg) - m.Container = &Statistics_Linux{msg} - return true, err - default: - return false, nil - } -} - -func _Statistics_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Statistics) - // container - switch x := m.Container.(type) { - case *Statistics_Windows: - s := proto.Size(x.Windows) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *Statistics_Linux: - s := proto.Size(x.Linux) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type WindowsContainerStatistics struct { - Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - ContainerStartTime time.Time `protobuf:"bytes,2,opt,name=container_start_time,json=containerStartTime,proto3,stdtime" json:"container_start_time"` - UptimeNS uint64 `protobuf:"varint,3,opt,name=uptime_ns,json=uptimeNs,proto3" json:"uptime_ns,omitempty"` - Processor *WindowsContainerProcessorStatistics `protobuf:"bytes,4,opt,name=processor,proto3" json:"processor,omitempty"` - Memory *WindowsContainerMemoryStatistics `protobuf:"bytes,5,opt,name=memory,proto3" json:"memory,omitempty"` - Storage *WindowsContainerStorageStatistics `protobuf:"bytes,6,opt,name=storage,proto3" json:"storage,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WindowsContainerStatistics) Reset() { *m = WindowsContainerStatistics{} } -func (*WindowsContainerStatistics) ProtoMessage() {} -func (*WindowsContainerStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{1} -} -func (m *WindowsContainerStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WindowsContainerStatistics) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WindowsContainerStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WindowsContainerStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_WindowsContainerStatistics.Merge(m, src) -} -func (m *WindowsContainerStatistics) XXX_Size() int { - return m.Size() -} -func (m *WindowsContainerStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_WindowsContainerStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_WindowsContainerStatistics proto.InternalMessageInfo - -type WindowsContainerProcessorStatistics struct { - TotalRuntimeNS uint64 `protobuf:"varint,1,opt,name=total_runtime_ns,json=totalRuntimeNs,proto3" json:"total_runtime_ns,omitempty"` - RuntimeUserNS uint64 `protobuf:"varint,2,opt,name=runtime_user_ns,json=runtimeUserNs,proto3" json:"runtime_user_ns,omitempty"` - RuntimeKernelNS uint64 `protobuf:"varint,3,opt,name=runtime_kernel_ns,json=runtimeKernelNs,proto3" json:"runtime_kernel_ns,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WindowsContainerProcessorStatistics) Reset() { *m = WindowsContainerProcessorStatistics{} } -func (*WindowsContainerProcessorStatistics) ProtoMessage() {} -func (*WindowsContainerProcessorStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{2} -} -func (m *WindowsContainerProcessorStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WindowsContainerProcessorStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WindowsContainerProcessorStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - 
} -} -func (m *WindowsContainerProcessorStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_WindowsContainerProcessorStatistics.Merge(m, src) -} -func (m *WindowsContainerProcessorStatistics) XXX_Size() int { - return m.Size() -} -func (m *WindowsContainerProcessorStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_WindowsContainerProcessorStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_WindowsContainerProcessorStatistics proto.InternalMessageInfo - -type WindowsContainerMemoryStatistics struct { - MemoryUsageCommitBytes uint64 `protobuf:"varint,1,opt,name=memory_usage_commit_bytes,json=memoryUsageCommitBytes,proto3" json:"memory_usage_commit_bytes,omitempty"` - MemoryUsageCommitPeakBytes uint64 `protobuf:"varint,2,opt,name=memory_usage_commit_peak_bytes,json=memoryUsageCommitPeakBytes,proto3" json:"memory_usage_commit_peak_bytes,omitempty"` - MemoryUsagePrivateWorkingSetBytes uint64 `protobuf:"varint,3,opt,name=memory_usage_private_working_set_bytes,json=memoryUsagePrivateWorkingSetBytes,proto3" json:"memory_usage_private_working_set_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WindowsContainerMemoryStatistics) Reset() { *m = WindowsContainerMemoryStatistics{} } -func (*WindowsContainerMemoryStatistics) ProtoMessage() {} -func (*WindowsContainerMemoryStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{3} -} -func (m *WindowsContainerMemoryStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WindowsContainerMemoryStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WindowsContainerMemoryStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WindowsContainerMemoryStatistics) XXX_Merge(src proto.Message) { 
- xxx_messageInfo_WindowsContainerMemoryStatistics.Merge(m, src) -} -func (m *WindowsContainerMemoryStatistics) XXX_Size() int { - return m.Size() -} -func (m *WindowsContainerMemoryStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_WindowsContainerMemoryStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_WindowsContainerMemoryStatistics proto.InternalMessageInfo - -type WindowsContainerStorageStatistics struct { - ReadCountNormalized uint64 `protobuf:"varint,1,opt,name=read_count_normalized,json=readCountNormalized,proto3" json:"read_count_normalized,omitempty"` - ReadSizeBytes uint64 `protobuf:"varint,2,opt,name=read_size_bytes,json=readSizeBytes,proto3" json:"read_size_bytes,omitempty"` - WriteCountNormalized uint64 `protobuf:"varint,3,opt,name=write_count_normalized,json=writeCountNormalized,proto3" json:"write_count_normalized,omitempty"` - WriteSizeBytes uint64 `protobuf:"varint,4,opt,name=write_size_bytes,json=writeSizeBytes,proto3" json:"write_size_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WindowsContainerStorageStatistics) Reset() { *m = WindowsContainerStorageStatistics{} } -func (*WindowsContainerStorageStatistics) ProtoMessage() {} -func (*WindowsContainerStorageStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{4} -} -func (m *WindowsContainerStorageStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WindowsContainerStorageStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WindowsContainerStorageStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WindowsContainerStorageStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_WindowsContainerStorageStatistics.Merge(m, src) -} -func (m 
*WindowsContainerStorageStatistics) XXX_Size() int { - return m.Size() -} -func (m *WindowsContainerStorageStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_WindowsContainerStorageStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_WindowsContainerStorageStatistics proto.InternalMessageInfo - -type VirtualMachineStatistics struct { - Processor *VirtualMachineProcessorStatistics `protobuf:"bytes,1,opt,name=processor,proto3" json:"processor,omitempty"` - Memory *VirtualMachineMemoryStatistics `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VirtualMachineStatistics) Reset() { *m = VirtualMachineStatistics{} } -func (*VirtualMachineStatistics) ProtoMessage() {} -func (*VirtualMachineStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{5} -} -func (m *VirtualMachineStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VirtualMachineStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VirtualMachineStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VirtualMachineStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_VirtualMachineStatistics.Merge(m, src) -} -func (m *VirtualMachineStatistics) XXX_Size() int { - return m.Size() -} -func (m *VirtualMachineStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_VirtualMachineStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_VirtualMachineStatistics proto.InternalMessageInfo - -type VirtualMachineProcessorStatistics struct { - TotalRuntimeNS uint64 `protobuf:"varint,1,opt,name=total_runtime_ns,json=totalRuntimeNs,proto3" json:"total_runtime_ns,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized 
[]byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VirtualMachineProcessorStatistics) Reset() { *m = VirtualMachineProcessorStatistics{} } -func (*VirtualMachineProcessorStatistics) ProtoMessage() {} -func (*VirtualMachineProcessorStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{6} -} -func (m *VirtualMachineProcessorStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VirtualMachineProcessorStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VirtualMachineProcessorStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VirtualMachineProcessorStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_VirtualMachineProcessorStatistics.Merge(m, src) -} -func (m *VirtualMachineProcessorStatistics) XXX_Size() int { - return m.Size() -} -func (m *VirtualMachineProcessorStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_VirtualMachineProcessorStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_VirtualMachineProcessorStatistics proto.InternalMessageInfo - -type VirtualMachineMemoryStatistics struct { - WorkingSetBytes uint64 `protobuf:"varint,1,opt,name=working_set_bytes,json=workingSetBytes,proto3" json:"working_set_bytes,omitempty"` - VirtualNodeCount uint32 `protobuf:"varint,2,opt,name=virtual_node_count,json=virtualNodeCount,proto3" json:"virtual_node_count,omitempty"` - VmMemory *VirtualMachineMemory `protobuf:"bytes,3,opt,name=vm_memory,json=vmMemory,proto3" json:"vm_memory,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VirtualMachineMemoryStatistics) Reset() { *m = VirtualMachineMemoryStatistics{} } -func (*VirtualMachineMemoryStatistics) ProtoMessage() {} -func (*VirtualMachineMemoryStatistics) 
Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{7} -} -func (m *VirtualMachineMemoryStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VirtualMachineMemoryStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VirtualMachineMemoryStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VirtualMachineMemoryStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_VirtualMachineMemoryStatistics.Merge(m, src) -} -func (m *VirtualMachineMemoryStatistics) XXX_Size() int { - return m.Size() -} -func (m *VirtualMachineMemoryStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_VirtualMachineMemoryStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_VirtualMachineMemoryStatistics proto.InternalMessageInfo - -type VirtualMachineMemory struct { - AvailableMemory int32 `protobuf:"varint,1,opt,name=available_memory,json=availableMemory,proto3" json:"available_memory,omitempty"` - AvailableMemoryBuffer int32 `protobuf:"varint,2,opt,name=available_memory_buffer,json=availableMemoryBuffer,proto3" json:"available_memory_buffer,omitempty"` - ReservedMemory uint64 `protobuf:"varint,3,opt,name=reserved_memory,json=reservedMemory,proto3" json:"reserved_memory,omitempty"` - AssignedMemory uint64 `protobuf:"varint,4,opt,name=assigned_memory,json=assignedMemory,proto3" json:"assigned_memory,omitempty"` - SlpActive bool `protobuf:"varint,5,opt,name=slp_active,json=slpActive,proto3" json:"slp_active,omitempty"` - BalancingEnabled bool `protobuf:"varint,6,opt,name=balancing_enabled,json=balancingEnabled,proto3" json:"balancing_enabled,omitempty"` - DmOperationInProgress bool `protobuf:"varint,7,opt,name=dm_operation_in_progress,json=dmOperationInProgress,proto3" json:"dm_operation_in_progress,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - 
XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VirtualMachineMemory) Reset() { *m = VirtualMachineMemory{} } -func (*VirtualMachineMemory) ProtoMessage() {} -func (*VirtualMachineMemory) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{8} -} -func (m *VirtualMachineMemory) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VirtualMachineMemory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VirtualMachineMemory.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VirtualMachineMemory) XXX_Merge(src proto.Message) { - xxx_messageInfo_VirtualMachineMemory.Merge(m, src) -} -func (m *VirtualMachineMemory) XXX_Size() int { - return m.Size() -} -func (m *VirtualMachineMemory) XXX_DiscardUnknown() { - xxx_messageInfo_VirtualMachineMemory.DiscardUnknown(m) -} - -var xxx_messageInfo_VirtualMachineMemory proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Statistics)(nil), "containerd.runhcs.stats.v1.Statistics") - proto.RegisterType((*WindowsContainerStatistics)(nil), "containerd.runhcs.stats.v1.WindowsContainerStatistics") - proto.RegisterType((*WindowsContainerProcessorStatistics)(nil), "containerd.runhcs.stats.v1.WindowsContainerProcessorStatistics") - proto.RegisterType((*WindowsContainerMemoryStatistics)(nil), "containerd.runhcs.stats.v1.WindowsContainerMemoryStatistics") - proto.RegisterType((*WindowsContainerStorageStatistics)(nil), "containerd.runhcs.stats.v1.WindowsContainerStorageStatistics") - proto.RegisterType((*VirtualMachineStatistics)(nil), "containerd.runhcs.stats.v1.VirtualMachineStatistics") - proto.RegisterType((*VirtualMachineProcessorStatistics)(nil), "containerd.runhcs.stats.v1.VirtualMachineProcessorStatistics") - proto.RegisterType((*VirtualMachineMemoryStatistics)(nil), 
"containerd.runhcs.stats.v1.VirtualMachineMemoryStatistics") - proto.RegisterType((*VirtualMachineMemory)(nil), "containerd.runhcs.stats.v1.VirtualMachineMemory") -} - -func init() { - proto.RegisterFile("github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto", fileDescriptor_23217f96da3a05cc) -} - -var fileDescriptor_23217f96da3a05cc = []byte{ - // 1037 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x6d, 0x6f, 0xdb, 0x44, - 0x1c, 0x8f, 0xb3, 0x3e, 0x24, 0x37, 0xda, 0xb4, 0xb7, 0x76, 0x84, 0x48, 0x24, 0x6b, 0x90, 0xf6, - 0x00, 0x34, 0xa1, 0xa3, 0x1a, 0x1a, 0x0c, 0x4d, 0xa4, 0x02, 0x0d, 0xb1, 0x84, 0x72, 0xe9, 0x03, - 0x02, 0x21, 0x73, 0xb1, 0xaf, 0xee, 0xa9, 0xb6, 0xcf, 0xba, 0x3b, 0xbb, 0xac, 0xaf, 0xf8, 0x08, - 0x7c, 0xac, 0x22, 0xde, 0xec, 0x25, 0xaf, 0x32, 0x96, 0x6f, 0x80, 0x84, 0x78, 0x3d, 0xf9, 0xee, - 0x9c, 0xb8, 0xed, 0xba, 0xb6, 0xda, 0x9b, 0xc8, 0xfe, 0xff, 0x1e, 0xee, 0xff, 0x70, 0xe7, 0x0b, - 0x78, 0xea, 0x51, 0xb9, 0x1f, 0x0f, 0x5a, 0x0e, 0x0b, 0xda, 0x5d, 0xea, 0x70, 0x26, 0xd8, 0x9e, - 0x6c, 0xef, 0x3b, 0x42, 0xec, 0xd3, 0xa0, 0xed, 0x04, 0x6e, 0xdb, 0x61, 0xa1, 0xc4, 0x34, 0x24, - 0xdc, 0x5d, 0x4d, 0x63, 0xab, 0x3c, 0x0e, 0xf7, 0x1d, 0xb1, 0x9a, 0xac, 0xb5, 0x85, 0xc4, 0x52, - 0xe8, 0xdf, 0x56, 0xc4, 0x99, 0x64, 0xb0, 0x36, 0x21, 0xb7, 0x34, 0xaf, 0xa5, 0xe1, 0x64, 0xad, - 0xb6, 0xe4, 0x31, 0x8f, 0x29, 0x5a, 0x3b, 0x7d, 0xd2, 0x8a, 0x5a, 0xc3, 0x63, 0xcc, 0xf3, 0x49, - 0x5b, 0xbd, 0x0d, 0xe2, 0xbd, 0xb6, 0xa4, 0x01, 0x11, 0x12, 0x07, 0x91, 0x21, 0xac, 0xe7, 0x12, - 0x9c, 0xb8, 0xb7, 0x1d, 0x8f, 0xb3, 0x38, 0x32, 0xab, 0xb7, 0x93, 0xb5, 0x76, 0x40, 0x24, 0xa7, - 0x8e, 0x49, 0xa4, 0xf9, 0xbf, 0x05, 0x40, 0x5f, 0x62, 0x49, 0x85, 0xa4, 0x8e, 0x80, 0x08, 0xcc, - 0x1e, 0xd2, 0xd0, 0x65, 0x87, 0xa2, 0x6a, 0xdd, 0xb2, 0xee, 0x5e, 0xbf, 0xff, 0xa0, 0x75, 0x7e, - 0xa6, 0xad, 0x5d, 0x4d, 0xdd, 0xc8, 0x18, 0x13, 0xa3, 0x27, 0x05, 0x94, 0x19, 0xc1, 0x87, 0x60, - 0xda, 0xa7, 
0x61, 0xfc, 0x5b, 0xb5, 0xa8, 0x1c, 0x57, 0x5a, 0x94, 0xe5, 0x4d, 0x4d, 0x82, 0xa9, - 0x5f, 0x57, 0xa7, 0xf6, 0xa4, 0x80, 0xb4, 0x02, 0x3e, 0x05, 0xc5, 0x24, 0xa8, 0x5e, 0x53, 0xba, - 0xf5, 0x37, 0x65, 0xb2, 0x43, 0xb9, 0x8c, 0xb1, 0xdf, 0xc5, 0xce, 0x3e, 0x0d, 0xc9, 0x24, 0x8f, - 0xce, 0xcc, 0x68, 0xd8, 0x28, 0xee, 0x74, 0x51, 0x31, 0x09, 0x3a, 0xd7, 0x41, 0x79, 0x6c, 0xd1, - 0xfc, 0xf7, 0x1a, 0xa8, 0x9d, 0x9f, 0x3f, 0xec, 0x80, 0xf2, 0xb8, 0xc1, 0xa6, 0x15, 0xb5, 0x96, - 0x1e, 0x41, 0x2b, 0x1b, 0x41, 0x6b, 0x2b, 0x63, 0x74, 0x4a, 0xc7, 0xc3, 0x46, 0xe1, 0x8f, 0x17, - 0x0d, 0x0b, 0x4d, 0x64, 0x70, 0x07, 0x2c, 0x8d, 0xd7, 0xb3, 0x85, 0xc4, 0x5c, 0xda, 0x29, 0x68, - 0xfa, 0x70, 0x39, 0x3b, 0xe8, 0xe4, 0x92, 0xe3, 0x32, 0xa5, 0xc0, 0x7b, 0xa0, 0x1c, 0x47, 0xa9, - 0x93, 0x1d, 0x0a, 0xd5, 0x9c, 0xa9, 0xce, 0x3b, 0xa3, 0x61, 0xa3, 0xb4, 0xad, 0x82, 0xbd, 0x3e, - 0x2a, 0x69, 0xb8, 0x27, 0xe0, 0x2f, 0xa0, 0x1c, 0x71, 0xe6, 0x10, 0x21, 0x18, 0xaf, 0x4e, 0xa9, - 0x75, 0x1f, 0x5f, 0x65, 0xa2, 0x9b, 0x99, 0x78, 0xd2, 0x1a, 0x34, 0x71, 0x84, 0x5b, 0x60, 0x26, - 0x20, 0x01, 0xe3, 0xcf, 0xaa, 0xd3, 0xca, 0xfb, 0xd1, 0x55, 0xbc, 0xbb, 0x4a, 0x99, 0x33, 0x36, - 0x5e, 0x70, 0x17, 0xcc, 0x0a, 0xc9, 0x38, 0xf6, 0x48, 0x75, 0x46, 0xd9, 0x7e, 0x79, 0xb5, 0x4d, - 0xa8, 0xa4, 0x39, 0xdf, 0xcc, 0xad, 0xf9, 0xc2, 0x02, 0x1f, 0x5c, 0xa2, 0x42, 0xf8, 0x08, 0x2c, - 0x48, 0x26, 0xb1, 0x6f, 0xf3, 0x38, 0xcc, 0xfa, 0x6c, 0xa9, 0x3e, 0xc3, 0xd1, 0xb0, 0x31, 0xbf, - 0x95, 0x62, 0x48, 0x43, 0xbd, 0x3e, 0x9a, 0x97, 0xf9, 0xf7, 0x74, 0xbf, 0x57, 0x32, 0x5d, 0x2c, - 0x08, 0x4f, 0xc5, 0x45, 0x25, 0x5e, 0x1c, 0x0d, 0x1b, 0x73, 0x86, 0xb7, 0x2d, 0x08, 0xef, 0xf5, - 0xd1, 0x1c, 0xcf, 0xbd, 0x0a, 0xf8, 0x18, 0x2c, 0x66, 0xd2, 0x03, 0xc2, 0x43, 0xe2, 0x4f, 0x26, - 0x7c, 0x63, 0x34, 0x6c, 0x54, 0x8c, 0xf8, 0x3b, 0x85, 0xf5, 0xfa, 0x28, 0x5b, 0xc8, 0x04, 0x44, - 0xf3, 0x3f, 0x0b, 0xdc, 0xba, 0xa8, 0xcf, 0xf0, 0x21, 0x78, 0x4f, 0x77, 0xda, 0x8e, 0x05, 0xf6, - 0x88, 0xed, 0xb0, 0x20, 0xa0, 0xd2, 0x1e, 0x3c, 
0x93, 0xc4, 0xd4, 0x89, 0x6e, 0x6a, 0xc2, 0x76, - 0x8a, 0x6f, 0x28, 0xb8, 0x93, 0xa2, 0xb0, 0x03, 0xea, 0xaf, 0x93, 0x46, 0x04, 0x1f, 0x18, 0xbd, - 0x2a, 0x15, 0xd5, 0xce, 0xe8, 0x37, 0x09, 0x3e, 0xd0, 0x1e, 0x3f, 0x80, 0xdb, 0x27, 0x3c, 0x22, - 0x4e, 0x13, 0x2c, 0x89, 0x7d, 0xc8, 0xf8, 0x01, 0x0d, 0x3d, 0x5b, 0x90, 0x2c, 0x17, 0x55, 0x39, - 0x5a, 0xc9, 0x79, 0x6d, 0x6a, 0xee, 0xae, 0xa6, 0xf6, 0x89, 0x4e, 0x2b, 0x1d, 0xec, 0xca, 0x85, - 0xfb, 0x00, 0xde, 0x07, 0xcb, 0x9c, 0x60, 0xd7, 0x76, 0x58, 0x1c, 0x4a, 0x3b, 0x64, 0x3c, 0xc0, - 0x3e, 0x3d, 0x22, 0xae, 0xa9, 0xf9, 0x46, 0x0a, 0x6e, 0xa4, 0x58, 0x6f, 0x0c, 0xc1, 0xdb, 0xa0, - 0xa2, 0x34, 0x82, 0x1e, 0x91, 0x13, 0x15, 0xce, 0xa5, 0xe1, 0x3e, 0x3d, 0x22, 0xba, 0xa8, 0x75, - 0x70, 0xf3, 0x90, 0x53, 0x49, 0xce, 0x9a, 0xeb, 0x22, 0x96, 0x14, 0x7a, 0xda, 0xfd, 0x2e, 0x58, - 0xd0, 0xaa, 0x9c, 0xfd, 0x94, 0xe2, 0xcf, 0xab, 0xf8, 0xd8, 0xbf, 0xf9, 0x97, 0x05, 0xaa, 0xe7, - 0x7d, 0xe4, 0xe0, 0xcf, 0xf9, 0x53, 0x6e, 0x5d, 0x7c, 0x64, 0x4e, 0x1a, 0x5d, 0x70, 0xc6, 0xd1, - 0xf8, 0x8c, 0xeb, 0xef, 0xd6, 0xe7, 0x97, 0x77, 0x3e, 0xef, 0x84, 0x37, 0x31, 0x58, 0xb9, 0x30, - 0x87, 0xb7, 0x3b, 0x85, 0xcd, 0x3f, 0x2d, 0x50, 0x7f, 0x73, 0x36, 0xf0, 0x43, 0xb0, 0x78, 0x76, - 0xcf, 0xe9, 0xbd, 0x50, 0x39, 0x3c, 0xb9, 0xc3, 0xe0, 0xc7, 0x00, 0x26, 0xda, 0xcd, 0x0e, 0x99, - 0x6b, 0xc6, 0xac, 0x3a, 0x32, 0x87, 0x16, 0x0c, 0xd2, 0x63, 0xae, 0x9e, 0x30, 0xec, 0x82, 0x72, - 0x12, 0xd8, 0xa6, 0x6d, 0xfa, 0xfa, 0xfa, 0xe4, 0xaa, 0x6d, 0x43, 0xa5, 0x24, 0xd0, 0x4f, 0xcd, - 0xe7, 0x45, 0xb0, 0xf4, 0x3a, 0x0a, 0xbc, 0x07, 0x16, 0x70, 0x82, 0xa9, 0x8f, 0x07, 0x3e, 0xc9, - 0x96, 0x4b, 0x0b, 0x98, 0x46, 0x95, 0x71, 0xdc, 0x50, 0x1f, 0x80, 0x77, 0x4f, 0x53, 0xed, 0x41, - 0xbc, 0xb7, 0x47, 0xb8, 0xaa, 0x62, 0x1a, 0x2d, 0x9f, 0x52, 0x74, 0x14, 0x08, 0xef, 0xa4, 0x07, - 0x40, 0x10, 0x9e, 0x10, 0x37, 0x5f, 0xd0, 0x14, 0x9a, 0xcf, 0xc2, 0x66, 0x81, 0x3b, 0xa0, 0x82, - 0x85, 0xa0, 0x5e, 0x38, 0x21, 0x9a, 0xad, 0x9c, 0x85, 0x0d, 0xf1, 0x7d, 0x00, 0x84, 
0x1f, 0xd9, - 0xd8, 0x91, 0x34, 0x21, 0xea, 0xe2, 0x28, 0xa1, 0xb2, 0xf0, 0xa3, 0xaf, 0x54, 0x00, 0x7e, 0x04, - 0x16, 0x07, 0xd8, 0xc7, 0xa1, 0x93, 0xce, 0x85, 0x84, 0x69, 0x42, 0xae, 0xba, 0x07, 0x4a, 0x68, - 0x61, 0x0c, 0x7c, 0xad, 0xe3, 0xf0, 0x33, 0x50, 0x75, 0x03, 0x9b, 0x45, 0x84, 0x63, 0x49, 0x59, - 0x68, 0xd3, 0xd0, 0x8e, 0x38, 0xf3, 0x38, 0x11, 0xa2, 0x3a, 0xab, 0x34, 0xcb, 0x6e, 0xf0, 0x7d, - 0x06, 0x7f, 0x1b, 0x6e, 0x1a, 0xb0, 0xf3, 0xeb, 0xf1, 0xcb, 0x7a, 0xe1, 0xef, 0x97, 0xf5, 0xc2, - 0xef, 0xa3, 0xba, 0x75, 0x3c, 0xaa, 0x5b, 0xcf, 0x47, 0x75, 0xeb, 0x9f, 0x51, 0xdd, 0xfa, 0xe9, - 0x9b, 0xb7, 0xfd, 0xa3, 0xf7, 0x85, 0xfa, 0xfd, 0xb1, 0x30, 0x98, 0x51, 0x37, 0xfb, 0xa7, 0xaf, - 0x02, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xe8, 0x0f, 0x6d, 0x3b, 0x0a, 0x00, 0x00, -} - -func (m *Statistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Statistics) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Container != nil { - nn1, err := m.Container.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn1 - } - if m.VM != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintStats(dAtA, i, uint64(m.VM.Size())) - n2, err := m.VM.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Statistics_Windows) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Windows != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Windows.Size())) - n3, err := m.Windows.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - return i, nil -} -func (m *Statistics_Linux) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Linux != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Linux.Size())) - n4, err := 
m.Linux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - return i, nil -} -func (m *WindowsContainerStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WindowsContainerStatistics) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintStats(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp))) - n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x12 - i++ - i = encodeVarintStats(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ContainerStartTime))) - n6, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ContainerStartTime, dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if m.UptimeNS != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.UptimeNS)) - } - if m.Processor != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Processor.Size())) - n7, err := m.Processor.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.Memory != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Memory.Size())) - n8, err := m.Memory.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.Storage != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Storage.Size())) - n9, err := m.Storage.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *WindowsContainerProcessorStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - 
-func (m *WindowsContainerProcessorStatistics) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TotalRuntimeNS != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.TotalRuntimeNS)) - } - if m.RuntimeUserNS != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.RuntimeUserNS)) - } - if m.RuntimeKernelNS != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.RuntimeKernelNS)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *WindowsContainerMemoryStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WindowsContainerMemoryStatistics) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MemoryUsageCommitBytes != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.MemoryUsageCommitBytes)) - } - if m.MemoryUsageCommitPeakBytes != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.MemoryUsageCommitPeakBytes)) - } - if m.MemoryUsagePrivateWorkingSetBytes != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.MemoryUsagePrivateWorkingSetBytes)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *WindowsContainerStorageStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WindowsContainerStorageStatistics) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ReadCountNormalized != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.ReadCountNormalized)) - } - if m.ReadSizeBytes != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintStats(dAtA, i, 
uint64(m.ReadSizeBytes)) - } - if m.WriteCountNormalized != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.WriteCountNormalized)) - } - if m.WriteSizeBytes != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.WriteSizeBytes)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *VirtualMachineStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VirtualMachineStatistics) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Processor != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Processor.Size())) - n10, err := m.Processor.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - if m.Memory != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Memory.Size())) - n11, err := m.Memory.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *VirtualMachineProcessorStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VirtualMachineProcessorStatistics) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TotalRuntimeNS != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.TotalRuntimeNS)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *VirtualMachineMemoryStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - 
-func (m *VirtualMachineMemoryStatistics) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.WorkingSetBytes != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.WorkingSetBytes)) - } - if m.VirtualNodeCount != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.VirtualNodeCount)) - } - if m.VmMemory != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintStats(dAtA, i, uint64(m.VmMemory.Size())) - n12, err := m.VmMemory.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *VirtualMachineMemory) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VirtualMachineMemory) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.AvailableMemory != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.AvailableMemory)) - } - if m.AvailableMemoryBuffer != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.AvailableMemoryBuffer)) - } - if m.ReservedMemory != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.ReservedMemory)) - } - if m.AssignedMemory != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.AssignedMemory)) - } - if m.SlpActive { - dAtA[i] = 0x28 - i++ - if m.SlpActive { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.BalancingEnabled { - dAtA[i] = 0x30 - i++ - if m.BalancingEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.DmOperationInProgress { - dAtA[i] = 0x38 - i++ - if m.DmOperationInProgress { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeVarintStats(dAtA []byte, offset int, v uint64) 
int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Statistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Container != nil { - n += m.Container.Size() - } - if m.VM != nil { - l = m.VM.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Statistics_Windows) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Windows != nil { - l = m.Windows.Size() - n += 1 + l + sovStats(uint64(l)) - } - return n -} -func (m *Statistics_Linux) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Linux != nil { - l = m.Linux.Size() - n += 1 + l + sovStats(uint64(l)) - } - return n -} -func (m *WindowsContainerStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovStats(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ContainerStartTime) - n += 1 + l + sovStats(uint64(l)) - if m.UptimeNS != 0 { - n += 1 + sovStats(uint64(m.UptimeNS)) - } - if m.Processor != nil { - l = m.Processor.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.Memory != nil { - l = m.Memory.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.Storage != nil { - l = m.Storage.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WindowsContainerProcessorStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TotalRuntimeNS != 0 { - n += 1 + sovStats(uint64(m.TotalRuntimeNS)) - } - if m.RuntimeUserNS != 0 { - n += 1 + sovStats(uint64(m.RuntimeUserNS)) - } - if m.RuntimeKernelNS != 0 { - n += 1 + sovStats(uint64(m.RuntimeKernelNS)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func 
(m *WindowsContainerMemoryStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MemoryUsageCommitBytes != 0 { - n += 1 + sovStats(uint64(m.MemoryUsageCommitBytes)) - } - if m.MemoryUsageCommitPeakBytes != 0 { - n += 1 + sovStats(uint64(m.MemoryUsageCommitPeakBytes)) - } - if m.MemoryUsagePrivateWorkingSetBytes != 0 { - n += 1 + sovStats(uint64(m.MemoryUsagePrivateWorkingSetBytes)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WindowsContainerStorageStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ReadCountNormalized != 0 { - n += 1 + sovStats(uint64(m.ReadCountNormalized)) - } - if m.ReadSizeBytes != 0 { - n += 1 + sovStats(uint64(m.ReadSizeBytes)) - } - if m.WriteCountNormalized != 0 { - n += 1 + sovStats(uint64(m.WriteCountNormalized)) - } - if m.WriteSizeBytes != 0 { - n += 1 + sovStats(uint64(m.WriteSizeBytes)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *VirtualMachineStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Processor != nil { - l = m.Processor.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.Memory != nil { - l = m.Memory.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *VirtualMachineProcessorStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TotalRuntimeNS != 0 { - n += 1 + sovStats(uint64(m.TotalRuntimeNS)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *VirtualMachineMemoryStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.WorkingSetBytes != 0 { - n += 1 + sovStats(uint64(m.WorkingSetBytes)) - } - if m.VirtualNodeCount != 0 { - n += 1 + sovStats(uint64(m.VirtualNodeCount)) - } - if m.VmMemory != nil { - l = m.VmMemory.Size() 
- n += 1 + l + sovStats(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *VirtualMachineMemory) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.AvailableMemory != 0 { - n += 1 + sovStats(uint64(m.AvailableMemory)) - } - if m.AvailableMemoryBuffer != 0 { - n += 1 + sovStats(uint64(m.AvailableMemoryBuffer)) - } - if m.ReservedMemory != 0 { - n += 1 + sovStats(uint64(m.ReservedMemory)) - } - if m.AssignedMemory != 0 { - n += 1 + sovStats(uint64(m.AssignedMemory)) - } - if m.SlpActive { - n += 2 - } - if m.BalancingEnabled { - n += 2 - } - if m.DmOperationInProgress { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovStats(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozStats(x uint64) (n int) { - return sovStats(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Statistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Statistics{`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `VM:` + strings.Replace(fmt.Sprintf("%v", this.VM), "VirtualMachineStatistics", "VirtualMachineStatistics", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Statistics_Windows) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Statistics_Windows{`, - `Windows:` + strings.Replace(fmt.Sprintf("%v", this.Windows), "WindowsContainerStatistics", "WindowsContainerStatistics", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Statistics_Linux) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Statistics_Linux{`, - `Linux:` + strings.Replace(fmt.Sprintf("%v", this.Linux), "Metrics", "v1.Metrics", 1) + `,`, - `}`, - }, "") - return s -} -func (this *WindowsContainerStatistics) String() string { - if 
this == nil { - return "nil" - } - s := strings.Join([]string{`&WindowsContainerStatistics{`, - `Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `ContainerStartTime:` + strings.Replace(strings.Replace(this.ContainerStartTime.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UptimeNS:` + fmt.Sprintf("%v", this.UptimeNS) + `,`, - `Processor:` + strings.Replace(fmt.Sprintf("%v", this.Processor), "WindowsContainerProcessorStatistics", "WindowsContainerProcessorStatistics", 1) + `,`, - `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "WindowsContainerMemoryStatistics", "WindowsContainerMemoryStatistics", 1) + `,`, - `Storage:` + strings.Replace(fmt.Sprintf("%v", this.Storage), "WindowsContainerStorageStatistics", "WindowsContainerStorageStatistics", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WindowsContainerProcessorStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WindowsContainerProcessorStatistics{`, - `TotalRuntimeNS:` + fmt.Sprintf("%v", this.TotalRuntimeNS) + `,`, - `RuntimeUserNS:` + fmt.Sprintf("%v", this.RuntimeUserNS) + `,`, - `RuntimeKernelNS:` + fmt.Sprintf("%v", this.RuntimeKernelNS) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WindowsContainerMemoryStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WindowsContainerMemoryStatistics{`, - `MemoryUsageCommitBytes:` + fmt.Sprintf("%v", this.MemoryUsageCommitBytes) + `,`, - `MemoryUsageCommitPeakBytes:` + fmt.Sprintf("%v", this.MemoryUsageCommitPeakBytes) + `,`, - `MemoryUsagePrivateWorkingSetBytes:` + fmt.Sprintf("%v", this.MemoryUsagePrivateWorkingSetBytes) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - 
return s -} -func (this *WindowsContainerStorageStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WindowsContainerStorageStatistics{`, - `ReadCountNormalized:` + fmt.Sprintf("%v", this.ReadCountNormalized) + `,`, - `ReadSizeBytes:` + fmt.Sprintf("%v", this.ReadSizeBytes) + `,`, - `WriteCountNormalized:` + fmt.Sprintf("%v", this.WriteCountNormalized) + `,`, - `WriteSizeBytes:` + fmt.Sprintf("%v", this.WriteSizeBytes) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *VirtualMachineStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VirtualMachineStatistics{`, - `Processor:` + strings.Replace(fmt.Sprintf("%v", this.Processor), "VirtualMachineProcessorStatistics", "VirtualMachineProcessorStatistics", 1) + `,`, - `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "VirtualMachineMemoryStatistics", "VirtualMachineMemoryStatistics", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *VirtualMachineProcessorStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VirtualMachineProcessorStatistics{`, - `TotalRuntimeNS:` + fmt.Sprintf("%v", this.TotalRuntimeNS) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *VirtualMachineMemoryStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VirtualMachineMemoryStatistics{`, - `WorkingSetBytes:` + fmt.Sprintf("%v", this.WorkingSetBytes) + `,`, - `VirtualNodeCount:` + fmt.Sprintf("%v", this.VirtualNodeCount) + `,`, - `VmMemory:` + strings.Replace(fmt.Sprintf("%v", this.VmMemory), "VirtualMachineMemory", "VirtualMachineMemory", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s 
-} -func (this *VirtualMachineMemory) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VirtualMachineMemory{`, - `AvailableMemory:` + fmt.Sprintf("%v", this.AvailableMemory) + `,`, - `AvailableMemoryBuffer:` + fmt.Sprintf("%v", this.AvailableMemoryBuffer) + `,`, - `ReservedMemory:` + fmt.Sprintf("%v", this.ReservedMemory) + `,`, - `AssignedMemory:` + fmt.Sprintf("%v", this.AssignedMemory) + `,`, - `SlpActive:` + fmt.Sprintf("%v", this.SlpActive) + `,`, - `BalancingEnabled:` + fmt.Sprintf("%v", this.BalancingEnabled) + `,`, - `DmOperationInProgress:` + fmt.Sprintf("%v", this.DmOperationInProgress) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringStats(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Statistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Statistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Statistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Windows", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &WindowsContainerStatistics{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Container = &Statistics_Windows{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &v1.Metrics{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Container = &Statistics_Linux{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VM", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VM == nil { - m.VM = &VirtualMachineStatistics{} - } - if err := m.VM.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - 
if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WindowsContainerStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WindowsContainerStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WindowsContainerStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerStartTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ContainerStartTime, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UptimeNS", wireType) - } - m.UptimeNS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UptimeNS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Processor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Processor == nil { - m.Processor = &WindowsContainerProcessorStatistics{} - } - if err := m.Processor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memory == nil { - m.Memory = &WindowsContainerMemoryStatistics{} - } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Storage == nil { - m.Storage = &WindowsContainerStorageStatistics{} - } - if err := m.Storage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WindowsContainerProcessorStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WindowsContainerProcessorStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WindowsContainerProcessorStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalRuntimeNS", wireType) - } - m.TotalRuntimeNS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRuntimeNS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeUserNS", wireType) - } - m.RuntimeUserNS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RuntimeUserNS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeKernelNS", wireType) - } - m.RuntimeKernelNS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RuntimeKernelNS |= uint64(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WindowsContainerMemoryStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WindowsContainerMemoryStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WindowsContainerMemoryStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryUsageCommitBytes", wireType) - } - m.MemoryUsageCommitBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryUsageCommitBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryUsageCommitPeakBytes", wireType) - } - m.MemoryUsageCommitPeakBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryUsageCommitPeakBytes |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryUsagePrivateWorkingSetBytes", wireType) - } - m.MemoryUsagePrivateWorkingSetBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryUsagePrivateWorkingSetBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WindowsContainerStorageStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WindowsContainerStorageStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WindowsContainerStorageStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadCountNormalized", wireType) - } - m.ReadCountNormalized = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - m.ReadCountNormalized |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadSizeBytes", wireType) - } - m.ReadSizeBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadSizeBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WriteCountNormalized", wireType) - } - m.WriteCountNormalized = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WriteCountNormalized |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WriteSizeBytes", wireType) - } - m.WriteSizeBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WriteSizeBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VirtualMachineStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VirtualMachineStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VirtualMachineStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Processor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Processor == nil { - m.Processor = &VirtualMachineProcessorStatistics{} - } - if err := m.Processor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + 
msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memory == nil { - m.Memory = &VirtualMachineMemoryStatistics{} - } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VirtualMachineProcessorStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VirtualMachineProcessorStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VirtualMachineProcessorStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalRuntimeNS", wireType) - } - m.TotalRuntimeNS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRuntimeNS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if 
skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VirtualMachineMemoryStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VirtualMachineMemoryStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VirtualMachineMemoryStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingSetBytes", wireType) - } - m.WorkingSetBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WorkingSetBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VirtualNodeCount", wireType) - } - m.VirtualNodeCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.VirtualNodeCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VmMemory", wireType) - } - var msglen int 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VmMemory == nil { - m.VmMemory = &VirtualMachineMemory{} - } - if err := m.VmMemory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VirtualMachineMemory) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VirtualMachineMemory: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VirtualMachineMemory: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableMemory", wireType) - } - m.AvailableMemory = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableMemory |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableMemoryBuffer", wireType) - } - m.AvailableMemoryBuffer = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableMemoryBuffer |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReservedMemory", wireType) - } - m.ReservedMemory = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReservedMemory |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AssignedMemory", wireType) - } - m.AssignedMemory = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AssignedMemory |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SlpActive", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SlpActive = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BalancingEnabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.BalancingEnabled = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DmOperationInProgress", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DmOperationInProgress = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipStats(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - 
return 0, ErrInvalidLengthStats - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthStats - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipStats(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthStats - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthStats = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowStats = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto deleted file mode 100644 index ea0b6b6..0000000 --- a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto +++ /dev/null @@ -1,70 +0,0 @@ -syntax = "proto3"; - -package containerd.runhcs.stats.v1; - -import weak "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; -import "github.com/containerd/cgroups/stats/v1/metrics.proto"; - -option go_package = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats;stats"; - -message Statistics { - oneof container { - WindowsContainerStatistics windows = 1; - io.containerd.cgroups.v1.Metrics linux = 2; - } - VirtualMachineStatistics vm = 3 [(gogoproto.customname) = "VM"]; -} - -message WindowsContainerStatistics { - google.protobuf.Timestamp timestamp = 1 
[(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp container_start_time = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - uint64 uptime_ns = 3 [(gogoproto.customname) = "UptimeNS"]; - WindowsContainerProcessorStatistics processor = 4; - WindowsContainerMemoryStatistics memory = 5; - WindowsContainerStorageStatistics storage = 6; -} - -message WindowsContainerProcessorStatistics { - uint64 total_runtime_ns = 1 [(gogoproto.customname) = "TotalRuntimeNS"]; - uint64 runtime_user_ns = 2 [(gogoproto.customname) = "RuntimeUserNS"]; - uint64 runtime_kernel_ns = 3 [(gogoproto.customname) = "RuntimeKernelNS"]; -} - -message WindowsContainerMemoryStatistics { - uint64 memory_usage_commit_bytes = 1; - uint64 memory_usage_commit_peak_bytes = 2; - uint64 memory_usage_private_working_set_bytes = 3; -} - -message WindowsContainerStorageStatistics { - uint64 read_count_normalized = 1; - uint64 read_size_bytes = 2; - uint64 write_count_normalized = 3; - uint64 write_size_bytes = 4; -} - -message VirtualMachineStatistics { - VirtualMachineProcessorStatistics processor = 1; - VirtualMachineMemoryStatistics memory = 2; -} - -message VirtualMachineProcessorStatistics { - uint64 total_runtime_ns = 1 [(gogoproto.customname) = "TotalRuntimeNS"]; -} - -message VirtualMachineMemoryStatistics { - uint64 working_set_bytes = 1; - uint32 virtual_node_count = 2; - VirtualMachineMemory vm_memory = 3; -} - -message VirtualMachineMemory { - int32 available_memory = 1; - int32 available_memory_buffer = 2; - uint64 reserved_memory = 3; - uint64 assigned_memory = 4; - bool slp_active = 5; - bool balancing_enabled = 6; - bool dm_operation_in_progress = 7; -} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go deleted file mode 100644 index 7f1f282..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go +++ /dev/null @@ 
-1,38 +0,0 @@ -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// AttachLayerStorageFilter sets up the layer storage filter on a writable -// container layer. -// -// `layerPath` is a path to a directory the writable layer is mounted. If the -// path does not end in a `\` the platform will append it automatically. -// -// `layerData` is the parent read-only layer data. -func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) { - title := "hcsshim.AttachLayerStorageFilter" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - ) - - bytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - err = hcsAttachLayerStorageFilter(layerPath, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to attach layer storage filter") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go deleted file mode 100644 index 8e28e6c..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go +++ /dev/null @@ -1,26 +0,0 @@ -package computestorage - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// DestroyLayer deletes a container layer. -// -// `layerPath` is a path to a directory containing the layer to export. 
-func DestroyLayer(ctx context.Context, layerPath string) (err error) { - title := "hcsshim.DestroyLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) - - err = hcsDestroyLayer(layerPath) - if err != nil { - return errors.Wrap(err, "failed to destroy layer") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go deleted file mode 100644 index 4354732..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go +++ /dev/null @@ -1,26 +0,0 @@ -package computestorage - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// DetachLayerStorageFilter detaches the layer storage filter on a writable container layer. -// -// `layerPath` is a path to a directory containing the layer to export. 
-func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) { - title := "hcsshim.DetachLayerStorageFilter" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) - - err = hcsDetachLayerStorageFilter(layerPath) - if err != nil { - return errors.Wrap(err, "failed to detach layer storage filter") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go deleted file mode 100644 index a1b12dd..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go +++ /dev/null @@ -1,46 +0,0 @@ -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// ExportLayer exports a container layer. -// -// `layerPath` is a path to a directory containing the layer to export. -// -// `exportFolderPath` is a pre-existing folder to export the layer to. -// -// `layerData` is the parent layer data. -// -// `options` are the export options applied to the exported layer. 
-func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) { - title := "hcsshim.ExportLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - trace.StringAttribute("exportFolderPath", exportFolderPath), - ) - - ldbytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - obytes, err := json.Marshal(options) - if err != nil { - return err - } - - err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes)) - if err != nil { - return errors.Wrap(err, "failed to export layer") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go deleted file mode 100644 index 83c0fa3..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go +++ /dev/null @@ -1,26 +0,0 @@ -package computestorage - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" - "golang.org/x/sys/windows" -) - -// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer. -// -// If the VHD is not mounted it will be temporarily mounted. 
-func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) { - title := "hcsshim.FormatWritableLayerVhd" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - err = hcsFormatWritableLayerVhd(vhdHandle) - if err != nil { - return errors.Wrap(err, "failed to format writable layer vhd") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go deleted file mode 100644 index 87fee45..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go +++ /dev/null @@ -1,193 +0,0 @@ -package computestorage - -import ( - "context" - "os" - "path/filepath" - "syscall" - - "github.com/Microsoft/go-winio/pkg/security" - "github.com/Microsoft/go-winio/vhd" - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -const defaultVHDXBlockSizeInMB = 1 - -// SetupContainerBaseLayer is a helper to setup a containers scratch. It -// will create and format the vhdx's inside and the size is configurable with the sizeInGB -// parameter. -// -// `layerPath` is the path to the base container layer on disk. -// -// `baseVhdPath` is the path to where the base vhdx for the base layer should be created. -// -// `diffVhdPath` is the path where the differencing disk for the base layer should be created. -// -// `sizeInGB` is the size in gigabytes to make the base vhdx. -func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { - var ( - hivesPath = filepath.Join(layerPath, "Hives") - layoutPath = filepath.Join(layerPath, "Layout") - ) - - // We need to remove the hives directory and layout file as `SetupBaseOSLayer` fails if these files - // already exist. `SetupBaseOSLayer` will create these files internally. 
We also remove the base and - // differencing disks if they exist in case we're asking for a different size. - if _, err := os.Stat(hivesPath); err == nil { - if err := os.RemoveAll(hivesPath); err != nil { - return errors.Wrap(err, "failed to remove prexisting hives directory") - } - } - if _, err := os.Stat(layoutPath); err == nil { - if err := os.RemoveAll(layoutPath); err != nil { - return errors.Wrap(err, "failed to remove prexisting layout file") - } - } - - if _, err := os.Stat(baseVhdPath); err == nil { - if err := os.RemoveAll(baseVhdPath); err != nil { - return errors.Wrap(err, "failed to remove base vhdx path") - } - } - if _, err := os.Stat(diffVhdPath); err == nil { - if err := os.RemoveAll(diffVhdPath); err != nil { - return errors.Wrap(err, "failed to remove differencing vhdx") - } - } - - createParams := &vhd.CreateVirtualDiskParameters{ - Version: 2, - Version2: vhd.CreateVersion2{ - MaximumSize: sizeInGB * 1024 * 1024 * 1024, - BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, - }, - } - handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) - if err != nil { - return errors.Wrap(err, "failed to create vhdx") - } - - defer func() { - if err != nil { - _ = syscall.CloseHandle(handle) - os.RemoveAll(baseVhdPath) - os.RemoveAll(diffVhdPath) - } - }() - - if err = FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil { - return err - } - // Base vhd handle must be closed before calling SetupBaseLayer in case of Container layer - if err = syscall.CloseHandle(handle); err != nil { - return errors.Wrap(err, "failed to close vhdx handle") - } - - options := OsLayerOptions{ - Type: OsLayerTypeContainer, - } - - // SetupBaseOSLayer expects an empty vhd handle for a container layer and will - // error out otherwise. 
- if err = SetupBaseOSLayer(ctx, layerPath, 0, options); err != nil { - return err - } - // Create the differencing disk that will be what's copied for the final rw layer - // for a container. - if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { - return errors.Wrap(err, "failed to create differencing disk") - } - - if err = security.GrantVmGroupAccess(baseVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) - } - if err = security.GrantVmGroupAccess(diffVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) - } - return nil -} - -// SetupUtilityVMBaseLayer is a helper to setup a UVMs scratch space. It will create and format -// the vhdx inside and the size is configurable by the sizeInGB parameter. -// -// `uvmPath` is the path to the UtilityVM filesystem. -// -// `baseVhdPath` is the path to where the base vhdx for the UVM should be created. -// -// `diffVhdPath` is the path where the differencing disk for the UVM should be created. -// -// `sizeInGB` specifies the size in gigabytes to make the base vhdx. -func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { - // Remove the base and differencing disks if they exist in case we're asking for a different size. - if _, err := os.Stat(baseVhdPath); err == nil { - if err := os.RemoveAll(baseVhdPath); err != nil { - return errors.Wrap(err, "failed to remove base vhdx") - } - } - if _, err := os.Stat(diffVhdPath); err == nil { - if err := os.RemoveAll(diffVhdPath); err != nil { - return errors.Wrap(err, "failed to remove differencing vhdx") - } - } - - // Just create the vhdx for utilityVM layer, no need to format it. 
- createParams := &vhd.CreateVirtualDiskParameters{ - Version: 2, - Version2: vhd.CreateVersion2{ - MaximumSize: sizeInGB * 1024 * 1024 * 1024, - BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, - }, - } - handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) - if err != nil { - return errors.Wrap(err, "failed to create vhdx") - } - - defer func() { - if err != nil { - _ = syscall.CloseHandle(handle) - os.RemoveAll(baseVhdPath) - os.RemoveAll(diffVhdPath) - } - }() - - // If it is a UtilityVM layer then the base vhdx must be attached when calling - // `SetupBaseOSLayer` - attachParams := &vhd.AttachVirtualDiskParameters{ - Version: 2, - } - if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, attachParams); err != nil { - return errors.Wrapf(err, "failed to attach virtual disk") - } - - options := OsLayerOptions{ - Type: OsLayerTypeVM, - } - if err := SetupBaseOSLayer(ctx, uvmPath, windows.Handle(handle), options); err != nil { - return err - } - - // Detach and close the handle after setting up the layer as we don't need the handle - // for anything else and we no longer need to be attached either. - if err = vhd.DetachVirtualDisk(handle); err != nil { - return errors.Wrap(err, "failed to detach vhdx") - } - if err = syscall.CloseHandle(handle); err != nil { - return errors.Wrap(err, "failed to close vhdx handle") - } - - // Create the differencing disk that will be what's copied for the final rw layer - // for a container. 
- if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { - return errors.Wrap(err, "failed to create differencing disk") - } - - if err := security.GrantVmGroupAccess(baseVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) - } - if err := security.GrantVmGroupAccess(diffVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go deleted file mode 100644 index 0c61dab..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go +++ /dev/null @@ -1,41 +0,0 @@ -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// ImportLayer imports a container layer. -// -// `layerPath` is a path to a directory to import the layer to. If the directory -// does not exist it will be automatically created. -// -// `sourceFolderpath` is a pre-existing folder that contains the layer to -// import. -// -// `layerData` is the parent layer data. 
-func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) { - title := "hcsshim.ImportLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - trace.StringAttribute("sourceFolderPath", sourceFolderPath), - ) - - bytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - err = hcsImportLayer(layerPath, sourceFolderPath, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to import layer") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go deleted file mode 100644 index 53ed8ea..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go +++ /dev/null @@ -1,38 +0,0 @@ -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// InitializeWritableLayer initializes a writable layer for a container. -// -// `layerPath` is a path to a directory the layer is mounted. If the -// path does not end in a `\` the platform will append it automatically. -// -// `layerData` is the parent read-only layer data. 
-func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) { - title := "hcsshim.InitializeWritableLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - ) - - bytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - // Options are not used in the platform as of RS5 - err = hcsInitializeWritableLayer(layerPath, string(bytes), "") - if err != nil { - return errors.Wrap(err, "failed to intitialize container layer") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go deleted file mode 100644 index fcdbbef..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go +++ /dev/null @@ -1,27 +0,0 @@ -package computestorage - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" - "golang.org/x/sys/windows" -) - -// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer. 
-func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) { - title := "hcsshim.GetLayerVhdMountPath" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - var mountPath *uint16 - err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath) - if err != nil { - return "", errors.Wrap(err, "failed to get vhd mount path") - } - path = interop.ConvertAndFreeCoTaskMemString(mountPath) - return path, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go deleted file mode 100644 index 06aaf84..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go +++ /dev/null @@ -1,74 +0,0 @@ -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/osversion" - "github.com/pkg/errors" - "go.opencensus.io/trace" - "golang.org/x/sys/windows" -) - -// SetupBaseOSLayer sets up a layer that contains a base OS for a container. -// -// `layerPath` is a path to a directory containing the layer. -// -// `vhdHandle` is an empty file handle of `options.Type == OsLayerTypeContainer` -// or else it is a file handle to the 'SystemTemplateBase.vhdx' if `options.Type -// == OsLayerTypeVm`. -// -// `options` are the options applied while processing the layer. 
-func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) { - title := "hcsshim.SetupBaseOSLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - ) - - bytes, err := json.Marshal(options) - if err != nil { - return err - } - - err = hcsSetupBaseOSLayer(layerPath, vhdHandle, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to setup base OS layer") - } - return nil -} - -// SetupBaseOSVolume sets up a volume that contains a base OS for a container. -// -// `layerPath` is a path to a directory containing the layer. -// -// `volumePath` is the path to the volume to be used for setup. -// -// `options` are the options applied while processing the layer. -func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) { - if osversion.Build() < 19645 { - return errors.New("SetupBaseOSVolume is not present on builds older than 19645") - } - title := "hcsshim.SetupBaseOSVolume" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - trace.StringAttribute("volumePath", volumePath), - ) - - bytes, err := json.Marshal(options) - if err != nil { - return err - } - - err = hcsSetupBaseOSVolume(layerPath, volumePath, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to setup base OS layer") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go deleted file mode 100644 index 95aff9c..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go +++ /dev/null @@ -1,50 +0,0 @@ -// Package 
computestorage is a wrapper around the HCS storage APIs. These are new storage APIs introduced -// separate from the original graphdriver calls intended to give more freedom around creating -// and managing container layers and scratch spaces. -package computestorage - -import ( - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go storage.go - -//sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer? -//sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer? -//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestoryLayer? -//sys hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) = computestorage.HcsSetupBaseOSLayer? -//sys hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) = computestorage.HcsInitializeWritableLayer? -//sys hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) = computestorage.HcsAttachLayerStorageFilter? -//sys hcsDetachLayerStorageFilter(layerPath string) (hr error) = computestorage.HcsDetachLayerStorageFilter? -//sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd? -//sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath? -//sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume? - -// LayerData is the data used to describe parent layer information. 
-type LayerData struct { - SchemaVersion hcsschema.Version `json:"SchemaVersion,omitempty"` - Layers []hcsschema.Layer `json:"Layers,omitempty"` -} - -// ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall. -type ExportLayerOptions struct { - IsWritableLayer bool `json:"IsWritableLayer,omitempty"` -} - -// OsLayerType is the type of layer being operated on. -type OsLayerType string - -const ( - // OsLayerTypeContainer is a container layer. - OsLayerTypeContainer OsLayerType = "Container" - // OsLayerTypeVM is a virtual machine layer. - OsLayerTypeVM OsLayerType = "Vm" -) - -// OsLayerOptions are the set of options that are used with the `SetupBaseOSLayer` and -// `SetupBaseOSVolume` calls. -type OsLayerOptions struct { - Type OsLayerType `json:"Type,omitempty"` - DisableCiCacheOptimization bool `json:"DisableCiCacheOptimization,omitempty"` - SkipUpdateBcdForBoot bool `json:"SkipUpdateBcdForBoot,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go deleted file mode 100644 index 4f95180..0000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go +++ /dev/null @@ -1,319 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package computestorage - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. 
(perhaps when running - // all.bat?) - return e -} - -var ( - modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") - - procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer") - procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") - procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer") - procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer") - procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer") - procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") - procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") - procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") - procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") - procHcsSetupBaseOSVolume = modcomputestorage.NewProc("HcsSetupBaseOSVolume") -) - -func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(layerData) - if hr != nil { - return - } - return _hcsImportLayer(_p0, _p1, _p2) -} - -func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) { - if hr = procHcsImportLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) { - var _p0 *uint16 - _p0, hr = 
syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(exportFolderPath) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(layerData) - if hr != nil { - return - } - var _p3 *uint16 - _p3, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsExportLayer(_p0, _p1, _p2, _p3) -} - -func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) { - if hr = procHcsExportLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsDestroyLayer(layerPath string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - return _hcsDestroyLayer(_p0) -} - -func _hcsDestroyLayer(layerPath *uint16) (hr error) { - if hr = procHcsDestoryLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsSetupBaseOSLayer(_p0, handle, _p1) -} - -func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) { - if hr = procHcsSetupBaseOSLayer.Find(); hr != nil { - return - } - r0, _, _ := 
syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(writableLayerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(layerData) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsInitializeWritableLayer(_p0, _p1, _p2) -} - -func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) { - if hr = procHcsInitializeWritableLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(layerData) - if hr != nil { - return - } - return _hcsAttachLayerStorageFilter(_p0, _p1) -} - -func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) { - if hr = procHcsAttachLayerStorageFilter.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func 
hcsDetachLayerStorageFilter(layerPath string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - return _hcsDetachLayerStorageFilter(_p0) -} - -func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { - if hr = procHcsDetachLayerStorageFilter.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { - if hr = procHcsFormatWritableLayerVhd.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) { - if hr = procHcsGetLayerVhdMountPath.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(volumePath) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsSetupBaseOSVolume(_p0, _p1, _p2) -} - -func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint16) (hr error) { - if hr = procHcsSetupBaseOSVolume.Find(); hr != nil { - return - } - r0, _, _ := 
syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go deleted file mode 100644 index bfd7228..0000000 --- a/vendor/github.com/Microsoft/hcsshim/container.go +++ /dev/null @@ -1,223 +0,0 @@ -package hcsshim - -import ( - "context" - "fmt" - "os" - "sync" - "time" - - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - "github.com/Microsoft/hcsshim/internal/mergemaps" -) - -// ContainerProperties holds the properties for a container and the processes running in that container -type ContainerProperties = schema1.ContainerProperties - -// MemoryStats holds the memory statistics for a container -type MemoryStats = schema1.MemoryStats - -// ProcessorStats holds the processor statistics for a container -type ProcessorStats = schema1.ProcessorStats - -// StorageStats holds the storage statistics for a container -type StorageStats = schema1.StorageStats - -// NetworkStats holds the network statistics for a container -type NetworkStats = schema1.NetworkStats - -// Statistics is the structure returned by a statistics call on a container -type Statistics = schema1.Statistics - -// ProcessList is the structure of an item returned by a ProcessList call on a container -type ProcessListItem = schema1.ProcessListItem - -// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container -type MappedVirtualDiskController = schema1.MappedVirtualDiskController - -// Type of Request Support in ModifySystem -type RequestType = schema1.RequestType - -// Type of Resource Support in ModifySystem -type ResourceType = schema1.ResourceType - -// RequestType const -const ( 
- Add = schema1.Add - Remove = schema1.Remove - Network = schema1.Network -) - -// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse - -type container struct { - system *hcs.System - waitOnce sync.Once - waitErr error - waitCh chan struct{} -} - -// createComputeSystemAdditionalJSON is read from the environment at initialisation -// time. It allows an environment variable to define additional JSON which -// is merged in the CreateComputeSystem call to HCS. -var createContainerAdditionalJSON []byte - -func init() { - createContainerAdditionalJSON = ([]byte)(os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")) -} - -// CreateContainer creates a new container with the given configuration but does not start it. -func CreateContainer(id string, c *ContainerConfig) (Container, error) { - fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON) - if err != nil { - return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err) - } - - system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig) - if err != nil { - return nil, err - } - return &container{system: system}, err -} - -// OpenContainer opens an existing container by ID. -func OpenContainer(id string) (Container, error) { - system, err := hcs.OpenComputeSystem(context.Background(), id) - if err != nil { - return nil, err - } - return &container{system: system}, err -} - -// GetContainers gets a list of the containers on the system that match the query -func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) { - return hcs.GetComputeSystems(context.Background(), q) -} - -// Start synchronously starts the container. 
-func (container *container) Start() error { - return convertSystemError(container.system.Start(context.Background()), container) -} - -// Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. -func (container *container) Shutdown() error { - err := container.system.Shutdown(context.Background()) - if err != nil { - return convertSystemError(err, container) - } - return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Shutdown"} -} - -// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. -func (container *container) Terminate() error { - err := container.system.Terminate(context.Background()) - if err != nil { - return convertSystemError(err, container) - } - return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Terminate"} -} - -// Waits synchronously waits for the container to shutdown or terminate. -func (container *container) Wait() error { - err := container.system.Wait() - if err == nil { - err = container.system.ExitError() - } - return convertSystemError(err, container) -} - -// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It -// returns false if timeout occurs. -func (container *container) WaitTimeout(timeout time.Duration) error { - container.waitOnce.Do(func() { - container.waitCh = make(chan struct{}) - go func() { - container.waitErr = container.Wait() - close(container.waitCh) - }() - }) - t := time.NewTimer(timeout) - defer t.Stop() - select { - case <-t.C: - return &ContainerError{Container: container, Err: ErrTimeout, Operation: "hcsshim::ComputeSystem::Wait"} - case <-container.waitCh: - return container.waitErr - } -} - -// Pause pauses the execution of a container. 
-func (container *container) Pause() error { - return convertSystemError(container.system.Pause(context.Background()), container) -} - -// Resume resumes the execution of a container. -func (container *container) Resume() error { - return convertSystemError(container.system.Resume(context.Background()), container) -} - -// HasPendingUpdates returns true if the container has updates pending to install -func (container *container) HasPendingUpdates() (bool, error) { - return false, nil -} - -// Statistics returns statistics for the container. This is a legacy v1 call -func (container *container) Statistics() (Statistics, error) { - properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics) - if err != nil { - return Statistics{}, convertSystemError(err, container) - } - - return properties.Statistics, nil -} - -// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call -func (container *container) ProcessList() ([]ProcessListItem, error) { - properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeProcessList) - if err != nil { - return nil, convertSystemError(err, container) - } - - return properties.ProcessList, nil -} - -// This is a legacy v1 call -func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) { - properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeMappedVirtualDisk) - if err != nil { - return nil, convertSystemError(err, container) - } - - return properties.MappedVirtualDiskControllers, nil -} - -// CreateProcess launches a new process within the container. 
-func (container *container) CreateProcess(c *ProcessConfig) (Process, error) { - p, err := container.system.CreateProcess(context.Background(), c) - if err != nil { - return nil, convertSystemError(err, container) - } - return &process{p: p.(*hcs.Process)}, nil -} - -// OpenProcess gets an interface to an existing process within the container. -func (container *container) OpenProcess(pid int) (Process, error) { - p, err := container.system.OpenProcess(context.Background(), pid) - if err != nil { - return nil, convertSystemError(err, container) - } - return &process{p: p}, nil -} - -// Close cleans up any state associated with the container but does not terminate or wait for it. -func (container *container) Close() error { - return convertSystemError(container.system.Close(), container) -} - -// Modify the System -func (container *container) Modify(config *ResourceModificationRequestResponse) error { - return convertSystemError(container.system.Modify(context.Background(), config), container) -} diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go deleted file mode 100644 index f367022..0000000 --- a/vendor/github.com/Microsoft/hcsshim/errors.go +++ /dev/null @@ -1,245 +0,0 @@ -package hcsshim - -import ( - "fmt" - "syscall" - - "github.com/Microsoft/hcsshim/internal/hns" - - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcserror" -) - -var ( - // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists = hcs.exist - ErrComputeSystemDoesNotExist = hcs.ErrComputeSystemDoesNotExist - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrElementNotFound = hcs.ErrElementNotFound - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrNotSupported = hcs.ErrNotSupported - - // ErrInvalidData is an error encountered when the request being 
sent to hcs is invalid/unsupported - // decimal -2147024883 / hex 0x8007000d - ErrInvalidData = hcs.ErrInvalidData - - // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed - ErrHandleClose = hcs.ErrHandleClose - - // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method - ErrAlreadyClosed = hcs.ErrAlreadyClosed - - // ErrInvalidNotificationType is an error encountered when an invalid notification type is used - ErrInvalidNotificationType = hcs.ErrInvalidNotificationType - - // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation - ErrInvalidProcessState = hcs.ErrInvalidProcessState - - // ErrTimeout is an error encountered when waiting on a notification times out - ErrTimeout = hcs.ErrTimeout - - // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for - // a different expected notification - ErrUnexpectedContainerExit = hcs.ErrUnexpectedContainerExit - - // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service - // is lost while waiting for a notification - ErrUnexpectedProcessAbort = hcs.ErrUnexpectedProcessAbort - - // ErrUnexpectedValue is an error encountered when hcs returns an invalid value - ErrUnexpectedValue = hcs.ErrUnexpectedValue - - // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container - ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped - - // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously - ErrVmcomputeOperationPending = hcs.ErrVmcomputeOperationPending - - // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation - ErrVmcomputeOperationInvalidState = 
hcs.ErrVmcomputeOperationInvalidState - - // ErrProcNotFound is an error encountered when a procedure look up fails. - ErrProcNotFound = hcs.ErrProcNotFound - - // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 - // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3. - ErrVmcomputeOperationAccessIsDenied = hcs.ErrVmcomputeOperationAccessIsDenied - - // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management - ErrVmcomputeInvalidJSON = hcs.ErrVmcomputeInvalidJSON - - // ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message - ErrVmcomputeUnknownMessage = hcs.ErrVmcomputeUnknownMessage - - // ErrNotSupported is an error encountered when hcs doesn't support the request - ErrPlatformNotSupported = hcs.ErrPlatformNotSupported -) - -type EndpointNotFoundError = hns.EndpointNotFoundError -type NetworkNotFoundError = hns.NetworkNotFoundError - -// ProcessError is an error encountered in HCS during an operation on a Process object -type ProcessError struct { - Process *process - Operation string - Err error - Events []hcs.ErrorEvent -} - -// ContainerError is an error encountered in HCS during an operation on a Container object -type ContainerError struct { - Container *container - Operation string - Err error - Events []hcs.ErrorEvent -} - -func (e *ContainerError) Error() string { - if e == nil { - return "" - } - - if e.Container == nil { - return "unexpected nil container for error: " + e.Err.Error() - } - - s := "container " + e.Container.system.ID() - - if e.Operation != "" { - s += " encountered an error during " + e.Operation - } - - switch e.Err.(type) { - case nil: - break - case syscall.Errno: - s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err)) - default: - s += fmt.Sprintf(": 
%s", e.Err.Error()) - } - - for _, ev := range e.Events { - s += "\n" + ev.String() - } - - return s -} - -func (e *ProcessError) Error() string { - if e == nil { - return "" - } - - if e.Process == nil { - return "Unexpected nil process for error: " + e.Err.Error() - } - - s := fmt.Sprintf("process %d in container %s", e.Process.p.Pid(), e.Process.p.SystemID()) - if e.Operation != "" { - s += " encountered an error during " + e.Operation - } - - switch e.Err.(type) { - case nil: - break - case syscall.Errno: - s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err)) - default: - s += fmt.Sprintf(": %s", e.Err.Error()) - } - - for _, ev := range e.Events { - s += "\n" + ev.String() - } - - return s -} - -// IsNotExist checks if an error is caused by the Container or Process not existing. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. -func IsNotExist(err error) bool { - if _, ok := err.(EndpointNotFoundError); ok { - return true - } - if _, ok := err.(NetworkNotFoundError); ok { - return true - } - return hcs.IsNotExist(getInnerError(err)) -} - -// IsAlreadyClosed checks if an error is caused by the Container or Process having been -// already closed by a call to the Close() method. -func IsAlreadyClosed(err error) bool { - return hcs.IsAlreadyClosed(getInnerError(err)) -} - -// IsPending returns a boolean indicating whether the error is that -// the requested operation is being completed in the background. -func IsPending(err error) bool { - return hcs.IsPending(getInnerError(err)) -} - -// IsTimeout returns a boolean indicating whether the error is caused by -// a timeout waiting for the operation to complete. 
-func IsTimeout(err error) bool { - return hcs.IsTimeout(getInnerError(err)) -} - -// IsAlreadyStopped returns a boolean indicating whether the error is caused by -// a Container or Process being already stopped. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. -func IsAlreadyStopped(err error) bool { - return hcs.IsAlreadyStopped(getInnerError(err)) -} - -// IsNotSupported returns a boolean indicating whether the error is caused by -// unsupported platform requests -// Note: Currently Unsupported platform requests can be mean either -// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage -// is thrown from the Platform -func IsNotSupported(err error) bool { - return hcs.IsNotSupported(getInnerError(err)) -} - -// IsOperationInvalidState returns true when err is caused by -// `ErrVmcomputeOperationInvalidState`. -func IsOperationInvalidState(err error) bool { - return hcs.IsOperationInvalidState(getInnerError(err)) -} - -// IsAccessIsDenied returns true when err is caused by -// `ErrVmcomputeOperationAccessIsDenied`. 
-func IsAccessIsDenied(err error) bool { - return hcs.IsAccessIsDenied(getInnerError(err)) -} - -func getInnerError(err error) error { - switch pe := err.(type) { - case nil: - return nil - case *ContainerError: - err = pe.Err - case *ProcessError: - err = pe.Err - } - return err -} - -func convertSystemError(err error, c *container) error { - if serr, ok := err.(*hcs.SystemError); ok { - return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events} - } - return err -} - -func convertProcessError(err error, p *process) error { - if perr, ok := err.(*hcs.ProcessError); ok { - return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events} - } - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go b/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go deleted file mode 100644 index 42ac190..0000000 --- a/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go +++ /dev/null @@ -1,1328 +0,0 @@ -package compactext4 - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "path" - "sort" - "strings" - "time" - - "github.com/Microsoft/hcsshim/ext4/internal/format" -) - -// Writer writes a compact ext4 file system. -type Writer struct { - f io.ReadWriteSeeker - bw *bufio.Writer - inodes []*inode - curName string - curInode *inode - pos int64 - dataWritten, dataMax int64 - err error - initialized bool - supportInlineData bool - maxDiskSize int64 - gdBlocks uint32 -} - -// Mode flags for Linux files. 
-const ( - S_IXOTH = format.S_IXOTH - S_IWOTH = format.S_IWOTH - S_IROTH = format.S_IROTH - S_IXGRP = format.S_IXGRP - S_IWGRP = format.S_IWGRP - S_IRGRP = format.S_IRGRP - S_IXUSR = format.S_IXUSR - S_IWUSR = format.S_IWUSR - S_IRUSR = format.S_IRUSR - S_ISVTX = format.S_ISVTX - S_ISGID = format.S_ISGID - S_ISUID = format.S_ISUID - S_IFIFO = format.S_IFIFO - S_IFCHR = format.S_IFCHR - S_IFDIR = format.S_IFDIR - S_IFBLK = format.S_IFBLK - S_IFREG = format.S_IFREG - S_IFLNK = format.S_IFLNK - S_IFSOCK = format.S_IFSOCK - - TypeMask = format.TypeMask -) - -type inode struct { - Size int64 - Atime, Ctime, Mtime, Crtime uint64 - Number format.InodeNumber - Mode uint16 - Uid, Gid uint32 - LinkCount uint32 - XattrBlock uint32 - BlockCount uint32 - Devmajor, Devminor uint32 - Flags format.InodeFlag - Data []byte - XattrInline []byte - Children directory -} - -func (node *inode) FileType() uint16 { - return node.Mode & format.TypeMask -} - -func (node *inode) IsDir() bool { - return node.FileType() == S_IFDIR -} - -// A File represents a file to be added to an ext4 file system. 
-type File struct { - Linkname string - Size int64 - Mode uint16 - Uid, Gid uint32 - Atime, Ctime, Mtime, Crtime time.Time - Devmajor, Devminor uint32 - Xattrs map[string][]byte -} - -const ( - inodeFirst = 11 - inodeLostAndFound = inodeFirst - - blockSize = 4096 - blocksPerGroup = blockSize * 8 - inodeSize = 256 - maxInodesPerGroup = blockSize * 8 // Limited by the inode bitmap - inodesPerGroupIncrement = blockSize / inodeSize - - defaultMaxDiskSize = 16 * 1024 * 1024 * 1024 // 16GB - maxMaxDiskSize = 16 * 1024 * 1024 * 1024 * 1024 // 16TB - - groupDescriptorSize = 32 // Use the small group descriptor - groupsPerDescriptorBlock = blockSize / groupDescriptorSize - - maxFileSize = 128 * 1024 * 1024 * 1024 // 128GB file size maximum for now - smallSymlinkSize = 59 // max symlink size that goes directly in the inode - maxBlocksPerExtent = 0x8000 // maximum number of blocks in an extent - inodeDataSize = 60 - inodeUsedSize = 152 // fields through CrtimeExtra - inodeExtraSize = inodeSize - inodeUsedSize - xattrInodeOverhead = 4 + 4 // magic number + empty next entry value - xattrBlockOverhead = 32 + 4 // header + empty next entry value - inlineDataXattrOverhead = xattrInodeOverhead + 16 + 4 // entry + "data" - inlineDataSize = inodeDataSize + inodeExtraSize - inlineDataXattrOverhead -) - -type exceededMaxSizeError struct { - Size int64 -} - -func (err exceededMaxSizeError) Error() string { - return fmt.Sprintf("disk exceeded maximum size of %d bytes", err.Size) -} - -var directoryEntrySize = binary.Size(format.DirectoryEntry{}) -var extraIsize = uint16(inodeUsedSize - 128) - -type directory map[string]*inode - -func splitFirst(p string) (string, string) { - n := strings.IndexByte(p, '/') - if n >= 0 { - return p[:n], p[n+1:] - } - return p, "" -} - -func (w *Writer) findPath(root *inode, p string) *inode { - inode := root - for inode != nil && len(p) != 0 { - name, rest := splitFirst(p) - p = rest - inode = inode.Children[name] - } - return inode -} - -func 
timeToFsTime(t time.Time) uint64 { - if t.IsZero() { - return 0 - } - s := t.Unix() - if s < -0x80000000 { - return 0x80000000 - } - if s > 0x37fffffff { - return 0x37fffffff - } - return uint64(s) | uint64(t.Nanosecond())<<34 -} - -func fsTimeToTime(t uint64) time.Time { - if t == 0 { - return time.Time{} - } - s := int64(t & 0x3ffffffff) - if s > 0x7fffffff && s < 0x100000000 { - s = int64(int32(uint32(s))) - } - return time.Unix(s, int64(t>>34)) -} - -func (w *Writer) getInode(i format.InodeNumber) *inode { - if i == 0 || int(i) > len(w.inodes) { - return nil - } - return w.inodes[i-1] -} - -var xattrPrefixes = []struct { - Index uint8 - Prefix string -}{ - {2, "system.posix_acl_access"}, - {3, "system.posix_acl_default"}, - {8, "system.richacl"}, - {7, "system."}, - {1, "user."}, - {4, "trusted."}, - {6, "security."}, -} - -func compressXattrName(name string) (uint8, string) { - for _, p := range xattrPrefixes { - if strings.HasPrefix(name, p.Prefix) { - return p.Index, name[len(p.Prefix):] - } - } - return 0, name -} - -func decompressXattrName(index uint8, name string) string { - for _, p := range xattrPrefixes { - if index == p.Index { - return p.Prefix + name - } - } - return name -} - -func hashXattrEntry(name string, value []byte) uint32 { - var hash uint32 - for i := 0; i < len(name); i++ { - hash = (hash << 5) ^ (hash >> 27) ^ uint32(name[i]) - } - - for i := 0; i+3 < len(value); i += 4 { - hash = (hash << 16) ^ (hash >> 16) ^ binary.LittleEndian.Uint32(value[i:i+4]) - } - - if len(value)%4 != 0 { - var last [4]byte - copy(last[:], value[len(value)&^3:]) - hash = (hash << 16) ^ (hash >> 16) ^ binary.LittleEndian.Uint32(last[:]) - } - return hash -} - -type xattr struct { - Name string - Index uint8 - Value []byte -} - -func (x *xattr) EntryLen() int { - return (len(x.Name)+3)&^3 + 16 -} - -func (x *xattr) ValueLen() int { - return (len(x.Value) + 3) &^ 3 -} - -type xattrState struct { - inode, block []xattr - inodeLeft, blockLeft int -} - -func (s 
*xattrState) init() { - s.inodeLeft = inodeExtraSize - xattrInodeOverhead - s.blockLeft = blockSize - xattrBlockOverhead -} - -func (s *xattrState) addXattr(name string, value []byte) bool { - index, name := compressXattrName(name) - x := xattr{ - Index: index, - Name: name, - Value: value, - } - length := x.EntryLen() + x.ValueLen() - if s.inodeLeft >= length { - s.inode = append(s.inode, x) - s.inodeLeft -= length - } else if s.blockLeft >= length { - s.block = append(s.block, x) - s.blockLeft -= length - } else { - return false - } - return true -} - -func putXattrs(xattrs []xattr, b []byte, offsetDelta uint16) { - offset := uint16(len(b)) + offsetDelta - eb := b - db := b - for _, xattr := range xattrs { - vl := xattr.ValueLen() - offset -= uint16(vl) - eb[0] = uint8(len(xattr.Name)) - eb[1] = xattr.Index - binary.LittleEndian.PutUint16(eb[2:], offset) - binary.LittleEndian.PutUint32(eb[8:], uint32(len(xattr.Value))) - binary.LittleEndian.PutUint32(eb[12:], hashXattrEntry(xattr.Name, xattr.Value)) - copy(eb[16:], xattr.Name) - eb = eb[xattr.EntryLen():] - copy(db[len(db)-vl:], xattr.Value) - db = db[:len(db)-vl] - } -} - -func getXattrs(b []byte, xattrs map[string][]byte, offsetDelta uint16) { - eb := b - for len(eb) != 0 { - nameLen := eb[0] - if nameLen == 0 { - break - } - index := eb[1] - offset := binary.LittleEndian.Uint16(eb[2:]) - offsetDelta - valueLen := binary.LittleEndian.Uint32(eb[8:]) - attr := xattr{ - Index: index, - Name: string(eb[16 : 16+nameLen]), - Value: b[offset : uint32(offset)+valueLen], - } - xattrs[decompressXattrName(index, attr.Name)] = attr.Value - eb = eb[attr.EntryLen():] - } -} - -func (w *Writer) writeXattrs(inode *inode, state *xattrState) error { - // Write the inline attributes. 
- if len(state.inode) != 0 { - inode.XattrInline = make([]byte, inodeExtraSize) - binary.LittleEndian.PutUint32(inode.XattrInline[0:], format.XAttrHeaderMagic) // Magic - putXattrs(state.inode, inode.XattrInline[4:], 0) - } - - // Write the block attributes. If there was previously an xattr block, then - // rewrite it even if it is now empty. - if len(state.block) != 0 || inode.XattrBlock != 0 { - sort.Slice(state.block, func(i, j int) bool { - return state.block[i].Index < state.block[j].Index || - len(state.block[i].Name) < len(state.block[j].Name) || - state.block[i].Name < state.block[j].Name - }) - - var b [blockSize]byte - binary.LittleEndian.PutUint32(b[0:], format.XAttrHeaderMagic) // Magic - binary.LittleEndian.PutUint32(b[4:], 1) // ReferenceCount - binary.LittleEndian.PutUint32(b[8:], 1) // Blocks - putXattrs(state.block, b[32:], 32) - - orig := w.block() - if inode.XattrBlock == 0 { - inode.XattrBlock = orig - inode.BlockCount++ - } else { - // Reuse the original block. - w.seekBlock(inode.XattrBlock) - defer w.seekBlock(orig) - } - - if _, err := w.write(b[:]); err != nil { - return err - } - } - - return nil -} - -func (w *Writer) write(b []byte) (int, error) { - if w.err != nil { - return 0, w.err - } - if w.pos+int64(len(b)) > w.maxDiskSize { - w.err = exceededMaxSizeError{w.maxDiskSize} - return 0, w.err - } - n, err := w.bw.Write(b) - w.pos += int64(n) - w.err = err - return n, err -} - -func (w *Writer) zero(n int64) (int64, error) { - if w.err != nil { - return 0, w.err - } - if w.pos+int64(n) > w.maxDiskSize { - w.err = exceededMaxSizeError{w.maxDiskSize} - return 0, w.err - } - n, err := io.CopyN(w.bw, zero, n) - w.pos += n - w.err = err - return n, err -} - -func (w *Writer) makeInode(f *File, node *inode) (*inode, error) { - mode := f.Mode - if mode&format.TypeMask == 0 { - mode |= format.S_IFREG - } - typ := mode & format.TypeMask - ino := format.InodeNumber(len(w.inodes) + 1) - if node == nil { - node = &inode{ - Number: ino, - } - if typ 
== S_IFDIR { - node.Children = make(directory) - node.LinkCount = 1 // A directory is linked to itself. - } - } else if node.Flags&format.InodeFlagExtents != 0 { - // Since we cannot deallocate or reuse blocks, don't allow updates that - // would invalidate data that has already been written. - return nil, errors.New("cannot overwrite file with non-inline data") - } - node.Mode = mode - node.Uid = f.Uid - node.Gid = f.Gid - node.Flags = format.InodeFlagHugeFile - node.Atime = timeToFsTime(f.Atime) - node.Ctime = timeToFsTime(f.Ctime) - node.Mtime = timeToFsTime(f.Mtime) - node.Crtime = timeToFsTime(f.Crtime) - node.Devmajor = f.Devmajor - node.Devminor = f.Devminor - node.Data = nil - node.XattrInline = nil - - var xstate xattrState - xstate.init() - - var size int64 - switch typ { - case format.S_IFREG: - size = f.Size - if f.Size > maxFileSize { - return nil, fmt.Errorf("file too big: %d > %d", f.Size, int64(maxFileSize)) - } - if f.Size <= inlineDataSize && w.supportInlineData { - node.Data = make([]byte, f.Size) - extra := 0 - if f.Size > inodeDataSize { - extra = int(f.Size - inodeDataSize) - } - // Add a dummy entry for now. - if !xstate.addXattr("system.data", node.Data[:extra]) { - panic("not enough room for inline data") - } - node.Flags |= format.InodeFlagInlineData - } - case format.S_IFLNK: - node.Mode |= 0777 // Symlinks should appear as ugw rwx - size = int64(len(f.Linkname)) - if size <= smallSymlinkSize { - // Special case: small symlinks go directly in Block without setting - // an inline data flag. - node.Data = make([]byte, len(f.Linkname)) - copy(node.Data, f.Linkname) - } - case format.S_IFDIR, format.S_IFIFO, format.S_IFSOCK, format.S_IFCHR, format.S_IFBLK: - default: - return nil, fmt.Errorf("invalid mode %o", mode) - } - - // Accumulate the extended attributes. - if len(f.Xattrs) != 0 { - // Sort the xattrs to avoid non-determinism in map iteration. 
- var xattrs []string - for name := range f.Xattrs { - xattrs = append(xattrs, name) - } - sort.Strings(xattrs) - for _, name := range xattrs { - if !xstate.addXattr(name, f.Xattrs[name]) { - return nil, fmt.Errorf("could not fit xattr %s", name) - } - } - } - - if err := w.writeXattrs(node, &xstate); err != nil { - return nil, err - } - - node.Size = size - if typ == format.S_IFLNK && size > smallSymlinkSize { - // Write the link name as data. - w.startInode("", node, size) - if _, err := w.Write([]byte(f.Linkname)); err != nil { - return nil, err - } - if err := w.finishInode(); err != nil { - return nil, err - } - } - - if int(node.Number-1) >= len(w.inodes) { - w.inodes = append(w.inodes, node) - } - return node, nil -} - -func (w *Writer) root() *inode { - return w.getInode(format.InodeRoot) -} - -func (w *Writer) lookup(name string, mustExist bool) (*inode, *inode, string, error) { - root := w.root() - cleanname := path.Clean("/" + name)[1:] - if len(cleanname) == 0 { - return root, root, "", nil - } - dirname, childname := path.Split(cleanname) - if len(childname) == 0 || len(childname) > 0xff { - return nil, nil, "", fmt.Errorf("%s: invalid name", name) - } - dir := w.findPath(root, dirname) - if dir == nil || !dir.IsDir() { - return nil, nil, "", fmt.Errorf("%s: path not found", name) - } - child := dir.Children[childname] - if child == nil && mustExist { - return nil, nil, "", fmt.Errorf("%s: file not found", name) - } - return dir, child, childname, nil -} - -// CreateWithParents adds a file to the file system creating the parent directories in the path if -// they don't exist (like `mkdir -p`). These non existing parent directories are created -// with the same permissions as that of it's parent directory. It is expected that the a -// call to make these parent directories will be made at a later point with the correct -// permissions, at that time the permissions of these directories will be updated. 
-func (w *Writer) CreateWithParents(name string, f *File) error { - if err := w.finishInode(); err != nil { - return err - } - // go through the directories in the path one by one and create the - // parent directories if they don't exist. - cleanname := path.Clean("/" + name)[1:] - parentDirs, _ := path.Split(cleanname) - currentPath := "" - root := w.root() - dirname := "" - for parentDirs != "" { - dirname, parentDirs = splitFirst(parentDirs) - currentPath += "/" + dirname - if _, ok := root.Children[dirname]; !ok { - f := &File{ - Mode: root.Mode, - Atime: time.Now(), - Mtime: time.Now(), - Ctime: time.Now(), - Crtime: time.Now(), - Size: 0, - Uid: root.Uid, - Gid: root.Gid, - Devmajor: root.Devmajor, - Devminor: root.Devminor, - Xattrs: make(map[string][]byte), - } - if err := w.Create(currentPath, f); err != nil { - return fmt.Errorf("failed while creating parent directories: %w", err) - } - } - root = root.Children[dirname] - } - return w.Create(name, f) -} - -// Create adds a file to the file system. 
-func (w *Writer) Create(name string, f *File) error { - if err := w.finishInode(); err != nil { - return err - } - dir, existing, childname, err := w.lookup(name, false) - if err != nil { - return err - } - var reuse *inode - if existing != nil { - if existing.IsDir() { - if f.Mode&TypeMask != S_IFDIR { - return fmt.Errorf("%s: cannot replace a directory with a file", name) - } - reuse = existing - } else if f.Mode&TypeMask == S_IFDIR { - return fmt.Errorf("%s: cannot replace a file with a directory", name) - } else if existing.LinkCount < 2 { - reuse = existing - } - } else { - if f.Mode&TypeMask == S_IFDIR && dir.LinkCount >= format.MaxLinks { - return fmt.Errorf("%s: exceeded parent directory maximum link count", name) - } - } - child, err := w.makeInode(f, reuse) - if err != nil { - return fmt.Errorf("%s: %s", name, err) - } - if existing != child { - if existing != nil { - existing.LinkCount-- - } - dir.Children[childname] = child - child.LinkCount++ - if child.IsDir() { - dir.LinkCount++ - } - } - if child.Mode&format.TypeMask == format.S_IFREG { - w.startInode(name, child, f.Size) - } - return nil -} - -// Link adds a hard link to the file system. 
-func (w *Writer) Link(oldname, newname string) error { - if err := w.finishInode(); err != nil { - return err - } - newdir, existing, newchildname, err := w.lookup(newname, false) - if err != nil { - return err - } - if existing != nil && (existing.IsDir() || existing.LinkCount < 2) { - return fmt.Errorf("%s: cannot orphan existing file or directory", newname) - } - - _, oldfile, _, err := w.lookup(oldname, true) - if err != nil { - return err - } - switch oldfile.Mode & format.TypeMask { - case format.S_IFDIR, format.S_IFLNK: - return fmt.Errorf("%s: link target cannot be a directory or symlink: %s", newname, oldname) - } - - if existing != oldfile && oldfile.LinkCount >= format.MaxLinks { - return fmt.Errorf("%s: link target would exceed maximum link count: %s", newname, oldname) - } - - if existing != nil { - existing.LinkCount-- - } - oldfile.LinkCount++ - newdir.Children[newchildname] = oldfile - return nil -} - -// Stat returns information about a file that has been written. -func (w *Writer) Stat(name string) (*File, error) { - if err := w.finishInode(); err != nil { - return nil, err - } - _, node, _, err := w.lookup(name, true) - if err != nil { - return nil, err - } - f := &File{ - Size: node.Size, - Mode: node.Mode, - Uid: node.Uid, - Gid: node.Gid, - Atime: fsTimeToTime(node.Atime), - Ctime: fsTimeToTime(node.Ctime), - Mtime: fsTimeToTime(node.Mtime), - Crtime: fsTimeToTime(node.Crtime), - Devmajor: node.Devmajor, - Devminor: node.Devminor, - } - f.Xattrs = make(map[string][]byte) - if node.XattrBlock != 0 || len(node.XattrInline) != 0 { - if node.XattrBlock != 0 { - orig := w.block() - w.seekBlock(node.XattrBlock) - if w.err != nil { - return nil, w.err - } - var b [blockSize]byte - _, err := w.f.Read(b[:]) - w.seekBlock(orig) - if err != nil { - return nil, err - } - getXattrs(b[32:], f.Xattrs, 32) - } - if len(node.XattrInline) != 0 { - getXattrs(node.XattrInline[4:], f.Xattrs, 0) - delete(f.Xattrs, "system.data") - } - } - if node.FileType() == 
S_IFLNK { - if node.Size > smallSymlinkSize { - return nil, fmt.Errorf("%s: cannot retrieve link information", name) - } - f.Linkname = string(node.Data) - } - return f, nil -} - -func (w *Writer) Write(b []byte) (int, error) { - if len(b) == 0 { - return 0, nil - } - if w.dataWritten+int64(len(b)) > w.dataMax { - return 0, fmt.Errorf("%s: wrote too much: %d > %d", w.curName, w.dataWritten+int64(len(b)), w.dataMax) - } - - if w.curInode.Flags&format.InodeFlagInlineData != 0 { - copy(w.curInode.Data[w.dataWritten:], b) - w.dataWritten += int64(len(b)) - return len(b), nil - } - - n, err := w.write(b) - w.dataWritten += int64(n) - return n, err -} - -func (w *Writer) startInode(name string, inode *inode, size int64) { - if w.curInode != nil { - panic("inode already in progress") - } - w.curName = name - w.curInode = inode - w.dataWritten = 0 - w.dataMax = size -} - -func (w *Writer) block() uint32 { - return uint32(w.pos / blockSize) -} - -func (w *Writer) seekBlock(block uint32) { - w.pos = int64(block) * blockSize - if w.err != nil { - return - } - w.err = w.bw.Flush() - if w.err != nil { - return - } - _, w.err = w.f.Seek(w.pos, io.SeekStart) -} - -func (w *Writer) nextBlock() { - if w.pos%blockSize != 0 { - // Simplify callers; w.err is updated on failure. 
- _, _ = w.zero(blockSize - w.pos%blockSize) - } -} - -func fillExtents(hdr *format.ExtentHeader, extents []format.ExtentLeafNode, startBlock, offset, inodeSize uint32) { - *hdr = format.ExtentHeader{ - Magic: format.ExtentHeaderMagic, - Entries: uint16(len(extents)), - Max: uint16(cap(extents)), - Depth: 0, - } - for i := range extents { - block := offset + uint32(i)*maxBlocksPerExtent - length := inodeSize - block - if length > maxBlocksPerExtent { - length = maxBlocksPerExtent - } - start := startBlock + block - extents[i] = format.ExtentLeafNode{ - Block: block, - Length: uint16(length), - StartLow: start, - } - } -} - -func (w *Writer) writeExtents(inode *inode) error { - start := w.pos - w.dataWritten - if start%blockSize != 0 { - panic("unaligned") - } - w.nextBlock() - - startBlock := uint32(start / blockSize) - blocks := w.block() - startBlock - usedBlocks := blocks - - const extentNodeSize = 12 - const extentsPerBlock = blockSize/extentNodeSize - 1 - - extents := (blocks + maxBlocksPerExtent - 1) / maxBlocksPerExtent - var b bytes.Buffer - if extents == 0 { - // Nothing to do. 
- } else if extents <= 4 { - var root struct { - hdr format.ExtentHeader - extents [4]format.ExtentLeafNode - } - fillExtents(&root.hdr, root.extents[:extents], startBlock, 0, blocks) - _ = binary.Write(&b, binary.LittleEndian, root) - } else if extents <= 4*extentsPerBlock { - const extentsPerBlock = blockSize/extentNodeSize - 1 - extentBlocks := extents/extentsPerBlock + 1 - usedBlocks += extentBlocks - var b2 bytes.Buffer - - var root struct { - hdr format.ExtentHeader - nodes [4]format.ExtentIndexNode - } - root.hdr = format.ExtentHeader{ - Magic: format.ExtentHeaderMagic, - Entries: uint16(extentBlocks), - Max: 4, - Depth: 1, - } - for i := uint32(0); i < extentBlocks; i++ { - root.nodes[i] = format.ExtentIndexNode{ - Block: i * extentsPerBlock * maxBlocksPerExtent, - LeafLow: w.block(), - } - extentsInBlock := extents - i*extentBlocks - if extentsInBlock > extentsPerBlock { - extentsInBlock = extentsPerBlock - } - - var node struct { - hdr format.ExtentHeader - extents [extentsPerBlock]format.ExtentLeafNode - _ [blockSize - (extentsPerBlock+1)*extentNodeSize]byte - } - - offset := i * extentsPerBlock * maxBlocksPerExtent - fillExtents(&node.hdr, node.extents[:extentsInBlock], startBlock+offset, offset, blocks) - _ = binary.Write(&b2, binary.LittleEndian, node) - if _, err := w.write(b2.Next(blockSize)); err != nil { - return err - } - } - _ = binary.Write(&b, binary.LittleEndian, root) - } else { - panic("file too big") - } - - inode.Data = b.Bytes() - inode.Flags |= format.InodeFlagExtents - inode.BlockCount += usedBlocks - return w.err -} - -func (w *Writer) finishInode() error { - if !w.initialized { - if err := w.init(); err != nil { - return err - } - } - if w.curInode == nil { - return nil - } - if w.dataWritten != w.dataMax { - return fmt.Errorf("did not write the right amount: %d != %d", w.dataWritten, w.dataMax) - } - - if w.dataMax != 0 && w.curInode.Flags&format.InodeFlagInlineData == 0 { - if err := w.writeExtents(w.curInode); err != nil { - 
return err - } - } - - w.dataWritten = 0 - w.dataMax = 0 - w.curInode = nil - return w.err -} - -func modeToFileType(mode uint16) format.FileType { - switch mode & format.TypeMask { - default: - return format.FileTypeUnknown - case format.S_IFREG: - return format.FileTypeRegular - case format.S_IFDIR: - return format.FileTypeDirectory - case format.S_IFCHR: - return format.FileTypeCharacter - case format.S_IFBLK: - return format.FileTypeBlock - case format.S_IFIFO: - return format.FileTypeFIFO - case format.S_IFSOCK: - return format.FileTypeSocket - case format.S_IFLNK: - return format.FileTypeSymbolicLink - } -} - -type constReader byte - -var zero = constReader(0) - -func (r constReader) Read(b []byte) (int, error) { - for i := range b { - b[i] = byte(r) - } - return len(b), nil -} - -func (w *Writer) writeDirectory(dir, parent *inode) error { - if err := w.finishInode(); err != nil { - return err - } - - // The size of the directory is not known yet. - w.startInode("", dir, 0x7fffffffffffffff) - left := blockSize - finishBlock := func() error { - if left > 0 { - e := format.DirectoryEntry{ - RecordLength: uint16(left), - } - err := binary.Write(w, binary.LittleEndian, e) - if err != nil { - return err - } - left -= directoryEntrySize - if left < 4 { - panic("not enough space for trailing entry") - } - _, err = io.CopyN(w, zero, int64(left)) - if err != nil { - return err - } - } - left = blockSize - return nil - } - - writeEntry := func(ino format.InodeNumber, name string) error { - rlb := directoryEntrySize + len(name) - rl := (rlb + 3) & ^3 - if left < rl+12 { - if err := finishBlock(); err != nil { - return err - } - } - e := format.DirectoryEntry{ - Inode: ino, - RecordLength: uint16(rl), - NameLength: uint8(len(name)), - FileType: modeToFileType(w.getInode(ino).Mode), - } - err := binary.Write(w, binary.LittleEndian, e) - if err != nil { - return err - } - _, err = w.Write([]byte(name)) - if err != nil { - return err - } - var zero [4]byte - _, err = 
w.Write(zero[:rl-rlb]) - if err != nil { - return err - } - left -= rl - return nil - } - if err := writeEntry(dir.Number, "."); err != nil { - return err - } - if err := writeEntry(parent.Number, ".."); err != nil { - return err - } - - // Follow e2fsck's convention and sort the children by inode number. - var children []string - for name := range dir.Children { - children = append(children, name) - } - sort.Slice(children, func(i, j int) bool { - left_num := dir.Children[children[i]].Number - right_num := dir.Children[children[j]].Number - - if left_num == right_num { - return children[i] < children[j] - } - return left_num < right_num - }) - - for _, name := range children { - child := dir.Children[name] - if err := writeEntry(child.Number, name); err != nil { - return err - } - } - if err := finishBlock(); err != nil { - return err - } - w.curInode.Size = w.dataWritten - w.dataMax = w.dataWritten - return nil -} - -func (w *Writer) writeDirectoryRecursive(dir, parent *inode) error { - if err := w.writeDirectory(dir, parent); err != nil { - return err - } - - // Follow e2fsck's convention and sort the children by inode number. 
- var children []string - for name := range dir.Children { - children = append(children, name) - } - sort.Slice(children, func(i, j int) bool { - left_num := dir.Children[children[i]].Number - right_num := dir.Children[children[j]].Number - - if left_num == right_num { - return children[i] < children[j] - } - return left_num < right_num - }) - - for _, name := range children { - child := dir.Children[name] - if child.IsDir() { - if err := w.writeDirectoryRecursive(child, dir); err != nil { - return err - } - } - } - return nil -} - -func (w *Writer) writeInodeTable(tableSize uint32) error { - var b bytes.Buffer - for _, inode := range w.inodes { - if inode != nil { - binode := format.Inode{ - Mode: inode.Mode, - Uid: uint16(inode.Uid & 0xffff), - Gid: uint16(inode.Gid & 0xffff), - SizeLow: uint32(inode.Size & 0xffffffff), - SizeHigh: uint32(inode.Size >> 32), - LinksCount: uint16(inode.LinkCount), - BlocksLow: inode.BlockCount, - Flags: inode.Flags, - XattrBlockLow: inode.XattrBlock, - UidHigh: uint16(inode.Uid >> 16), - GidHigh: uint16(inode.Gid >> 16), - ExtraIsize: uint16(inodeUsedSize - 128), - Atime: uint32(inode.Atime), - AtimeExtra: uint32(inode.Atime >> 32), - Ctime: uint32(inode.Ctime), - CtimeExtra: uint32(inode.Ctime >> 32), - Mtime: uint32(inode.Mtime), - MtimeExtra: uint32(inode.Mtime >> 32), - Crtime: uint32(inode.Crtime), - CrtimeExtra: uint32(inode.Crtime >> 32), - } - switch inode.Mode & format.TypeMask { - case format.S_IFDIR, format.S_IFREG, format.S_IFLNK: - n := copy(binode.Block[:], inode.Data) - if n < len(inode.Data) { - // Rewrite the first xattr with the data. - xattr := [1]xattr{{ - Name: "data", - Index: 7, // "system." 
- Value: inode.Data[n:], - }} - putXattrs(xattr[:], inode.XattrInline[4:], 0) - } - case format.S_IFBLK, format.S_IFCHR: - dev := inode.Devminor&0xff | inode.Devmajor<<8 | (inode.Devminor&0xffffff00)<<12 - binary.LittleEndian.PutUint32(binode.Block[4:], dev) - } - - _ = binary.Write(&b, binary.LittleEndian, binode) - b.Truncate(inodeUsedSize) - n, _ := b.Write(inode.XattrInline) - _, _ = io.CopyN(&b, zero, int64(inodeExtraSize-n)) - } else { - _, _ = io.CopyN(&b, zero, inodeSize) - } - if _, err := w.write(b.Next(inodeSize)); err != nil { - return err - } - } - rest := tableSize - uint32(len(w.inodes)*inodeSize) - if _, err := w.zero(int64(rest)); err != nil { - return err - } - return nil -} - -// NewWriter returns a Writer that writes an ext4 file system to the provided -// WriteSeeker. -func NewWriter(f io.ReadWriteSeeker, opts ...Option) *Writer { - w := &Writer{ - f: f, - bw: bufio.NewWriterSize(f, 65536*8), - maxDiskSize: defaultMaxDiskSize, - } - for _, opt := range opts { - opt(w) - } - return w -} - -// An Option provides extra options to NewWriter. -type Option func(*Writer) - -// InlineData instructs the Writer to write small files into the inode -// structures directly. This creates smaller images but currently is not -// compatible with DAX. -func InlineData(w *Writer) { - w.supportInlineData = true -} - -// MaximumDiskSize instructs the writer to reserve enough metadata space for the -// specified disk size. If not provided, then 16GB is the default. -func MaximumDiskSize(size int64) Option { - return func(w *Writer) { - if size < 0 || size > maxMaxDiskSize { - w.maxDiskSize = maxMaxDiskSize - } else if size == 0 { - w.maxDiskSize = defaultMaxDiskSize - } else { - w.maxDiskSize = (size + blockSize - 1) &^ (blockSize - 1) - } - } -} - -func (w *Writer) init() error { - // Skip the defective block inode. - w.inodes = make([]*inode, 1, 32) - // Create the root directory. 
- root, _ := w.makeInode(&File{ - Mode: format.S_IFDIR | 0755, - }, nil) - root.LinkCount++ // The root is linked to itself. - // Skip until the first non-reserved inode. - w.inodes = append(w.inodes, make([]*inode, inodeFirst-len(w.inodes)-1)...) - maxBlocks := (w.maxDiskSize-1)/blockSize + 1 - maxGroups := (maxBlocks-1)/blocksPerGroup + 1 - w.gdBlocks = uint32((maxGroups-1)/groupsPerDescriptorBlock + 1) - - // Skip past the superblock and block descriptor table. - w.seekBlock(1 + w.gdBlocks) - w.initialized = true - - // The lost+found directory is required to exist for e2fsck to pass. - if err := w.Create("lost+found", &File{Mode: format.S_IFDIR | 0700}); err != nil { - return err - } - return w.err -} - -func groupCount(blocks uint32, inodes uint32, inodesPerGroup uint32) uint32 { - inodeBlocksPerGroup := inodesPerGroup * inodeSize / blockSize - dataBlocksPerGroup := blocksPerGroup - inodeBlocksPerGroup - 2 // save room for the bitmaps - - // Increase the block count to ensure there are enough groups for all the - // inodes. - minBlocks := (inodes-1)/inodesPerGroup*dataBlocksPerGroup + 1 - if blocks < minBlocks { - blocks = minBlocks - } - - return (blocks + dataBlocksPerGroup - 1) / dataBlocksPerGroup -} - -func bestGroupCount(blocks uint32, inodes uint32) (groups uint32, inodesPerGroup uint32) { - groups = 0xffffffff - for ipg := uint32(inodesPerGroupIncrement); ipg <= maxInodesPerGroup; ipg += inodesPerGroupIncrement { - g := groupCount(blocks, inodes, ipg) - if g < groups { - groups = g - inodesPerGroup = ipg - } - } - return -} - -func (w *Writer) Close() error { - if err := w.finishInode(); err != nil { - return err - } - root := w.root() - if err := w.writeDirectoryRecursive(root, root); err != nil { - return err - } - // Finish the last inode (probably a directory). 
- if err := w.finishInode(); err != nil { - return err - } - - // Write the inode table - inodeTableOffset := w.block() - groups, inodesPerGroup := bestGroupCount(inodeTableOffset, uint32(len(w.inodes))) - err := w.writeInodeTable(groups * inodesPerGroup * inodeSize) - if err != nil { - return err - } - - // Write the bitmaps. - bitmapOffset := w.block() - bitmapSize := groups * 2 - validDataSize := bitmapOffset + bitmapSize - diskSize := validDataSize - minSize := (groups-1)*blocksPerGroup + 1 - if diskSize < minSize { - diskSize = minSize - } - - usedGdBlocks := (groups-1)/groupsPerDescriptorBlock + 1 - if usedGdBlocks > w.gdBlocks { - return exceededMaxSizeError{w.maxDiskSize} - } - - gds := make([]format.GroupDescriptor, w.gdBlocks*groupsPerDescriptorBlock) - inodeTableSizePerGroup := inodesPerGroup * inodeSize / blockSize - var totalUsedBlocks, totalUsedInodes uint32 - for g := uint32(0); g < groups; g++ { - var b [blockSize * 2]byte - var dirCount, usedInodeCount, usedBlockCount uint16 - - // Block bitmap - if (g+1)*blocksPerGroup <= validDataSize { - // This group is fully allocated. - for j := range b[:blockSize] { - b[j] = 0xff - } - usedBlockCount = blocksPerGroup - } else if g*blocksPerGroup < validDataSize { - for j := uint32(0); j < validDataSize-g*blocksPerGroup; j++ { - b[j/8] |= 1 << (j % 8) - usedBlockCount++ - } - } - if g == 0 { - // Unused group descriptor blocks should be cleared. - for j := 1 + usedGdBlocks; j < 1+w.gdBlocks; j++ { - b[j/8] &^= 1 << (j % 8) - usedBlockCount-- - } - } - if g == groups-1 && diskSize%blocksPerGroup != 0 { - // Blocks that aren't present in the disk should be marked as - // allocated. 
- for j := diskSize % blocksPerGroup; j < blocksPerGroup; j++ { - b[j/8] |= 1 << (j % 8) - usedBlockCount++ - } - } - // Inode bitmap - for j := uint32(0); j < inodesPerGroup; j++ { - ino := format.InodeNumber(1 + g*inodesPerGroup + j) - inode := w.getInode(ino) - if ino < inodeFirst || inode != nil { - b[blockSize+j/8] |= 1 << (j % 8) - usedInodeCount++ - } - if inode != nil && inode.Mode&format.TypeMask == format.S_IFDIR { - dirCount++ - } - } - _, err := w.write(b[:]) - if err != nil { - return err - } - gds[g] = format.GroupDescriptor{ - BlockBitmapLow: bitmapOffset + 2*g, - InodeBitmapLow: bitmapOffset + 2*g + 1, - InodeTableLow: inodeTableOffset + g*inodeTableSizePerGroup, - UsedDirsCountLow: dirCount, - FreeInodesCountLow: uint16(inodesPerGroup) - usedInodeCount, - FreeBlocksCountLow: blocksPerGroup - usedBlockCount, - } - - totalUsedBlocks += uint32(usedBlockCount) - totalUsedInodes += uint32(usedInodeCount) - } - - // Zero up to the disk size. - _, err = w.zero(int64(diskSize-bitmapOffset-bitmapSize) * blockSize) - if err != nil { - return err - } - - // Write the block descriptors - w.seekBlock(1) - if w.err != nil { - return w.err - } - err = binary.Write(w.bw, binary.LittleEndian, gds) - if err != nil { - return err - } - - // Write the super block - var blk [blockSize]byte - b := bytes.NewBuffer(blk[:1024]) - sb := &format.SuperBlock{ - InodesCount: inodesPerGroup * groups, - BlocksCountLow: diskSize, - FreeBlocksCountLow: blocksPerGroup*groups - totalUsedBlocks, - FreeInodesCount: inodesPerGroup*groups - totalUsedInodes, - FirstDataBlock: 0, - LogBlockSize: 2, // 2^(10 + 2) - LogClusterSize: 2, - BlocksPerGroup: blocksPerGroup, - ClustersPerGroup: blocksPerGroup, - InodesPerGroup: inodesPerGroup, - Magic: format.SuperBlockMagic, - State: 1, // cleanly unmounted - Errors: 1, // continue on error? 
- CreatorOS: 0, // Linux - RevisionLevel: 1, // dynamic inode sizes - FirstInode: inodeFirst, - LpfInode: inodeLostAndFound, - InodeSize: inodeSize, - FeatureCompat: format.CompatSparseSuper2 | format.CompatExtAttr, - FeatureIncompat: format.IncompatFiletype | format.IncompatExtents | format.IncompatFlexBg, - FeatureRoCompat: format.RoCompatLargeFile | format.RoCompatHugeFile | format.RoCompatExtraIsize | format.RoCompatReadonly, - MinExtraIsize: extraIsize, - WantExtraIsize: extraIsize, - LogGroupsPerFlex: 31, - } - if w.supportInlineData { - sb.FeatureIncompat |= format.IncompatInlineData - } - _ = binary.Write(b, binary.LittleEndian, sb) - w.seekBlock(0) - if _, err := w.write(blk[:]); err != nil { - return err - } - w.seekBlock(diskSize) - return w.err -} diff --git a/vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go b/vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go deleted file mode 100644 index 9dc4c4e..0000000 --- a/vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go +++ /dev/null @@ -1,411 +0,0 @@ -package format - -type SuperBlock struct { - InodesCount uint32 - BlocksCountLow uint32 - RootBlocksCountLow uint32 - FreeBlocksCountLow uint32 - FreeInodesCount uint32 - FirstDataBlock uint32 - LogBlockSize uint32 - LogClusterSize uint32 - BlocksPerGroup uint32 - ClustersPerGroup uint32 - InodesPerGroup uint32 - Mtime uint32 - Wtime uint32 - MountCount uint16 - MaxMountCount uint16 - Magic uint16 - State uint16 - Errors uint16 - MinorRevisionLevel uint16 - LastCheck uint32 - CheckInterval uint32 - CreatorOS uint32 - RevisionLevel uint32 - DefaultReservedUid uint16 - DefaultReservedGid uint16 - FirstInode uint32 - InodeSize uint16 - BlockGroupNr uint16 - FeatureCompat CompatFeature - FeatureIncompat IncompatFeature - FeatureRoCompat RoCompatFeature - UUID [16]uint8 - VolumeName [16]byte - LastMounted [64]byte - AlgorithmUsageBitmap uint32 - PreallocBlocks uint8 - PreallocDirBlocks uint8 - ReservedGdtBlocks uint16 
- JournalUUID [16]uint8 - JournalInum uint32 - JournalDev uint32 - LastOrphan uint32 - HashSeed [4]uint32 - DefHashVersion uint8 - JournalBackupType uint8 - DescSize uint16 - DefaultMountOpts uint32 - FirstMetaBg uint32 - MkfsTime uint32 - JournalBlocks [17]uint32 - BlocksCountHigh uint32 - RBlocksCountHigh uint32 - FreeBlocksCountHigh uint32 - MinExtraIsize uint16 - WantExtraIsize uint16 - Flags uint32 - RaidStride uint16 - MmpInterval uint16 - MmpBlock uint64 - RaidStripeWidth uint32 - LogGroupsPerFlex uint8 - ChecksumType uint8 - ReservedPad uint16 - KbytesWritten uint64 - SnapshotInum uint32 - SnapshotID uint32 - SnapshotRBlocksCount uint64 - SnapshotList uint32 - ErrorCount uint32 - FirstErrorTime uint32 - FirstErrorInode uint32 - FirstErrorBlock uint64 - FirstErrorFunc [32]uint8 - FirstErrorLine uint32 - LastErrorTime uint32 - LastErrorInode uint32 - LastErrorLine uint32 - LastErrorBlock uint64 - LastErrorFunc [32]uint8 - MountOpts [64]uint8 - UserQuotaInum uint32 - GroupQuotaInum uint32 - OverheadBlocks uint32 - BackupBgs [2]uint32 - EncryptAlgos [4]uint8 - EncryptPwSalt [16]uint8 - LpfInode uint32 - ProjectQuotaInum uint32 - ChecksumSeed uint32 - WtimeHigh uint8 - MtimeHigh uint8 - MkfsTimeHigh uint8 - LastcheckHigh uint8 - FirstErrorTimeHigh uint8 - LastErrorTimeHigh uint8 - Pad [2]uint8 - Reserved [96]uint32 - Checksum uint32 -} - -const SuperBlockMagic uint16 = 0xef53 - -type CompatFeature uint32 -type IncompatFeature uint32 -type RoCompatFeature uint32 - -const ( - CompatDirPrealloc CompatFeature = 0x1 - CompatImagicInodes CompatFeature = 0x2 - CompatHasJournal CompatFeature = 0x4 - CompatExtAttr CompatFeature = 0x8 - CompatResizeInode CompatFeature = 0x10 - CompatDirIndex CompatFeature = 0x20 - CompatLazyBg CompatFeature = 0x40 - CompatExcludeInode CompatFeature = 0x80 - CompatExcludeBitmap CompatFeature = 0x100 - CompatSparseSuper2 CompatFeature = 0x200 - - IncompatCompression IncompatFeature = 0x1 - IncompatFiletype IncompatFeature = 0x2 - 
IncompatRecover IncompatFeature = 0x4 - IncompatJournalDev IncompatFeature = 0x8 - IncompatMetaBg IncompatFeature = 0x10 - IncompatExtents IncompatFeature = 0x40 - Incompat_64Bit IncompatFeature = 0x80 - IncompatMmp IncompatFeature = 0x100 - IncompatFlexBg IncompatFeature = 0x200 - IncompatEaInode IncompatFeature = 0x400 - IncompatDirdata IncompatFeature = 0x1000 - IncompatCsumSeed IncompatFeature = 0x2000 - IncompatLargedir IncompatFeature = 0x4000 - IncompatInlineData IncompatFeature = 0x8000 - IncompatEncrypt IncompatFeature = 0x10000 - - RoCompatSparseSuper RoCompatFeature = 0x1 - RoCompatLargeFile RoCompatFeature = 0x2 - RoCompatBtreeDir RoCompatFeature = 0x4 - RoCompatHugeFile RoCompatFeature = 0x8 - RoCompatGdtCsum RoCompatFeature = 0x10 - RoCompatDirNlink RoCompatFeature = 0x20 - RoCompatExtraIsize RoCompatFeature = 0x40 - RoCompatHasSnapshot RoCompatFeature = 0x80 - RoCompatQuota RoCompatFeature = 0x100 - RoCompatBigalloc RoCompatFeature = 0x200 - RoCompatMetadataCsum RoCompatFeature = 0x400 - RoCompatReplica RoCompatFeature = 0x800 - RoCompatReadonly RoCompatFeature = 0x1000 - RoCompatProject RoCompatFeature = 0x2000 -) - -type BlockGroupFlag uint16 - -const ( - BlockGroupInodeUninit BlockGroupFlag = 0x1 - BlockGroupBlockUninit BlockGroupFlag = 0x2 - BlockGroupInodeZeroed BlockGroupFlag = 0x4 -) - -type GroupDescriptor struct { - BlockBitmapLow uint32 - InodeBitmapLow uint32 - InodeTableLow uint32 - FreeBlocksCountLow uint16 - FreeInodesCountLow uint16 - UsedDirsCountLow uint16 - Flags BlockGroupFlag - ExcludeBitmapLow uint32 - BlockBitmapCsumLow uint16 - InodeBitmapCsumLow uint16 - ItableUnusedLow uint16 - Checksum uint16 -} - -type GroupDescriptor64 struct { - GroupDescriptor - BlockBitmapHigh uint32 - InodeBitmapHigh uint32 - InodeTableHigh uint32 - FreeBlocksCountHigh uint16 - FreeInodesCountHigh uint16 - UsedDirsCountHigh uint16 - ItableUnusedHigh uint16 - ExcludeBitmapHigh uint32 - BlockBitmapCsumHigh uint16 - InodeBitmapCsumHigh uint16 - Reserved 
uint32 -} - -const ( - S_IXOTH = 0x1 - S_IWOTH = 0x2 - S_IROTH = 0x4 - S_IXGRP = 0x8 - S_IWGRP = 0x10 - S_IRGRP = 0x20 - S_IXUSR = 0x40 - S_IWUSR = 0x80 - S_IRUSR = 0x100 - S_ISVTX = 0x200 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_IFIFO = 0x1000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFBLK = 0x6000 - S_IFREG = 0x8000 - S_IFLNK = 0xA000 - S_IFSOCK = 0xC000 - - TypeMask uint16 = 0xF000 -) - -type InodeNumber uint32 - -const ( - InodeRoot = 2 -) - -type Inode struct { - Mode uint16 - Uid uint16 - SizeLow uint32 - Atime uint32 - Ctime uint32 - Mtime uint32 - Dtime uint32 - Gid uint16 - LinksCount uint16 - BlocksLow uint32 - Flags InodeFlag - Version uint32 - Block [60]byte - Generation uint32 - XattrBlockLow uint32 - SizeHigh uint32 - ObsoleteFragmentAddr uint32 - BlocksHigh uint16 - XattrBlockHigh uint16 - UidHigh uint16 - GidHigh uint16 - ChecksumLow uint16 - Reserved uint16 - ExtraIsize uint16 - ChecksumHigh uint16 - CtimeExtra uint32 - MtimeExtra uint32 - AtimeExtra uint32 - Crtime uint32 - CrtimeExtra uint32 - VersionHigh uint32 - Projid uint32 -} - -type InodeFlag uint32 - -const ( - InodeFlagSecRm InodeFlag = 0x1 - InodeFlagUnRm InodeFlag = 0x2 - InodeFlagCompressed InodeFlag = 0x4 - InodeFlagSync InodeFlag = 0x8 - InodeFlagImmutable InodeFlag = 0x10 - InodeFlagAppend InodeFlag = 0x20 - InodeFlagNoDump InodeFlag = 0x40 - InodeFlagNoAtime InodeFlag = 0x80 - InodeFlagDirtyCompressed InodeFlag = 0x100 - InodeFlagCompressedClusters InodeFlag = 0x200 - InodeFlagNoCompress InodeFlag = 0x400 - InodeFlagEncrypted InodeFlag = 0x800 - InodeFlagHashedIndex InodeFlag = 0x1000 - InodeFlagMagic InodeFlag = 0x2000 - InodeFlagJournalData InodeFlag = 0x4000 - InodeFlagNoTail InodeFlag = 0x8000 - InodeFlagDirSync InodeFlag = 0x10000 - InodeFlagTopDir InodeFlag = 0x20000 - InodeFlagHugeFile InodeFlag = 0x40000 - InodeFlagExtents InodeFlag = 0x80000 - InodeFlagEaInode InodeFlag = 0x200000 - InodeFlagEOFBlocks InodeFlag = 0x400000 - InodeFlagSnapfile InodeFlag = 0x01000000 - 
InodeFlagSnapfileDeleted InodeFlag = 0x04000000 - InodeFlagSnapfileShrunk InodeFlag = 0x08000000 - InodeFlagInlineData InodeFlag = 0x10000000 - InodeFlagProjectIDInherit InodeFlag = 0x20000000 - InodeFlagReserved InodeFlag = 0x80000000 -) - -const ( - MaxLinks = 65000 -) - -type ExtentHeader struct { - Magic uint16 - Entries uint16 - Max uint16 - Depth uint16 - Generation uint32 -} - -const ExtentHeaderMagic uint16 = 0xf30a - -type ExtentIndexNode struct { - Block uint32 - LeafLow uint32 - LeafHigh uint16 - Unused uint16 -} - -type ExtentLeafNode struct { - Block uint32 - Length uint16 - StartHigh uint16 - StartLow uint32 -} - -type ExtentTail struct { - Checksum uint32 -} - -type DirectoryEntry struct { - Inode InodeNumber - RecordLength uint16 - NameLength uint8 - FileType FileType - //Name []byte -} - -type FileType uint8 - -const ( - FileTypeUnknown FileType = 0x0 - FileTypeRegular FileType = 0x1 - FileTypeDirectory FileType = 0x2 - FileTypeCharacter FileType = 0x3 - FileTypeBlock FileType = 0x4 - FileTypeFIFO FileType = 0x5 - FileTypeSocket FileType = 0x6 - FileTypeSymbolicLink FileType = 0x7 -) - -type DirectoryEntryTail struct { - ReservedZero1 uint32 - RecordLength uint16 - ReservedZero2 uint8 - FileType uint8 - Checksum uint32 -} - -type DirectoryTreeRoot struct { - Dot DirectoryEntry - DotName [4]byte - DotDot DirectoryEntry - DotDotName [4]byte - ReservedZero uint32 - HashVersion uint8 - InfoLength uint8 - IndirectLevels uint8 - UnusedFlags uint8 - Limit uint16 - Count uint16 - Block uint32 - //Entries []DirectoryTreeEntry -} - -type DirectoryTreeNode struct { - FakeInode uint32 - FakeRecordLength uint16 - NameLength uint8 - FileType uint8 - Limit uint16 - Count uint16 - Block uint32 - //Entries []DirectoryTreeEntry -} - -type DirectoryTreeEntry struct { - Hash uint32 - Block uint32 -} - -type DirectoryTreeTail struct { - Reserved uint32 - Checksum uint32 -} - -type XAttrInodeBodyHeader struct { - Magic uint32 -} - -type XAttrHeader struct { - Magic 
uint32 - ReferenceCount uint32 - Blocks uint32 - Hash uint32 - Checksum uint32 - Reserved [3]uint32 -} - -const XAttrHeaderMagic uint32 = 0xea020000 - -type XAttrEntry struct { - NameLength uint8 - NameIndex uint8 - ValueOffset uint16 - ValueInum uint32 - ValueSize uint32 - Hash uint32 - //Name []byte -} diff --git a/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go b/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go deleted file mode 100644 index 1aeae29..0000000 --- a/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go +++ /dev/null @@ -1,209 +0,0 @@ -package tar2ext4 - -import ( - "archive/tar" - "bufio" - "encoding/binary" - "io" - "os" - "path" - "strings" - - "github.com/Microsoft/hcsshim/ext4/internal/compactext4" - "github.com/Microsoft/hcsshim/ext4/internal/format" -) - -type params struct { - convertWhiteout bool - appendVhdFooter bool - ext4opts []compactext4.Option -} - -// Option is the type for optional parameters to Convert. -type Option func(*params) - -// ConvertWhiteout instructs the converter to convert OCI-style whiteouts -// (beginning with .wh.) to overlay-style whiteouts. -func ConvertWhiteout(p *params) { - p.convertWhiteout = true -} - -// AppendVhdFooter instructs the converter to add a fixed VHD footer to the -// file. -func AppendVhdFooter(p *params) { - p.appendVhdFooter = true -} - -// InlineData instructs the converter to write small files into the inode -// structures directly. This creates smaller images but currently is not -// compatible with DAX. -func InlineData(p *params) { - p.ext4opts = append(p.ext4opts, compactext4.InlineData) -} - -// MaximumDiskSize instructs the writer to limit the disk size to the specified -// value. This also reserves enough metadata space for the specified disk size. -// If not provided, then 16GB is the default. 
-func MaximumDiskSize(size int64) Option { - return func(p *params) { - p.ext4opts = append(p.ext4opts, compactext4.MaximumDiskSize(size)) - } -} - -const ( - whiteoutPrefix = ".wh." - opaqueWhiteout = ".wh..wh..opq" -) - -// Convert writes a compact ext4 file system image that contains the files in the -// input tar stream. -func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error { - var p params - for _, opt := range options { - opt(&p) - } - t := tar.NewReader(bufio.NewReader(r)) - fs := compactext4.NewWriter(w, p.ext4opts...) - for { - hdr, err := t.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - if p.convertWhiteout { - dir, name := path.Split(hdr.Name) - if strings.HasPrefix(name, whiteoutPrefix) { - if name == opaqueWhiteout { - // Update the directory with the appropriate xattr. - f, err := fs.Stat(dir) - if err != nil { - return err - } - f.Xattrs["trusted.overlay.opaque"] = []byte("y") - err = fs.Create(dir, f) - if err != nil { - return err - } - } else { - // Create an overlay-style whiteout. - f := &compactext4.File{ - Mode: compactext4.S_IFCHR, - Devmajor: 0, - Devminor: 0, - } - err = fs.Create(path.Join(dir, name[len(whiteoutPrefix):]), f) - if err != nil { - return err - } - } - - continue - } - } - - if hdr.Typeflag == tar.TypeLink { - err = fs.Link(hdr.Linkname, hdr.Name) - if err != nil { - return err - } - } else { - f := &compactext4.File{ - Mode: uint16(hdr.Mode), - Atime: hdr.AccessTime, - Mtime: hdr.ModTime, - Ctime: hdr.ChangeTime, - Crtime: hdr.ModTime, - Size: hdr.Size, - Uid: uint32(hdr.Uid), - Gid: uint32(hdr.Gid), - Linkname: hdr.Linkname, - Devmajor: uint32(hdr.Devmajor), - Devminor: uint32(hdr.Devminor), - Xattrs: make(map[string][]byte), - } - for key, value := range hdr.PAXRecords { - const xattrPrefix = "SCHILY.xattr." 
- if strings.HasPrefix(key, xattrPrefix) { - f.Xattrs[key[len(xattrPrefix):]] = []byte(value) - } - } - - var typ uint16 - switch hdr.Typeflag { - case tar.TypeReg, tar.TypeRegA: - typ = compactext4.S_IFREG - case tar.TypeSymlink: - typ = compactext4.S_IFLNK - case tar.TypeChar: - typ = compactext4.S_IFCHR - case tar.TypeBlock: - typ = compactext4.S_IFBLK - case tar.TypeDir: - typ = compactext4.S_IFDIR - case tar.TypeFifo: - typ = compactext4.S_IFIFO - } - f.Mode &= ^compactext4.TypeMask - f.Mode |= typ - err = fs.CreateWithParents(hdr.Name, f) - if err != nil { - return err - } - _, err = io.Copy(fs, t) - if err != nil { - return err - } - } - } - err := fs.Close() - if err != nil { - return err - } - if p.appendVhdFooter { - size, err := w.Seek(0, io.SeekEnd) - if err != nil { - return err - } - err = binary.Write(w, binary.BigEndian, makeFixedVHDFooter(size)) - if err != nil { - return err - } - } - return nil -} - -// ReadExt4SuperBlock reads and returns ext4 super block from VHD -// -// The layout on disk is as follows: -// | Group 0 padding | - 1024 bytes -// | ext4 SuperBlock | - 1 block -// | Group Descriptors | - many blocks -// | Reserved GDT Blocks | - many blocks -// | Data Block Bitmap | - 1 block -// | inode Bitmap | - 1 block -// | inode Table | - many blocks -// | Data Blocks | - many blocks -// -// More details can be found here https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout -// -// Our goal is to skip the Group 0 padding, read and return the ext4 SuperBlock -func ReadExt4SuperBlock(vhdPath string) (*format.SuperBlock, error) { - vhd, err := os.OpenFile(vhdPath, os.O_RDONLY, 0) - if err != nil { - return nil, err - } - defer vhd.Close() - - // Skip padding at the start - if _, err := vhd.Seek(1024, io.SeekStart); err != nil { - return nil, err - } - var sb format.SuperBlock - if err := binary.Read(vhd, binary.LittleEndian, &sb); err != nil { - return nil, err - } - return &sb, nil -} diff --git 
a/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go b/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go deleted file mode 100644 index 99f6e3a..0000000 --- a/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go +++ /dev/null @@ -1,76 +0,0 @@ -package tar2ext4 - -import ( - "bytes" - "crypto/rand" - "encoding/binary" -) - -// Constants for the VHD footer -const ( - cookieMagic = "conectix" - featureMask = 0x2 - fileFormatVersionMagic = 0x00010000 - fixedDataOffset = -1 - creatorVersionMagic = 0x000a0000 - diskTypeFixed = 2 -) - -type vhdFooter struct { - Cookie [8]byte - Features uint32 - FileFormatVersion uint32 - DataOffset int64 - TimeStamp uint32 - CreatorApplication [4]byte - CreatorVersion uint32 - CreatorHostOS [4]byte - OriginalSize int64 - CurrentSize int64 - DiskGeometry uint32 - DiskType uint32 - Checksum uint32 - UniqueID [16]uint8 - SavedState uint8 - Reserved [427]uint8 -} - -func makeFixedVHDFooter(size int64) *vhdFooter { - footer := &vhdFooter{ - Features: featureMask, - FileFormatVersion: fileFormatVersionMagic, - DataOffset: fixedDataOffset, - CreatorVersion: creatorVersionMagic, - OriginalSize: size, - CurrentSize: size, - DiskType: diskTypeFixed, - UniqueID: generateUUID(), - } - copy(footer.Cookie[:], cookieMagic) - footer.Checksum = calculateCheckSum(footer) - return footer -} - -func calculateCheckSum(footer *vhdFooter) uint32 { - oldchk := footer.Checksum - footer.Checksum = 0 - - buf := &bytes.Buffer{} - _ = binary.Write(buf, binary.BigEndian, footer) - - var chk uint32 - bufBytes := buf.Bytes() - for i := 0; i < len(bufBytes); i++ { - chk += uint32(bufBytes[i]) - } - footer.Checksum = oldchk - return uint32(^chk) -} - -func generateUUID() [16]byte { - res := [16]byte{} - if _, err := rand.Read(res[:]); err != nil { - panic(err) - } - return res -} diff --git a/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 b/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 deleted file mode 100644 index 
ce6edbc..0000000 --- a/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 +++ /dev/null @@ -1,12 +0,0 @@ -# Requirements so far: -# dockerd running -# - image microsoft/nanoserver (matching host base image) docker load -i c:\baseimages\nanoserver.tar -# - image alpine (linux) docker pull --platform=linux alpine - - -# TODO: Add this a parameter for debugging. ie "functional-tests -debug=$true" -#$env:HCSSHIM_FUNCTIONAL_TESTS_DEBUG="yes please" - -#pushd uvm -go test -v -tags "functional uvmcreate uvmscratch uvmscsi uvmvpmem uvmvsmb uvmp9" ./... -#popd \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go deleted file mode 100644 index eefd88d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go +++ /dev/null @@ -1,304 +0,0 @@ -// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server -// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). -package hcn - -import ( - "encoding/json" - "fmt" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" -) - -//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go hcn.go - -/// HNS V1 API - -//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId -//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? - -/// HCN V2 API - -// Network -//sys hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNetworks? -//sys hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnCreateNetwork? -//sys hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnOpenNetwork? 
-//sys hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNetwork? -//sys hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNetworkProperties? -//sys hcnDeleteNetwork(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNetwork? -//sys hcnCloseNetwork(network hcnNetwork) (hr error) = computenetwork.HcnCloseNetwork? - -// Endpoint -//sys hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateEndpoints? -//sys hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnCreateEndpoint? -//sys hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnOpenEndpoint? -//sys hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) = computenetwork.HcnModifyEndpoint? -//sys hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryEndpointProperties? -//sys hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteEndpoint? -//sys hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) = computenetwork.HcnCloseEndpoint? - -// Namespace -//sys hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNamespaces? -//sys hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnCreateNamespace? -//sys hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnOpenNamespace? -//sys hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNamespace? 
-//sys hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNamespaceProperties? -//sys hcnDeleteNamespace(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNamespace? -//sys hcnCloseNamespace(namespace hcnNamespace) (hr error) = computenetwork.HcnCloseNamespace? - -// LoadBalancer -//sys hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateLoadBalancers? -//sys hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnCreateLoadBalancer? -//sys hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnOpenLoadBalancer? -//sys hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) = computenetwork.HcnModifyLoadBalancer? -//sys hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryLoadBalancerProperties? -//sys hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteLoadBalancer? -//sys hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) = computenetwork.HcnCloseLoadBalancer? - -// SDN Routes -//sys hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateSdnRoutes? -//sys hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnCreateSdnRoute? -//sys hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnOpenSdnRoute? -//sys hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) = computenetwork.HcnModifySdnRoute? 
-//sys hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQuerySdnRouteProperties? -//sys hcnDeleteRoute(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteSdnRoute? -//sys hcnCloseRoute(route hcnRoute) (hr error) = computenetwork.HcnCloseSdnRoute? - -type _guid = guid.GUID - -type hcnNetwork syscall.Handle -type hcnEndpoint syscall.Handle -type hcnNamespace syscall.Handle -type hcnLoadBalancer syscall.Handle -type hcnRoute syscall.Handle - -// SchemaVersion for HCN Objects/Queries. -type SchemaVersion = Version // hcnglobals.go - -// HostComputeQueryFlags are passed in to a HostComputeQuery to determine which -// properties of an object are returned. -type HostComputeQueryFlags uint32 - -var ( - // HostComputeQueryFlagsNone returns an object with the standard properties. - HostComputeQueryFlagsNone HostComputeQueryFlags - // HostComputeQueryFlagsDetailed returns an object with all properties. - HostComputeQueryFlagsDetailed HostComputeQueryFlags = 1 -) - -// HostComputeQuery is the format for HCN queries. -type HostComputeQuery struct { - SchemaVersion SchemaVersion `json:""` - Flags HostComputeQueryFlags `json:",omitempty"` - Filter string `json:",omitempty"` -} - -type ExtraParams struct { - Resources json.RawMessage `json:",omitempty"` - SharedContainers json.RawMessage `json:",omitempty"` - LayeredOn string `json:",omitempty"` - SwitchGuid string `json:",omitempty"` - UtilityVM string `json:",omitempty"` - VirtualMachine string `json:",omitempty"` -} - -type Health struct { - Data interface{} `json:",omitempty"` - Extra ExtraParams `json:",omitempty"` -} - -// defaultQuery generates HCN Query. -// Passed into get/enumerate calls to filter results. 
-func defaultQuery() HostComputeQuery { - query := HostComputeQuery{ - SchemaVersion: SchemaVersion{ - Major: 2, - Minor: 0, - }, - Flags: HostComputeQueryFlagsNone, - } - return query -} - -// PlatformDoesNotSupportError happens when users are attempting to use a newer shim on an older OS -func platformDoesNotSupportError(featureName string) error { - return fmt.Errorf("Platform does not support feature %s", featureName) -} - -// V2ApiSupported returns an error if the HCN version does not support the V2 Apis. -func V2ApiSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.Api.V2 { - return nil - } - return platformDoesNotSupportError("V2 Api/Schema") -} - -func V2SchemaVersion() SchemaVersion { - return SchemaVersion{ - Major: 2, - Minor: 0, - } -} - -// RemoteSubnetSupported returns an error if the HCN version does not support Remote Subnet policies. -func RemoteSubnetSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.RemoteSubnet { - return nil - } - return platformDoesNotSupportError("Remote Subnet") -} - -// HostRouteSupported returns an error if the HCN version does not support Host Route policies. -func HostRouteSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.HostRoute { - return nil - } - return platformDoesNotSupportError("Host Route") -} - -// DSRSupported returns an error if the HCN version does not support Direct Server Return. -func DSRSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.DSR { - return nil - } - return platformDoesNotSupportError("Direct Server Return (DSR)") -} - -// Slash32EndpointPrefixesSupported returns an error if the HCN version does not support configuring endpoints with /32 prefixes. 
-func Slash32EndpointPrefixesSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.Slash32EndpointPrefixes { - return nil - } - return platformDoesNotSupportError("Slash 32 Endpoint prefixes") -} - -// AclSupportForProtocol252Supported returns an error if the HCN version does not support HNS ACL Policies to support protocol 252 for VXLAN. -func AclSupportForProtocol252Supported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.AclSupportForProtocol252 { - return nil - } - return platformDoesNotSupportError("HNS ACL Policies to support protocol 252 for VXLAN") -} - -// SessionAffinitySupported returns an error if the HCN version does not support Session Affinity. -func SessionAffinitySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.SessionAffinity { - return nil - } - return platformDoesNotSupportError("Session Affinity") -} - -// IPv6DualStackSupported returns an error if the HCN version does not support IPv6DualStack. 
-func IPv6DualStackSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.IPv6DualStack { - return nil - } - return platformDoesNotSupportError("IPv6 DualStack") -} - -//L4proxySupported returns an error if the HCN verison does not support L4Proxy -func L4proxyPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.L4Proxy { - return nil - } - return platformDoesNotSupportError("L4ProxyPolicy") -} - -// L4WfpProxySupported returns an error if the HCN verison does not support L4WfpProxy -func L4WfpProxyPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.L4WfpProxy { - return nil - } - return platformDoesNotSupportError("L4WfpProxyPolicy") -} - -// SetPolicySupported returns an error if the HCN version does not support SetPolicy. -func SetPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.SetPolicy { - return nil - } - return platformDoesNotSupportError("SetPolicy") -} - -// VxlanPortSupported returns an error if the HCN version does not support configuring the VXLAN TCP port. -func VxlanPortSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.VxlanPort { - return nil - } - return platformDoesNotSupportError("VXLAN port configuration") -} - -// TierAclPolicySupported returns an error if the HCN version does not support configuring the TierAcl. -func TierAclPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.TierAcl { - return nil - } - return platformDoesNotSupportError("TierAcl") -} - -// RequestType are the different operations performed to settings. -// Used to update the settings of Endpoint/Namespace objects. 
-type RequestType string - -var ( - // RequestTypeAdd adds the provided settings object. - RequestTypeAdd RequestType = "Add" - // RequestTypeRemove removes the provided settings object. - RequestTypeRemove RequestType = "Remove" - // RequestTypeUpdate replaces settings with the ones provided. - RequestTypeUpdate RequestType = "Update" - // RequestTypeRefresh refreshes the settings provided. - RequestTypeRefresh RequestType = "Refresh" -) diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go deleted file mode 100644 index 545e863..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go +++ /dev/null @@ -1,388 +0,0 @@ -package hcn - -import ( - "encoding/json" - "errors" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// IpConfig is assoicated with an endpoint -type IpConfig struct { - IpAddress string `json:",omitempty"` - PrefixLength uint8 `json:",omitempty"` -} - -// EndpointFlags are special settings on an endpoint. -type EndpointFlags uint32 - -var ( - // EndpointFlagsNone is the default. - EndpointFlagsNone EndpointFlags - // EndpointFlagsRemoteEndpoint means that an endpoint is on another host. 
- EndpointFlagsRemoteEndpoint EndpointFlags = 1 -) - -// HostComputeEndpoint represents a network endpoint -type HostComputeEndpoint struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - HostComputeNetwork string `json:",omitempty"` // GUID - HostComputeNamespace string `json:",omitempty"` // GUID - Policies []EndpointPolicy `json:",omitempty"` - IpConfigurations []IpConfig `json:",omitempty"` - Dns Dns `json:",omitempty"` - Routes []Route `json:",omitempty"` - MacAddress string `json:",omitempty"` - Flags EndpointFlags `json:",omitempty"` - Health Health `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` -} - -// EndpointResourceType are the two different Endpoint settings resources. -type EndpointResourceType string - -var ( - // EndpointResourceTypePolicy is for Endpoint Policies. Ex: ACL, NAT - EndpointResourceTypePolicy EndpointResourceType = "Policy" - // EndpointResourceTypePort is for Endpoint Port settings. - EndpointResourceTypePort EndpointResourceType = "Port" -) - -// ModifyEndpointSettingRequest is the structure used to send request to modify an endpoint. -// Used to update policy/port on an endpoint. -type ModifyEndpointSettingRequest struct { - ResourceType EndpointResourceType `json:",omitempty"` // Policy, Port - RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh - Settings json.RawMessage `json:",omitempty"` -} - -// VmEndpointRequest creates a switch port with identifier `PortId`. -type VmEndpointRequest struct { - PortId guid.GUID `json:",omitempty"` - VirtualNicName string `json:",omitempty"` - VirtualMachineId guid.GUID `json:",omitempty"` -} - -type PolicyEndpointRequest struct { - Policies []EndpointPolicy `json:",omitempty"` -} - -func getEndpoint(endpointGuid guid.GUID, query string) (*HostComputeEndpoint, error) { - // Open endpoint. 
- var ( - endpointHandle hcnEndpoint - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) - if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Query endpoint. - hr = hcnQueryEndpointProperties(endpointHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close endpoint. - hr = hcnCloseEndpoint(endpointHandle) - if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeEndpoint - var outputEndpoint HostComputeEndpoint - if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { - return nil, err - } - return &outputEndpoint, nil -} - -func enumerateEndpoints(query string) ([]HostComputeEndpoint, error) { - // Enumerate all Endpoint Guids - var ( - resultBuffer *uint16 - endpointBuffer *uint16 - ) - hr := hcnEnumerateEndpoints(query, &endpointBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateEndpoints", hr, resultBuffer); err != nil { - return nil, err - } - - endpoints := interop.ConvertAndFreeCoTaskMemString(endpointBuffer) - var endpointIds []guid.GUID - err := json.Unmarshal([]byte(endpoints), &endpointIds) - if err != nil { - return nil, err - } - - var outputEndpoints []HostComputeEndpoint - for _, endpointGuid := range endpointIds { - endpoint, err := getEndpoint(endpointGuid, query) - if err != nil { - return nil, err - } - outputEndpoints = append(outputEndpoints, *endpoint) - } - return outputEndpoints, nil -} - -func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndpoint, error) { - networkGuid, err := guid.FromString(networkId) - if err != nil { - return nil, errInvalidNetworkID - } - // Open network. 
- var networkHandle hcnNetwork - var resultBuffer *uint16 - hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Create endpoint. - endpointId := guid.GUID{} - var endpointHandle hcnEndpoint - hr = hcnCreateEndpoint(networkHandle, &endpointId, endpointSettings, &endpointHandle, &resultBuffer) - if err := checkForErrors("hcnCreateEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Query endpoint. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - var propertiesBuffer *uint16 - hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close endpoint. - hr = hcnCloseEndpoint(endpointHandle) - if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { - return nil, err - } - // Close network. 
- hr = hcnCloseNetwork(networkHandle) - if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeEndpoint - var outputEndpoint HostComputeEndpoint - if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { - return nil, err - } - return &outputEndpoint, nil -} - -func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, error) { - endpointGuid, err := guid.FromString(endpointId) - if err != nil { - return nil, errInvalidEndpointID - } - // Open endpoint - var ( - endpointHandle hcnEndpoint - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) - if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Modify endpoint - hr = hcnModifyEndpoint(endpointHandle, settings, &resultBuffer) - if err := checkForErrors("hcnModifyEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Query endpoint. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close endpoint. 
- hr = hcnCloseEndpoint(endpointHandle) - if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeEndpoint - var outputEndpoint HostComputeEndpoint - if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { - return nil, err - } - return &outputEndpoint, nil -} - -func deleteEndpoint(endpointId string) error { - endpointGuid, err := guid.FromString(endpointId) - if err != nil { - return errInvalidEndpointID - } - var resultBuffer *uint16 - hr := hcnDeleteEndpoint(&endpointGuid, &resultBuffer) - if err := checkForErrors("hcnDeleteEndpoint", hr, resultBuffer); err != nil { - return err - } - return nil -} - -// ListEndpoints makes a call to list all available endpoints. -func ListEndpoints() ([]HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - endpoints, err := ListEndpointsQuery(hcnQuery) - if err != nil { - return nil, err - } - return endpoints, nil -} - -// ListEndpointsQuery makes a call to query the list of available endpoints. -func ListEndpointsQuery(query HostComputeQuery) ([]HostComputeEndpoint, error) { - queryJson, err := json.Marshal(query) - if err != nil { - return nil, err - } - - endpoints, err := enumerateEndpoints(string(queryJson)) - if err != nil { - return nil, err - } - return endpoints, nil -} - -// ListEndpointsOfNetwork queries the list of endpoints on a network. 
-func ListEndpointsOfNetwork(networkId string) ([]HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - // TODO: Once query can convert schema, change to {HostComputeNetwork:networkId} - mapA := map[string]string{"VirtualNetwork": networkId} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - return ListEndpointsQuery(hcnQuery) -} - -// GetEndpointByID returns an endpoint specified by Id -func GetEndpointByID(endpointId string) (*HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": endpointId} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - endpoints, err := ListEndpointsQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(endpoints) == 0 { - return nil, EndpointNotFoundError{EndpointID: endpointId} - } - return &endpoints[0], err -} - -// GetEndpointByName returns an endpoint specified by Name -func GetEndpointByName(endpointName string) (*HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"Name": endpointName} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - endpoints, err := ListEndpointsQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(endpoints) == 0 { - return nil, EndpointNotFoundError{EndpointName: endpointName} - } - return &endpoints[0], err -} - -// Create Endpoint. 
-func (endpoint *HostComputeEndpoint) Create() (*HostComputeEndpoint, error) { - logrus.Debugf("hcn::HostComputeEndpoint::Create id=%s", endpoint.Id) - - if endpoint.HostComputeNamespace != "" { - return nil, errors.New("endpoint create error, endpoint json HostComputeNamespace is read only and should not be set") - } - - jsonString, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeEndpoint::Create JSON: %s", jsonString) - endpoint, hcnErr := createEndpoint(endpoint.HostComputeNetwork, string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return endpoint, nil -} - -// Delete Endpoint. -func (endpoint *HostComputeEndpoint) Delete() error { - logrus.Debugf("hcn::HostComputeEndpoint::Delete id=%s", endpoint.Id) - - if err := deleteEndpoint(endpoint.Id); err != nil { - return err - } - return nil -} - -// ModifyEndpointSettings updates the Port/Policy of an Endpoint. -func ModifyEndpointSettings(endpointId string, request *ModifyEndpointSettingRequest) error { - logrus.Debugf("hcn::HostComputeEndpoint::ModifyEndpointSettings id=%s", endpointId) - - endpointSettingsRequest, err := json.Marshal(request) - if err != nil { - return err - } - - _, err = modifyEndpoint(endpointId, string(endpointSettingsRequest)) - if err != nil { - return err - } - return nil -} - -// ApplyPolicy applies a Policy (ex: ACL) on the Endpoint. -func (endpoint *HostComputeEndpoint) ApplyPolicy(requestType RequestType, endpointPolicy PolicyEndpointRequest) error { - logrus.Debugf("hcn::HostComputeEndpoint::ApplyPolicy id=%s", endpoint.Id) - - settingsJson, err := json.Marshal(endpointPolicy) - if err != nil { - return err - } - requestMessage := &ModifyEndpointSettingRequest{ - ResourceType: EndpointResourceTypePolicy, - RequestType: requestType, - Settings: settingsJson, - } - - return ModifyEndpointSettings(endpoint.Id, requestMessage) -} - -// NamespaceAttach modifies a Namespace to add an endpoint. 
-func (endpoint *HostComputeEndpoint) NamespaceAttach(namespaceId string) error { - return AddNamespaceEndpoint(namespaceId, endpoint.Id) -} - -// NamespaceDetach modifies a Namespace to remove an endpoint. -func (endpoint *HostComputeEndpoint) NamespaceDetach(namespaceId string) error { - return RemoveNamespaceEndpoint(namespaceId, endpoint.Id) -} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go deleted file mode 100644 index ad30d32..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go +++ /dev/null @@ -1,164 +0,0 @@ -// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server -// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). -package hcn - -import ( - "errors" - "fmt" - - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -var ( - errInvalidNetworkID = errors.New("invalid network ID") - errInvalidEndpointID = errors.New("invalid endpoint ID") - errInvalidNamespaceID = errors.New("invalid namespace ID") - errInvalidLoadBalancerID = errors.New("invalid load balancer ID") - errInvalidRouteID = errors.New("invalid route ID") -) - -func checkForErrors(methodName string, hr error, resultBuffer *uint16) error { - errorFound := false - - if hr != nil { - errorFound = true - } - - result := "" - if resultBuffer != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultBuffer) - if result != "" { - errorFound = true - } - } - - if errorFound { - returnError := new(hr, methodName, result) - logrus.Debugf(returnError.Error()) // HCN errors logged for debugging. 
- return returnError - } - - return nil -} - -type ErrorCode uint32 - -// For common errors, define the error as it is in windows, so we can quickly determine it later -const ( - ERROR_NOT_FOUND = 0x490 - HCN_E_PORT_ALREADY_EXISTS ErrorCode = 0x803b0013 -) - -type HcnError struct { - *hcserror.HcsError - code ErrorCode -} - -func (e *HcnError) Error() string { - return e.HcsError.Error() -} - -func CheckErrorWithCode(err error, code ErrorCode) bool { - hcnError, ok := err.(*HcnError) - if ok { - return hcnError.code == code - } - return false -} - -func IsElementNotFoundError(err error) bool { - return CheckErrorWithCode(err, ERROR_NOT_FOUND) -} - -func IsPortAlreadyExistsError(err error) bool { - return CheckErrorWithCode(err, HCN_E_PORT_ALREADY_EXISTS) -} - -func new(hr error, title string, rest string) error { - err := &HcnError{} - hcsError := hcserror.New(hr, title, rest) - err.HcsError = hcsError.(*hcserror.HcsError) - err.code = ErrorCode(hcserror.Win32FromError(hr)) - return err -} - -// -// Note that the below errors are not errors returned by hcn itself -// we wish to seperate them as they are shim usage error -// - -// NetworkNotFoundError results from a failed seach for a network by Id or Name -type NetworkNotFoundError struct { - NetworkName string - NetworkID string -} - -func (e NetworkNotFoundError) Error() string { - if e.NetworkName != "" { - return fmt.Sprintf("Network name %q not found", e.NetworkName) - } - return fmt.Sprintf("Network ID %q not found", e.NetworkID) -} - -// EndpointNotFoundError results from a failed seach for an endpoint by Id or Name -type EndpointNotFoundError struct { - EndpointName string - EndpointID string -} - -func (e EndpointNotFoundError) Error() string { - if e.EndpointName != "" { - return fmt.Sprintf("Endpoint name %q not found", e.EndpointName) - } - return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID) -} - -// NamespaceNotFoundError results from a failed seach for a namsepace by Id -type 
NamespaceNotFoundError struct { - NamespaceID string -} - -func (e NamespaceNotFoundError) Error() string { - return fmt.Sprintf("Namespace ID %q not found", e.NamespaceID) -} - -// LoadBalancerNotFoundError results from a failed seach for a loadbalancer by Id -type LoadBalancerNotFoundError struct { - LoadBalancerId string -} - -func (e LoadBalancerNotFoundError) Error() string { - return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId) -} - -// RouteNotFoundError results from a failed seach for a route by Id -type RouteNotFoundError struct { - RouteId string -} - -func (e RouteNotFoundError) Error() string { - return fmt.Sprintf("SDN Route %q not found", e.RouteId) -} - -// IsNotFoundError returns a boolean indicating whether the error was caused by -// a resource not being found. -func IsNotFoundError(err error) bool { - switch pe := err.(type) { - case NetworkNotFoundError: - return true - case EndpointNotFoundError: - return true - case NamespaceNotFoundError: - return true - case LoadBalancerNotFoundError: - return true - case RouteNotFoundError: - return true - case *hcserror.HcsError: - return pe.Err == hcs.ErrElementNotFound - } - return false -} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go deleted file mode 100644 index d03c487..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go +++ /dev/null @@ -1,132 +0,0 @@ -package hcn - -import ( - "encoding/json" - "fmt" - "math" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// Globals are all global properties of the HCN Service. -type Globals struct { - Version Version `json:"Version"` -} - -// Version is the HCN Service version. 
-type Version struct { - Major int `json:"Major"` - Minor int `json:"Minor"` -} - -type VersionRange struct { - MinVersion Version - MaxVersion Version -} - -type VersionRanges []VersionRange - -var ( - // HNSVersion1803 added ACL functionality. - HNSVersion1803 = VersionRanges{VersionRange{MinVersion: Version{Major: 7, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // V2ApiSupport allows the use of V2 Api calls and V2 Schema. - V2ApiSupport = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // Remote Subnet allows for Remote Subnet policies on Overlay networks - RemoteSubnetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // A Host Route policy allows for local container to local host communication Overlay networks - HostRouteVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // HNS 9.3 through 10.0 (not included), and 10.2+ allows for Direct Server Return for loadbalancing - DSRVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 10, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 9.3 through 10.0 (not included) and, 10.4+ provide support for configuring endpoints with /32 prefixes - Slash32EndpointPrefixesVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 10, Minor: 4}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 9.3 through 10.0 (not included) and, 10.4+ allow for HNS ACL Policies to support protocol 252 for VXLAN - 
AclSupportForProtocol252Version = VersionRanges{ - VersionRange{MinVersion: Version{Major: 11, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 12.0 allows for session affinity for loadbalancing - SessionAffinityVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 12, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // HNS 11.10+ supports Ipv6 dual stack. - IPv6DualStackVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 11, Minor: 10}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 13.0 allows for Set Policy support - SetPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // HNS 10.3 allows for VXLAN ports - VxlanPortVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 10, Minor: 3}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - - //HNS 9.5 through 10.0(not included), 10.5 through 11.0(not included), 11.11 through 12.0(not included), 12.1 through 13.0(not included), 13.1+ allows for Network L4Proxy Policy support - L4ProxyPolicyVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 9, Minor: 5}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 10, Minor: 5}, MaxVersion: Version{Major: 10, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 11, Minor: 11}, MaxVersion: Version{Major: 11, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 12, Minor: 1}, MaxVersion: Version{Major: 12, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 13, Minor: 1}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - - //HNS 13.2 allows for L4WfpProxy Policy support - L4WfpProxyPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, 
Minor: math.MaxInt32}}} - - //HNS 14.0 allows for TierAcl Policy support - TierAclPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 14, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} -) - -// GetGlobals returns the global properties of the HCN Service. -func GetGlobals() (*Globals, error) { - var version Version - err := hnsCall("GET", "/globals/version", "", &version) - if err != nil { - return nil, err - } - - globals := &Globals{ - Version: version, - } - - return globals, nil -} - -type hnsResponse struct { - Success bool - Error string - Output json.RawMessage -} - -func hnsCall(method, path, request string, returnResponse interface{}) error { - var responseBuffer *uint16 - logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) - - err := _hnsCall(method, path, request, &responseBuffer) - if err != nil { - return hcserror.New(err, "hnsCall ", "") - } - response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) - - hnsresponse := &hnsResponse{} - if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { - return err - } - - if !hnsresponse.Success { - return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error) - } - - if len(hnsresponse.Output) == 0 { - return nil - } - - logrus.Debugf("Network Response : %s", hnsresponse.Output) - err = json.Unmarshal(hnsresponse.Output, returnResponse) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go deleted file mode 100644 index 1b434b0..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go +++ /dev/null @@ -1,311 +0,0 @@ -package hcn - -import ( - "encoding/json" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// LoadBalancerPortMapping is associated with HostComputeLoadBalancer -type 
LoadBalancerPortMapping struct { - Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 - InternalPort uint16 `json:",omitempty"` - ExternalPort uint16 `json:",omitempty"` - DistributionType LoadBalancerDistribution `json:",omitempty"` // EX: Distribute per connection = 0, distribute traffic of the same protocol per client IP = 1, distribute per client IP = 2 - Flags LoadBalancerPortMappingFlags `json:",omitempty"` -} - -// HostComputeLoadBalancer represents software load balancer. -type HostComputeLoadBalancer struct { - Id string `json:"ID,omitempty"` - HostComputeEndpoints []string `json:",omitempty"` - SourceVIP string `json:",omitempty"` - FrontendVIPs []string `json:",omitempty"` - PortMappings []LoadBalancerPortMapping `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` - Flags LoadBalancerFlags `json:",omitempty"` // 0: None, 1: EnableDirectServerReturn -} - -//LoadBalancerFlags modify settings for a loadbalancer. -type LoadBalancerFlags uint32 - -var ( - // LoadBalancerFlagsNone is the default. - LoadBalancerFlagsNone LoadBalancerFlags = 0 - // LoadBalancerFlagsDSR enables Direct Server Return (DSR) - LoadBalancerFlagsDSR LoadBalancerFlags = 1 - LoadBalancerFlagsIPv6 LoadBalancerFlags = 2 -) - -// LoadBalancerPortMappingFlags are special settings on a loadbalancer. -type LoadBalancerPortMappingFlags uint32 - -var ( - // LoadBalancerPortMappingFlagsNone is the default. - LoadBalancerPortMappingFlagsNone LoadBalancerPortMappingFlags - // LoadBalancerPortMappingFlagsILB enables internal loadbalancing. - LoadBalancerPortMappingFlagsILB LoadBalancerPortMappingFlags = 1 - // LoadBalancerPortMappingFlagsLocalRoutedVIP enables VIP access from the host. - LoadBalancerPortMappingFlagsLocalRoutedVIP LoadBalancerPortMappingFlags = 2 - // LoadBalancerPortMappingFlagsUseMux enables DSR for NodePort access of VIP. 
- LoadBalancerPortMappingFlagsUseMux LoadBalancerPortMappingFlags = 4 - // LoadBalancerPortMappingFlagsPreserveDIP delivers packets with destination IP as the VIP. - LoadBalancerPortMappingFlagsPreserveDIP LoadBalancerPortMappingFlags = 8 -) - -// LoadBalancerDistribution specifies how the loadbalancer distributes traffic. -type LoadBalancerDistribution uint32 - -var ( - // LoadBalancerDistributionNone is the default and loadbalances each connection to the same pod. - LoadBalancerDistributionNone LoadBalancerDistribution - // LoadBalancerDistributionSourceIPProtocol loadbalances all traffic of the same protocol from a client IP to the same pod. - LoadBalancerDistributionSourceIPProtocol LoadBalancerDistribution = 1 - // LoadBalancerDistributionSourceIP loadbalances all traffic from a client IP to the same pod. - LoadBalancerDistributionSourceIP LoadBalancerDistribution = 2 -) - -func getLoadBalancer(loadBalancerGuid guid.GUID, query string) (*HostComputeLoadBalancer, error) { - // Open loadBalancer. - var ( - loadBalancerHandle hcnLoadBalancer - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenLoadBalancer(&loadBalancerGuid, &loadBalancerHandle, &resultBuffer) - if err := checkForErrors("hcnOpenLoadBalancer", hr, resultBuffer); err != nil { - return nil, err - } - // Query loadBalancer. - hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close loadBalancer. 
- hr = hcnCloseLoadBalancer(loadBalancerHandle) - if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeLoadBalancer - var outputLoadBalancer HostComputeLoadBalancer - if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { - return nil, err - } - return &outputLoadBalancer, nil -} - -func enumerateLoadBalancers(query string) ([]HostComputeLoadBalancer, error) { - // Enumerate all LoadBalancer Guids - var ( - resultBuffer *uint16 - loadBalancerBuffer *uint16 - ) - hr := hcnEnumerateLoadBalancers(query, &loadBalancerBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateLoadBalancers", hr, resultBuffer); err != nil { - return nil, err - } - - loadBalancers := interop.ConvertAndFreeCoTaskMemString(loadBalancerBuffer) - var loadBalancerIds []guid.GUID - if err := json.Unmarshal([]byte(loadBalancers), &loadBalancerIds); err != nil { - return nil, err - } - - var outputLoadBalancers []HostComputeLoadBalancer - for _, loadBalancerGuid := range loadBalancerIds { - loadBalancer, err := getLoadBalancer(loadBalancerGuid, query) - if err != nil { - return nil, err - } - outputLoadBalancers = append(outputLoadBalancers, *loadBalancer) - } - return outputLoadBalancers, nil -} - -func createLoadBalancer(settings string) (*HostComputeLoadBalancer, error) { - // Create new loadBalancer. - var ( - loadBalancerHandle hcnLoadBalancer - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - loadBalancerGuid := guid.GUID{} - hr := hcnCreateLoadBalancer(&loadBalancerGuid, settings, &loadBalancerHandle, &resultBuffer) - if err := checkForErrors("hcnCreateLoadBalancer", hr, resultBuffer); err != nil { - return nil, err - } - // Query loadBalancer. 
- hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close loadBalancer. - hr = hcnCloseLoadBalancer(loadBalancerHandle) - if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeLoadBalancer - var outputLoadBalancer HostComputeLoadBalancer - if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { - return nil, err - } - return &outputLoadBalancer, nil -} - -func deleteLoadBalancer(loadBalancerId string) error { - loadBalancerGuid, err := guid.FromString(loadBalancerId) - if err != nil { - return errInvalidLoadBalancerID - } - var resultBuffer *uint16 - hr := hcnDeleteLoadBalancer(&loadBalancerGuid, &resultBuffer) - if err := checkForErrors("hcnDeleteLoadBalancer", hr, resultBuffer); err != nil { - return err - } - return nil -} - -// ListLoadBalancers makes a call to list all available loadBalancers. -func ListLoadBalancers() ([]HostComputeLoadBalancer, error) { - hcnQuery := defaultQuery() - loadBalancers, err := ListLoadBalancersQuery(hcnQuery) - if err != nil { - return nil, err - } - return loadBalancers, nil -} - -// ListLoadBalancersQuery makes a call to query the list of available loadBalancers. -func ListLoadBalancersQuery(query HostComputeQuery) ([]HostComputeLoadBalancer, error) { - queryJson, err := json.Marshal(query) - if err != nil { - return nil, err - } - - loadBalancers, err := enumerateLoadBalancers(string(queryJson)) - if err != nil { - return nil, err - } - return loadBalancers, nil -} - -// GetLoadBalancerByID returns the LoadBalancer specified by Id. 
-func GetLoadBalancerByID(loadBalancerId string) (*HostComputeLoadBalancer, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": loadBalancerId} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - loadBalancers, err := ListLoadBalancersQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(loadBalancers) == 0 { - return nil, LoadBalancerNotFoundError{LoadBalancerId: loadBalancerId} - } - return &loadBalancers[0], err -} - -// Create LoadBalancer. -func (loadBalancer *HostComputeLoadBalancer) Create() (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::Create id=%s", loadBalancer.Id) - - jsonString, err := json.Marshal(loadBalancer) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeLoadBalancer::Create JSON: %s", jsonString) - loadBalancer, hcnErr := createLoadBalancer(string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return loadBalancer, nil -} - -// Delete LoadBalancer. 
-func (loadBalancer *HostComputeLoadBalancer) Delete() error { - logrus.Debugf("hcn::HostComputeLoadBalancer::Delete id=%s", loadBalancer.Id) - - if err := deleteLoadBalancer(loadBalancer.Id); err != nil { - return err - } - return nil -} - -// AddEndpoint add an endpoint to a LoadBalancer -func (loadBalancer *HostComputeLoadBalancer) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::AddEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) - - err := loadBalancer.Delete() - if err != nil { - return nil, err - } - - // Add Endpoint to the Existing List - loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) - - return loadBalancer.Create() -} - -// RemoveEndpoint removes an endpoint from a LoadBalancer -func (loadBalancer *HostComputeLoadBalancer) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::RemoveEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) - - err := loadBalancer.Delete() - if err != nil { - return nil, err - } - - // Create a list of all the endpoints besides the one being removed - var endpoints []string - for _, endpointReference := range loadBalancer.HostComputeEndpoints { - if endpointReference == endpoint.Id { - continue - } - endpoints = append(endpoints, endpointReference) - } - loadBalancer.HostComputeEndpoints = endpoints - return loadBalancer.Create() -} - -// AddLoadBalancer for the specified endpoints -func AddLoadBalancer(endpoints []HostComputeEndpoint, flags LoadBalancerFlags, portMappingFlags LoadBalancerPortMappingFlags, sourceVIP string, frontendVIPs []string, protocol uint16, internalPort uint16, externalPort uint16) (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::AddLoadBalancer endpointId=%v, LoadBalancerFlags=%v, LoadBalancerPortMappingFlags=%v, sourceVIP=%s, frontendVIPs=%v, 
protocol=%v, internalPort=%v, externalPort=%v", endpoints, flags, portMappingFlags, sourceVIP, frontendVIPs, protocol, internalPort, externalPort) - - loadBalancer := &HostComputeLoadBalancer{ - SourceVIP: sourceVIP, - PortMappings: []LoadBalancerPortMapping{ - { - Protocol: uint32(protocol), - InternalPort: internalPort, - ExternalPort: externalPort, - Flags: portMappingFlags, - }, - }, - FrontendVIPs: frontendVIPs, - SchemaVersion: SchemaVersion{ - Major: 2, - Minor: 0, - }, - Flags: flags, - } - - for _, endpoint := range endpoints { - loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) - } - - return loadBalancer.Create() -} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go deleted file mode 100644 index d2ef229..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go +++ /dev/null @@ -1,446 +0,0 @@ -package hcn - -import ( - "encoding/json" - "os" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" - icni "github.com/Microsoft/hcsshim/internal/cni" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/regstate" - "github.com/Microsoft/hcsshim/internal/runhcs" - "github.com/sirupsen/logrus" -) - -// NamespaceResourceEndpoint represents an Endpoint attached to a Namespace. -type NamespaceResourceEndpoint struct { - Id string `json:"ID,"` -} - -// NamespaceResourceContainer represents a Container attached to a Namespace. -type NamespaceResourceContainer struct { - Id string `json:"ID,"` -} - -// NamespaceResourceType determines whether the Namespace resource is a Container or Endpoint. -type NamespaceResourceType string - -var ( - // NamespaceResourceTypeContainer are contianers associated with a Namespace. - NamespaceResourceTypeContainer NamespaceResourceType = "Container" - // NamespaceResourceTypeEndpoint are endpoints associated with a Namespace. 
- NamespaceResourceTypeEndpoint NamespaceResourceType = "Endpoint" -) - -// NamespaceResource is associated with a namespace -type NamespaceResource struct { - Type NamespaceResourceType `json:","` // Container, Endpoint - Data json.RawMessage `json:","` -} - -// NamespaceType determines whether the Namespace is for a Host or Guest -type NamespaceType string - -var ( - // NamespaceTypeHost are host namespaces. - NamespaceTypeHost NamespaceType = "Host" - // NamespaceTypeHostDefault are host namespaces in the default compartment. - NamespaceTypeHostDefault NamespaceType = "HostDefault" - // NamespaceTypeGuest are guest namespaces. - NamespaceTypeGuest NamespaceType = "Guest" - // NamespaceTypeGuestDefault are guest namespaces in the default compartment. - NamespaceTypeGuestDefault NamespaceType = "GuestDefault" -) - -// HostComputeNamespace represents a namespace (AKA compartment) in -type HostComputeNamespace struct { - Id string `json:"ID,omitempty"` - NamespaceId uint32 `json:",omitempty"` - Type NamespaceType `json:",omitempty"` // Host, HostDefault, Guest, GuestDefault - Resources []NamespaceResource `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` -} - -// ModifyNamespaceSettingRequest is the structure used to send request to modify a namespace. -// Used to Add/Remove an endpoints and containers to/from a namespace. -type ModifyNamespaceSettingRequest struct { - ResourceType NamespaceResourceType `json:",omitempty"` // Container, Endpoint - RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh - Settings json.RawMessage `json:",omitempty"` -} - -func getNamespace(namespaceGuid guid.GUID, query string) (*HostComputeNamespace, error) { - // Open namespace. 
- var ( - namespaceHandle hcnNamespace - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { - return nil, err - } - // Query namespace. - hr = hcnQueryNamespaceProperties(namespaceHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close namespace. - hr = hcnCloseNamespace(namespaceHandle) - if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNamespace - var outputNamespace HostComputeNamespace - if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { - return nil, err - } - return &outputNamespace, nil -} - -func enumerateNamespaces(query string) ([]HostComputeNamespace, error) { - // Enumerate all Namespace Guids - var ( - resultBuffer *uint16 - namespaceBuffer *uint16 - ) - hr := hcnEnumerateNamespaces(query, &namespaceBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateNamespaces", hr, resultBuffer); err != nil { - return nil, err - } - - namespaces := interop.ConvertAndFreeCoTaskMemString(namespaceBuffer) - var namespaceIds []guid.GUID - if err := json.Unmarshal([]byte(namespaces), &namespaceIds); err != nil { - return nil, err - } - - var outputNamespaces []HostComputeNamespace - for _, namespaceGuid := range namespaceIds { - namespace, err := getNamespace(namespaceGuid, query) - if err != nil { - return nil, err - } - outputNamespaces = append(outputNamespaces, *namespace) - } - return outputNamespaces, nil -} - -func createNamespace(settings string) (*HostComputeNamespace, error) { - // Create new namespace. 
- var ( - namespaceHandle hcnNamespace - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - namespaceGuid := guid.GUID{} - hr := hcnCreateNamespace(&namespaceGuid, settings, &namespaceHandle, &resultBuffer) - if err := checkForErrors("hcnCreateNamespace", hr, resultBuffer); err != nil { - return nil, err - } - // Query namespace. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close namespace. - hr = hcnCloseNamespace(namespaceHandle) - if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNamespace - var outputNamespace HostComputeNamespace - if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { - return nil, err - } - return &outputNamespace, nil -} - -func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace, error) { - namespaceGuid, err := guid.FromString(namespaceId) - if err != nil { - return nil, errInvalidNamespaceID - } - // Open namespace. - var ( - namespaceHandle hcnNamespace - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { - return nil, err - } - // Modify namespace. - hr = hcnModifyNamespace(namespaceHandle, settings, &resultBuffer) - if err := checkForErrors("hcnModifyNamespace", hr, resultBuffer); err != nil { - return nil, err - } - // Query namespace. 
- hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close namespace. - hr = hcnCloseNamespace(namespaceHandle) - if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { - return nil, err - } - // Convert output to Namespace - var outputNamespace HostComputeNamespace - if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { - return nil, err - } - return &outputNamespace, nil -} - -func deleteNamespace(namespaceId string) error { - namespaceGuid, err := guid.FromString(namespaceId) - if err != nil { - return errInvalidNamespaceID - } - var resultBuffer *uint16 - hr := hcnDeleteNamespace(&namespaceGuid, &resultBuffer) - if err := checkForErrors("hcnDeleteNamespace", hr, resultBuffer); err != nil { - return err - } - return nil -} - -// ListNamespaces makes a call to list all available namespaces. -func ListNamespaces() ([]HostComputeNamespace, error) { - hcnQuery := defaultQuery() - namespaces, err := ListNamespacesQuery(hcnQuery) - if err != nil { - return nil, err - } - return namespaces, nil -} - -// ListNamespacesQuery makes a call to query the list of available namespaces. -func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error) { - queryJson, err := json.Marshal(query) - if err != nil { - return nil, err - } - - namespaces, err := enumerateNamespaces(string(queryJson)) - if err != nil { - return nil, err - } - return namespaces, nil -} - -// GetNamespaceByID returns the Namespace specified by Id. 
-func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": namespaceId} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - namespaces, err := ListNamespacesQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(namespaces) == 0 { - return nil, NamespaceNotFoundError{NamespaceID: namespaceId} - } - - return &namespaces[0], err -} - -// GetNamespaceEndpointIds returns the endpoints of the Namespace specified by Id. -func GetNamespaceEndpointIds(namespaceId string) ([]string, error) { - namespace, err := GetNamespaceByID(namespaceId) - if err != nil { - return nil, err - } - var endpointsIds []string - for _, resource := range namespace.Resources { - if resource.Type == "Endpoint" { - var endpointResource NamespaceResourceEndpoint - if err := json.Unmarshal([]byte(resource.Data), &endpointResource); err != nil { - return nil, err - } - endpointsIds = append(endpointsIds, endpointResource.Id) - } - } - return endpointsIds, nil -} - -// GetNamespaceContainerIds returns the containers of the Namespace specified by Id. -func GetNamespaceContainerIds(namespaceId string) ([]string, error) { - namespace, err := GetNamespaceByID(namespaceId) - if err != nil { - return nil, err - } - var containerIds []string - for _, resource := range namespace.Resources { - if resource.Type == "Container" { - var contaienrResource NamespaceResourceContainer - if err := json.Unmarshal([]byte(resource.Data), &contaienrResource); err != nil { - return nil, err - } - containerIds = append(containerIds, contaienrResource.Id) - } - } - return containerIds, nil -} - -// NewNamespace creates a new Namespace object -func NewNamespace(nsType NamespaceType) *HostComputeNamespace { - return &HostComputeNamespace{ - Type: nsType, - SchemaVersion: V2SchemaVersion(), - } -} - -// Create Namespace. 
-func (namespace *HostComputeNamespace) Create() (*HostComputeNamespace, error) { - logrus.Debugf("hcn::HostComputeNamespace::Create id=%s", namespace.Id) - - jsonString, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeNamespace::Create JSON: %s", jsonString) - namespace, hcnErr := createNamespace(string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return namespace, nil -} - -// Delete Namespace. -func (namespace *HostComputeNamespace) Delete() error { - logrus.Debugf("hcn::HostComputeNamespace::Delete id=%s", namespace.Id) - - if err := deleteNamespace(namespace.Id); err != nil { - return err - } - return nil -} - -// Sync Namespace endpoints with the appropriate sandbox container holding the -// network namespace open. If no sandbox container is found for this namespace -// this method is determined to be a success and will not return an error in -// this case. If the sandbox container is found and a sync is initiated any -// failures will be returned via this method. -// -// This call initiates a sync between endpoints and the matching UtilityVM -// hosting those endpoints. It is safe to call for any `NamespaceType` but -// `NamespaceTypeGuest` is the only case when a sync will actually occur. For -// `NamespaceTypeHost` the process container will be automatically synchronized -// when the the endpoint is added via `AddNamespaceEndpoint`. -// -// Note: This method sync's both additions and removals of endpoints from a -// `NamespaceTypeGuest` namespace. -func (namespace *HostComputeNamespace) Sync() error { - logrus.WithField("id", namespace.Id).Debugf("hcs::HostComputeNamespace::Sync") - - // We only attempt a sync for namespace guest. 
- if namespace.Type != NamespaceTypeGuest { - return nil - } - - // Look in the registry for the key to map from namespace id to pod-id - cfg, err := icni.LoadPersistedNamespaceConfig(namespace.Id) - if err != nil { - if regstate.IsNotFoundError(err) { - return nil - } - return err - } - req := runhcs.VMRequest{ - ID: cfg.ContainerID, - Op: runhcs.OpSyncNamespace, - } - shimPath := runhcs.VMPipePath(cfg.HostUniqueID) - if err := runhcs.IssueVMRequest(shimPath, &req); err != nil { - // The shim is likey gone. Simply ignore the sync as if it didn't exist. - if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { - // Remove the reg key there is no point to try again - _ = cfg.Remove() - return nil - } - f := map[string]interface{}{ - "id": namespace.Id, - "container-id": cfg.ContainerID, - } - logrus.WithFields(f). - WithError(err). - Debugf("hcs::HostComputeNamespace::Sync failed to connect to shim pipe: '%s'", shimPath) - return err - } - return nil -} - -// ModifyNamespaceSettings updates the Endpoints/Containers of a Namespace. -func ModifyNamespaceSettings(namespaceId string, request *ModifyNamespaceSettingRequest) error { - logrus.Debugf("hcn::HostComputeNamespace::ModifyNamespaceSettings id=%s", namespaceId) - - namespaceSettings, err := json.Marshal(request) - if err != nil { - return err - } - - _, err = modifyNamespace(namespaceId, string(namespaceSettings)) - if err != nil { - return err - } - return nil -} - -// AddNamespaceEndpoint adds an endpoint to a Namespace. 
-func AddNamespaceEndpoint(namespaceId string, endpointId string) error { - logrus.Debugf("hcn::HostComputeEndpoint::AddNamespaceEndpoint id=%s", endpointId) - - mapA := map[string]string{"EndpointId": endpointId} - settingsJson, err := json.Marshal(mapA) - if err != nil { - return err - } - requestMessage := &ModifyNamespaceSettingRequest{ - ResourceType: NamespaceResourceTypeEndpoint, - RequestType: RequestTypeAdd, - Settings: settingsJson, - } - - return ModifyNamespaceSettings(namespaceId, requestMessage) -} - -// RemoveNamespaceEndpoint removes an endpoint from a Namespace. -func RemoveNamespaceEndpoint(namespaceId string, endpointId string) error { - logrus.Debugf("hcn::HostComputeNamespace::RemoveNamespaceEndpoint id=%s", endpointId) - - mapA := map[string]string{"EndpointId": endpointId} - settingsJson, err := json.Marshal(mapA) - if err != nil { - return err - } - requestMessage := &ModifyNamespaceSettingRequest{ - ResourceType: NamespaceResourceTypeEndpoint, - RequestType: RequestTypeRemove, - Settings: settingsJson, - } - - return ModifyNamespaceSettings(namespaceId, requestMessage) -} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go deleted file mode 100644 index c36b136..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go +++ /dev/null @@ -1,462 +0,0 @@ -package hcn - -import ( - "encoding/json" - "errors" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// Route is associated with a subnet. -type Route struct { - NextHop string `json:",omitempty"` - DestinationPrefix string `json:",omitempty"` - Metric uint16 `json:",omitempty"` -} - -// Subnet is associated with a Ipam. 
-type Subnet struct { - IpAddressPrefix string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` - Routes []Route `json:",omitempty"` -} - -// Ipam (Internet Protocol Address Management) is associated with a network -// and represents the address space(s) of a network. -type Ipam struct { - Type string `json:",omitempty"` // Ex: Static, DHCP - Subnets []Subnet `json:",omitempty"` -} - -// MacRange is associated with MacPool and respresents the start and end addresses. -type MacRange struct { - StartMacAddress string `json:",omitempty"` - EndMacAddress string `json:",omitempty"` -} - -// MacPool is associated with a network and represents pool of MacRanges. -type MacPool struct { - Ranges []MacRange `json:",omitempty"` -} - -// Dns (Domain Name System is associated with a network). -type Dns struct { - Domain string `json:",omitempty"` - Search []string `json:",omitempty"` - ServerList []string `json:",omitempty"` - Options []string `json:",omitempty"` -} - -// NetworkType are various networks. -type NetworkType string - -// NetworkType const -const ( - NAT NetworkType = "NAT" - Transparent NetworkType = "Transparent" - L2Bridge NetworkType = "L2Bridge" - L2Tunnel NetworkType = "L2Tunnel" - ICS NetworkType = "ICS" - Private NetworkType = "Private" - Overlay NetworkType = "Overlay" -) - -// NetworkFlags are various network flags. 
-type NetworkFlags uint32 - -// NetworkFlags const -const ( - None NetworkFlags = 0 - EnableNonPersistent NetworkFlags = 8 -) - -// HostComputeNetwork represents a network -type HostComputeNetwork struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - Type NetworkType `json:",omitempty"` - Policies []NetworkPolicy `json:",omitempty"` - MacPool MacPool `json:",omitempty"` - Dns Dns `json:",omitempty"` - Ipams []Ipam `json:",omitempty"` - Flags NetworkFlags `json:",omitempty"` // 0: None - Health Health `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` -} - -// NetworkResourceType are the 3 different Network settings resources. -type NetworkResourceType string - -var ( - // NetworkResourceTypePolicy is for Network's policies. Ex: RemoteSubnet - NetworkResourceTypePolicy NetworkResourceType = "Policy" - // NetworkResourceTypeDNS is for Network's DNS settings. - NetworkResourceTypeDNS NetworkResourceType = "DNS" - // NetworkResourceTypeExtension is for Network's extension settings. - NetworkResourceTypeExtension NetworkResourceType = "Extension" -) - -// ModifyNetworkSettingRequest is the structure used to send request to modify an network. -// Used to update DNS/extension/policy on an network. -type ModifyNetworkSettingRequest struct { - ResourceType NetworkResourceType `json:",omitempty"` // Policy, DNS, Extension - RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh - Settings json.RawMessage `json:",omitempty"` -} - -type PolicyNetworkRequest struct { - Policies []NetworkPolicy `json:",omitempty"` -} - -func getNetwork(networkGuid guid.GUID, query string) (*HostComputeNetwork, error) { - // Open network. - var ( - networkHandle hcnNetwork - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Query network. 
- hr = hcnQueryNetworkProperties(networkHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close network. - hr = hcnCloseNetwork(networkHandle) - if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNetwork - var outputNetwork HostComputeNetwork - - // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), - // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before - // unmarshaling the JSON blob. - outputNetwork.Type = NAT - - if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { - return nil, err - } - return &outputNetwork, nil -} - -func enumerateNetworks(query string) ([]HostComputeNetwork, error) { - // Enumerate all Network Guids - var ( - resultBuffer *uint16 - networkBuffer *uint16 - ) - hr := hcnEnumerateNetworks(query, &networkBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateNetworks", hr, resultBuffer); err != nil { - return nil, err - } - - networks := interop.ConvertAndFreeCoTaskMemString(networkBuffer) - var networkIds []guid.GUID - if err := json.Unmarshal([]byte(networks), &networkIds); err != nil { - return nil, err - } - - var outputNetworks []HostComputeNetwork - for _, networkGuid := range networkIds { - network, err := getNetwork(networkGuid, query) - if err != nil { - return nil, err - } - outputNetworks = append(outputNetworks, *network) - } - return outputNetworks, nil -} - -func createNetwork(settings string) (*HostComputeNetwork, error) { - // Create new network. 
- var ( - networkHandle hcnNetwork - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - networkGuid := guid.GUID{} - hr := hcnCreateNetwork(&networkGuid, settings, &networkHandle, &resultBuffer) - if err := checkForErrors("hcnCreateNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Query network. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close network. - hr = hcnCloseNetwork(networkHandle) - if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNetwork - var outputNetwork HostComputeNetwork - - // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), - // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before - // unmarshaling the JSON blob. - outputNetwork.Type = NAT - - if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { - return nil, err - } - return &outputNetwork, nil -} - -func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, error) { - networkGuid, err := guid.FromString(networkId) - if err != nil { - return nil, errInvalidNetworkID - } - // Open Network - var ( - networkHandle hcnNetwork - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Modify Network - hr = hcnModifyNetwork(networkHandle, settings, &resultBuffer) - if err := checkForErrors("hcnModifyNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Query network. 
- hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close network. - hr = hcnCloseNetwork(networkHandle) - if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNetwork - var outputNetwork HostComputeNetwork - - // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), - // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before - // unmarshaling the JSON blob. - outputNetwork.Type = NAT - - if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { - return nil, err - } - return &outputNetwork, nil -} - -func deleteNetwork(networkId string) error { - networkGuid, err := guid.FromString(networkId) - if err != nil { - return errInvalidNetworkID - } - var resultBuffer *uint16 - hr := hcnDeleteNetwork(&networkGuid, &resultBuffer) - if err := checkForErrors("hcnDeleteNetwork", hr, resultBuffer); err != nil { - return err - } - return nil -} - -// ListNetworks makes a call to list all available networks. -func ListNetworks() ([]HostComputeNetwork, error) { - hcnQuery := defaultQuery() - networks, err := ListNetworksQuery(hcnQuery) - if err != nil { - return nil, err - } - return networks, nil -} - -// ListNetworksQuery makes a call to query the list of available networks. 
-func ListNetworksQuery(query HostComputeQuery) ([]HostComputeNetwork, error) { - queryJson, err := json.Marshal(query) - if err != nil { - return nil, err - } - - networks, err := enumerateNetworks(string(queryJson)) - if err != nil { - return nil, err - } - return networks, nil -} - -// GetNetworkByID returns the network specified by Id. -func GetNetworkByID(networkID string) (*HostComputeNetwork, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": networkID} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - networks, err := ListNetworksQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(networks) == 0 { - return nil, NetworkNotFoundError{NetworkID: networkID} - } - return &networks[0], err -} - -// GetNetworkByName returns the network specified by Name. -func GetNetworkByName(networkName string) (*HostComputeNetwork, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"Name": networkName} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - networks, err := ListNetworksQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(networks) == 0 { - return nil, NetworkNotFoundError{NetworkName: networkName} - } - return &networks[0], err -} - -// Create Network. 
-func (network *HostComputeNetwork) Create() (*HostComputeNetwork, error) { - logrus.Debugf("hcn::HostComputeNetwork::Create id=%s", network.Id) - for _, ipam := range network.Ipams { - for _, subnet := range ipam.Subnets { - if subnet.IpAddressPrefix != "" { - hasDefault := false - for _, route := range subnet.Routes { - if route.NextHop == "" { - return nil, errors.New("network create error, subnet has address prefix but no gateway specified") - } - if route.DestinationPrefix == "0.0.0.0/0" || route.DestinationPrefix == "::/0" { - hasDefault = true - } - } - if !hasDefault { - return nil, errors.New("network create error, no default gateway") - } - } - } - } - - jsonString, err := json.Marshal(network) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeNetwork::Create JSON: %s", jsonString) - network, hcnErr := createNetwork(string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return network, nil -} - -// Delete Network. -func (network *HostComputeNetwork) Delete() error { - logrus.Debugf("hcn::HostComputeNetwork::Delete id=%s", network.Id) - - if err := deleteNetwork(network.Id); err != nil { - return err - } - return nil -} - -// ModifyNetworkSettings updates the Policy for a network. -func (network *HostComputeNetwork) ModifyNetworkSettings(request *ModifyNetworkSettingRequest) error { - logrus.Debugf("hcn::HostComputeNetwork::ModifyNetworkSettings id=%s", network.Id) - - networkSettingsRequest, err := json.Marshal(request) - if err != nil { - return err - } - - _, err = modifyNetwork(network.Id, string(networkSettingsRequest)) - if err != nil { - return err - } - return nil -} - -// AddPolicy applies a Policy (ex: RemoteSubnet) on the Network. 
-func (network *HostComputeNetwork) AddPolicy(networkPolicy PolicyNetworkRequest) error { - logrus.Debugf("hcn::HostComputeNetwork::AddPolicy id=%s", network.Id) - - settingsJson, err := json.Marshal(networkPolicy) - if err != nil { - return err - } - requestMessage := &ModifyNetworkSettingRequest{ - ResourceType: NetworkResourceTypePolicy, - RequestType: RequestTypeAdd, - Settings: settingsJson, - } - - return network.ModifyNetworkSettings(requestMessage) -} - -// RemovePolicy removes a Policy (ex: RemoteSubnet) from the Network. -func (network *HostComputeNetwork) RemovePolicy(networkPolicy PolicyNetworkRequest) error { - logrus.Debugf("hcn::HostComputeNetwork::RemovePolicy id=%s", network.Id) - - settingsJson, err := json.Marshal(networkPolicy) - if err != nil { - return err - } - requestMessage := &ModifyNetworkSettingRequest{ - ResourceType: NetworkResourceTypePolicy, - RequestType: RequestTypeRemove, - Settings: settingsJson, - } - - return network.ModifyNetworkSettings(requestMessage) -} - -// CreateEndpoint creates an endpoint on the Network. -func (network *HostComputeNetwork) CreateEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) { - isRemote := endpoint.Flags&EndpointFlagsRemoteEndpoint != 0 - logrus.Debugf("hcn::HostComputeNetwork::CreatEndpoint, networkId=%s remote=%t", network.Id, isRemote) - - endpoint.HostComputeNetwork = network.Id - endpointSettings, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - newEndpoint, err := createEndpoint(network.Id, string(endpointSettings)) - if err != nil { - return nil, err - } - return newEndpoint, nil -} - -// CreateRemoteEndpoint creates a remote endpoint on the Network. 
-func (network *HostComputeNetwork) CreateRemoteEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) { - endpoint.Flags = EndpointFlagsRemoteEndpoint | endpoint.Flags - return network.CreateEndpoint(endpoint) -} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go deleted file mode 100644 index 29651bb..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go +++ /dev/null @@ -1,329 +0,0 @@ -package hcn - -import ( - "encoding/json" -) - -// EndpointPolicyType are the potential Policies that apply to Endpoints. -type EndpointPolicyType string - -// EndpointPolicyType const -const ( - PortMapping EndpointPolicyType = "PortMapping" - ACL EndpointPolicyType = "ACL" - QOS EndpointPolicyType = "QOS" - L2Driver EndpointPolicyType = "L2Driver" - OutBoundNAT EndpointPolicyType = "OutBoundNAT" - SDNRoute EndpointPolicyType = "SDNRoute" - L4Proxy EndpointPolicyType = "L4Proxy" - L4WFPPROXY EndpointPolicyType = "L4WFPPROXY" - PortName EndpointPolicyType = "PortName" - EncapOverhead EndpointPolicyType = "EncapOverhead" - IOV EndpointPolicyType = "Iov" - // Endpoint and Network have InterfaceConstraint and ProviderAddress - NetworkProviderAddress EndpointPolicyType = "ProviderAddress" - NetworkInterfaceConstraint EndpointPolicyType = "InterfaceConstraint" - TierAcl EndpointPolicyType = "TierAcl" -) - -// EndpointPolicy is a collection of Policy settings for an Endpoint. -type EndpointPolicy struct { - Type EndpointPolicyType `json:""` - Settings json.RawMessage `json:",omitempty"` -} - -// NetworkPolicyType are the potential Policies that apply to Networks. 
-type NetworkPolicyType string - -// NetworkPolicyType const -const ( - SourceMacAddress NetworkPolicyType = "SourceMacAddress" - NetAdapterName NetworkPolicyType = "NetAdapterName" - VSwitchExtension NetworkPolicyType = "VSwitchExtension" - DrMacAddress NetworkPolicyType = "DrMacAddress" - AutomaticDNS NetworkPolicyType = "AutomaticDNS" - InterfaceConstraint NetworkPolicyType = "InterfaceConstraint" - ProviderAddress NetworkPolicyType = "ProviderAddress" - RemoteSubnetRoute NetworkPolicyType = "RemoteSubnetRoute" - VxlanPort NetworkPolicyType = "VxlanPort" - HostRoute NetworkPolicyType = "HostRoute" - SetPolicy NetworkPolicyType = "SetPolicy" - NetworkL4Proxy NetworkPolicyType = "L4Proxy" - LayerConstraint NetworkPolicyType = "LayerConstraint" -) - -// NetworkPolicy is a collection of Policy settings for a Network. -type NetworkPolicy struct { - Type NetworkPolicyType `json:""` - Settings json.RawMessage `json:",omitempty"` -} - -// SubnetPolicyType are the potential Policies that apply to Subnets. -type SubnetPolicyType string - -// SubnetPolicyType const -const ( - VLAN SubnetPolicyType = "VLAN" - VSID SubnetPolicyType = "VSID" -) - -// SubnetPolicy is a collection of Policy settings for a Subnet. -type SubnetPolicy struct { - Type SubnetPolicyType `json:""` - Settings json.RawMessage `json:",omitempty"` -} - -// NatFlags are flags for portmappings. -type NatFlags uint32 - -const ( - NatFlagsNone NatFlags = iota - NatFlagsLocalRoutedVip - NatFlagsIPv6 -) - -/// Endpoint Policy objects - -// PortMappingPolicySetting defines Port Mapping (NAT) -type PortMappingPolicySetting struct { - Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 - InternalPort uint16 `json:",omitempty"` - ExternalPort uint16 `json:",omitempty"` - VIP string `json:",omitempty"` - Flags NatFlags `json:",omitempty"` -} - -// ActionType associated with ACLs. Value is either Allow or Block. -type ActionType string - -// DirectionType associated with ACLs. Value is either In or Out. 
-type DirectionType string - -// RuleType associated with ACLs. Value is either Host (WFP) or Switch (VFP). -type RuleType string - -const ( - // Allow traffic - ActionTypeAllow ActionType = "Allow" - // Block traffic - ActionTypeBlock ActionType = "Block" - // Pass traffic - ActionTypePass ActionType = "Pass" - - // In is traffic coming to the Endpoint - DirectionTypeIn DirectionType = "In" - // Out is traffic leaving the Endpoint - DirectionTypeOut DirectionType = "Out" - - // Host creates WFP (Windows Firewall) rules - RuleTypeHost RuleType = "Host" - // Switch creates VFP (Virtual Filter Platform) rules - RuleTypeSwitch RuleType = "Switch" -) - -// AclPolicySetting creates firewall rules on an endpoint -type AclPolicySetting struct { - Protocols string `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP) - Action ActionType `json:","` - Direction DirectionType `json:","` - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:",omitempty"` - RemotePorts string `json:",omitempty"` - RuleType RuleType `json:",omitempty"` - Priority uint16 `json:",omitempty"` -} - -// QosPolicySetting sets Quality of Service bandwidth caps on an Endpoint. -type QosPolicySetting struct { - MaximumOutgoingBandwidthInBytes uint64 -} - -// OutboundNatPolicySetting sets outbound Network Address Translation on an Endpoint. -type OutboundNatPolicySetting struct { - VirtualIP string `json:",omitempty"` - Exceptions []string `json:",omitempty"` - Destinations []string `json:",omitempty"` - Flags NatFlags `json:",omitempty"` -} - -// SDNRoutePolicySetting sets SDN Route on an Endpoint. -type SDNRoutePolicySetting struct { - DestinationPrefix string `json:",omitempty"` - NextHop string `json:",omitempty"` - NeedEncap bool `json:",omitempty"` -} - -// FiveTuple is nested in L4ProxyPolicySetting for WFP support. 
-type FiveTuple struct { - Protocols string `json:",omitempty"` - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:",omitempty"` - RemotePorts string `json:",omitempty"` - Priority uint16 `json:",omitempty"` -} - -// ProxyExceptions exempts traffic to IpAddresses and Ports -type ProxyExceptions struct { - IpAddressExceptions []string `json:",omitempty"` - PortExceptions []string `json:",omitempty"` -} - -// L4WfpProxyPolicySetting sets Layer-4 Proxy on an endpoint. -type L4WfpProxyPolicySetting struct { - InboundProxyPort string `json:",omitempty"` - OutboundProxyPort string `json:",omitempty"` - FilterTuple FiveTuple `json:",omitempty"` - UserSID string `json:",omitempty"` - InboundExceptions ProxyExceptions `json:",omitempty"` - OutboundExceptions ProxyExceptions `json:",omitempty"` -} - -// PortnameEndpointPolicySetting sets the port name for an endpoint. -type PortnameEndpointPolicySetting struct { - Name string `json:",omitempty"` -} - -// EncapOverheadEndpointPolicySetting sets the encap overhead for an endpoint. -type EncapOverheadEndpointPolicySetting struct { - Overhead uint16 `json:",omitempty"` -} - -// IovPolicySetting sets the Iov settings for an endpoint. -type IovPolicySetting struct { - IovOffloadWeight uint32 `json:",omitempty"` - QueuePairsRequested uint32 `json:",omitempty"` - InterruptModeration uint32 `json:",omitempty"` -} - -/// Endpoint and Network Policy objects - -// ProviderAddressEndpointPolicySetting sets the PA for an endpoint. -type ProviderAddressEndpointPolicySetting struct { - ProviderAddress string `json:",omitempty"` -} - -// InterfaceConstraintPolicySetting limits an Endpoint or Network to a specific Nic. 
-type InterfaceConstraintPolicySetting struct { - InterfaceGuid string `json:",omitempty"` - InterfaceLuid uint64 `json:",omitempty"` - InterfaceIndex uint32 `json:",omitempty"` - InterfaceMediaType uint32 `json:",omitempty"` - InterfaceAlias string `json:",omitempty"` - InterfaceDescription string `json:",omitempty"` -} - -/// Network Policy objects - -// SourceMacAddressNetworkPolicySetting sets source MAC for a network. -type SourceMacAddressNetworkPolicySetting struct { - SourceMacAddress string `json:",omitempty"` -} - -// NetAdapterNameNetworkPolicySetting sets network adapter of a network. -type NetAdapterNameNetworkPolicySetting struct { - NetworkAdapterName string `json:",omitempty"` -} - -// VSwitchExtensionNetworkPolicySetting enables/disabled VSwitch extensions for a network. -type VSwitchExtensionNetworkPolicySetting struct { - ExtensionID string `json:",omitempty"` - Enable bool `json:",omitempty"` -} - -// DrMacAddressNetworkPolicySetting sets the DR MAC for a network. -type DrMacAddressNetworkPolicySetting struct { - Address string `json:",omitempty"` -} - -// AutomaticDNSNetworkPolicySetting enables/disables automatic DNS on a network. -type AutomaticDNSNetworkPolicySetting struct { - Enable bool `json:",omitempty"` -} - -type LayerConstraintNetworkPolicySetting struct { - LayerId string `json:",omitempty"` -} - -/// Subnet Policy objects - -// VlanPolicySetting isolates a subnet with VLAN tagging. -type VlanPolicySetting struct { - IsolationId uint32 `json:","` -} - -// VsidPolicySetting isolates a subnet with VSID tagging. -type VsidPolicySetting struct { - IsolationId uint32 `json:","` -} - -// RemoteSubnetRoutePolicySetting creates remote subnet route rules on a network -type RemoteSubnetRoutePolicySetting struct { - DestinationPrefix string - IsolationId uint16 - ProviderAddress string - DistributedRouterMacAddress string -} - -// SetPolicyTypes associated with SetPolicy. Value is IPSET. 
-type SetPolicyType string - -const ( - SetPolicyTypeIpSet SetPolicyType = "IPSET" -) - -// SetPolicySetting creates IPSets on network -type SetPolicySetting struct { - Id string - Name string - Type SetPolicyType - Values string -} - -// VxlanPortPolicySetting allows configuring the VXLAN TCP port -type VxlanPortPolicySetting struct { - Port uint16 -} - -// ProtocolType associated with L4ProxyPolicy -type ProtocolType uint32 - -const ( - ProtocolTypeUnknown ProtocolType = 0 - ProtocolTypeICMPv4 ProtocolType = 1 - ProtocolTypeIGMP ProtocolType = 2 - ProtocolTypeTCP ProtocolType = 6 - ProtocolTypeUDP ProtocolType = 17 - ProtocolTypeICMPv6 ProtocolType = 58 -) - -//L4ProxyPolicySetting applies proxy policy on network/endpoint -type L4ProxyPolicySetting struct { - IP string `json:",omitempty"` - Port string `json:",omitempty"` - Protocol ProtocolType `json:",omitempty"` - Exceptions []string `json:",omitempty"` - Destination string - OutboundNAT bool `json:",omitempty"` -} - -// TierAclRule represents an ACL within TierAclPolicySetting -type TierAclRule struct { - Id string `json:",omitempty"` - Protocols string `json:",omitempty"` - TierAclRuleAction ActionType `json:","` - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:",omitempty"` - RemotePorts string `json:",omitempty"` - Priority uint16 `json:",omitempty"` -} - -// TierAclPolicySetting represents a Tier containing ACLs -type TierAclPolicySetting struct { - Name string `json:","` - Direction DirectionType `json:","` - Order uint16 `json:""` - TierAclRules []TierAclRule `json:",omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go deleted file mode 100644 index d6d2707..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go +++ /dev/null @@ -1,266 +0,0 @@ -package hcn - -import ( - "encoding/json" - "errors" - - "github.com/Microsoft/go-winio/pkg/guid" - 
"github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// HostComputeRoute represents SDN routes. -type HostComputeRoute struct { - ID string `json:"ID,omitempty"` - HostComputeEndpoints []string `json:",omitempty"` - Setting []SDNRoutePolicySetting `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` -} - -// ListRoutes makes a call to list all available routes. -func ListRoutes() ([]HostComputeRoute, error) { - hcnQuery := defaultQuery() - routes, err := ListRoutesQuery(hcnQuery) - if err != nil { - return nil, err - } - return routes, nil -} - -// ListRoutesQuery makes a call to query the list of available routes. -func ListRoutesQuery(query HostComputeQuery) ([]HostComputeRoute, error) { - queryJSON, err := json.Marshal(query) - if err != nil { - return nil, err - } - - routes, err := enumerateRoutes(string(queryJSON)) - if err != nil { - return nil, err - } - return routes, nil -} - -// GetRouteByID returns the route specified by Id. -func GetRouteByID(routeID string) (*HostComputeRoute, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": routeID} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - routes, err := ListRoutesQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(routes) == 0 { - return nil, RouteNotFoundError{RouteId: routeID} - } - return &routes[0], err -} - -// Create Route. -func (route *HostComputeRoute) Create() (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::Create id=%s", route.ID) - - jsonString, err := json.Marshal(route) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeRoute::Create JSON: %s", jsonString) - route, hcnErr := createRoute(string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return route, nil -} - -// Delete Route. 
-func (route *HostComputeRoute) Delete() error { - logrus.Debugf("hcn::HostComputeRoute::Delete id=%s", route.ID) - - existingRoute, _ := GetRouteByID(route.ID) - - if existingRoute != nil { - if err := deleteRoute(route.ID); err != nil { - return err - } - } - - return nil -} - -// AddEndpoint add an endpoint to a route -// Since HCNRoute doesn't implement modify functionality, add operation is essentially delete and add -func (route *HostComputeRoute) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::AddEndpoint route=%s endpoint=%s", route.ID, endpoint.Id) - - err := route.Delete() - if err != nil { - return nil, err - } - - // Add Endpoint to the Existing List - route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id) - - return route.Create() -} - -// RemoveEndpoint removes an endpoint from a route -// Since HCNRoute doesn't implement modify functionality, remove operation is essentially delete and add -func (route *HostComputeRoute) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::RemoveEndpoint route=%s endpoint=%s", route.ID, endpoint.Id) - - err := route.Delete() - if err != nil { - return nil, err - } - - // Create a list of all the endpoints besides the one being removed - i := 0 - for index, endpointReference := range route.HostComputeEndpoints { - if endpointReference == endpoint.Id { - i = index - break - } - } - - route.HostComputeEndpoints = append(route.HostComputeEndpoints[0:i], route.HostComputeEndpoints[i+1:]...) 
- return route.Create() -} - -// AddRoute for the specified endpoints and SDN Route setting -func AddRoute(endpoints []HostComputeEndpoint, destinationPrefix string, nextHop string, needEncapsulation bool) (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::AddRoute endpointId=%v, destinationPrefix=%v, nextHop=%v, needEncapsulation=%v", endpoints, destinationPrefix, nextHop, needEncapsulation) - - if len(endpoints) <= 0 { - return nil, errors.New("Missing endpoints") - } - - route := &HostComputeRoute{ - SchemaVersion: V2SchemaVersion(), - Setting: []SDNRoutePolicySetting{ - { - DestinationPrefix: destinationPrefix, - NextHop: nextHop, - NeedEncap: needEncapsulation, - }, - }, - } - - for _, endpoint := range endpoints { - route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id) - } - - return route.Create() -} - -func enumerateRoutes(query string) ([]HostComputeRoute, error) { - // Enumerate all routes Guids - var ( - resultBuffer *uint16 - routeBuffer *uint16 - ) - hr := hcnEnumerateRoutes(query, &routeBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateRoutes", hr, resultBuffer); err != nil { - return nil, err - } - - routes := interop.ConvertAndFreeCoTaskMemString(routeBuffer) - var routeIds []guid.GUID - if err := json.Unmarshal([]byte(routes), &routeIds); err != nil { - return nil, err - } - - var outputRoutes []HostComputeRoute - for _, routeGUID := range routeIds { - route, err := getRoute(routeGUID, query) - if err != nil { - return nil, err - } - outputRoutes = append(outputRoutes, *route) - } - return outputRoutes, nil -} - -func getRoute(routeGUID guid.GUID, query string) (*HostComputeRoute, error) { - // Open routes. - var ( - routeHandle hcnRoute - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenRoute(&routeGUID, &routeHandle, &resultBuffer) - if err := checkForErrors("hcnOpenRoute", hr, resultBuffer); err != nil { - return nil, err - } - // Query routes. 
- hr = hcnQueryRouteProperties(routeHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close routes. - hr = hcnCloseRoute(routeHandle) - if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeRoute - var outputRoute HostComputeRoute - if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil { - return nil, err - } - return &outputRoute, nil -} - -func createRoute(settings string) (*HostComputeRoute, error) { - // Create new route. - var ( - routeHandle hcnRoute - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - routeGUID := guid.GUID{} - hr := hcnCreateRoute(&routeGUID, settings, &routeHandle, &resultBuffer) - if err := checkForErrors("hcnCreateRoute", hr, resultBuffer); err != nil { - return nil, err - } - // Query route. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryRouteProperties(routeHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close Route. 
- hr = hcnCloseRoute(routeHandle) - if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeRoute - var outputRoute HostComputeRoute - if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil { - return nil, err - } - return &outputRoute, nil -} - -func deleteRoute(routeID string) error { - routeGUID, err := guid.FromString(routeID) - if err != nil { - return errInvalidRouteID - } - var resultBuffer *uint16 - hr := hcnDeleteRoute(&routeGUID, &resultBuffer) - if err := checkForErrors("hcnDeleteRoute", hr, resultBuffer); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go deleted file mode 100644 index 64f9e37..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go +++ /dev/null @@ -1,143 +0,0 @@ -package hcn - -import ( - "fmt" - "sync" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - // featuresOnce handles assigning the supported features and printing the supported info to stdout only once to avoid unnecessary work - // multiple times. - featuresOnce sync.Once - featuresErr error - supportedFeatures SupportedFeatures -) - -// SupportedFeatures are the features provided by the Service. 
-type SupportedFeatures struct { - Acl AclFeatures `json:"ACL"` - Api ApiSupport `json:"API"` - RemoteSubnet bool `json:"RemoteSubnet"` - HostRoute bool `json:"HostRoute"` - DSR bool `json:"DSR"` - Slash32EndpointPrefixes bool `json:"Slash32EndpointPrefixes"` - AclSupportForProtocol252 bool `json:"AclSupportForProtocol252"` - SessionAffinity bool `json:"SessionAffinity"` - IPv6DualStack bool `json:"IPv6DualStack"` - SetPolicy bool `json:"SetPolicy"` - VxlanPort bool `json:"VxlanPort"` - L4Proxy bool `json:"L4Proxy"` // network policy that applies VFP rules to all endpoints on the network to redirect traffic - L4WfpProxy bool `json:"L4WfpProxy"` // endpoint policy that applies WFP filters to redirect traffic to/from that endpoint - TierAcl bool `json:"TierAcl"` -} - -// AclFeatures are the supported ACL possibilities. -type AclFeatures struct { - AclAddressLists bool `json:"AclAddressLists"` - AclNoHostRulePriority bool `json:"AclHostRulePriority"` - AclPortRanges bool `json:"AclPortRanges"` - AclRuleId bool `json:"AclRuleId"` -} - -// ApiSupport lists the supported API versions. -type ApiSupport struct { - V1 bool `json:"V1"` - V2 bool `json:"V2"` -} - -// GetCachedSupportedFeatures returns the features supported by the Service and an error if the query failed. If this has been called -// before it will return the supported features and error received from the first call. This can be used to optimize if many calls to the -// various hcn.IsXSupported methods need to be made. -func GetCachedSupportedFeatures() (SupportedFeatures, error) { - // Only query the HCN version and features supported once, instead of everytime this is invoked. The logs are useful to - // debug incidents where there's confusion on if a feature is supported on the host machine. The sync.Once helps to avoid redundant - // spam of these anytime a check needs to be made for if an HCN feature is supported. This is a common occurrence in kube-proxy - // for example. 
- featuresOnce.Do(func() { - supportedFeatures, featuresErr = getSupportedFeatures() - }) - - return supportedFeatures, featuresErr -} - -// GetSupportedFeatures returns the features supported by the Service. -// -// Deprecated: Use GetCachedSupportedFeatures instead. -func GetSupportedFeatures() SupportedFeatures { - features, err := GetCachedSupportedFeatures() - if err != nil { - // Expected on pre-1803 builds, all features will be false/unsupported - logrus.WithError(err).Errorf("unable to obtain supported features") - return features - } - return features -} - -func getSupportedFeatures() (SupportedFeatures, error) { - var features SupportedFeatures - globals, err := GetGlobals() - if err != nil { - // It's expected if this fails once, it should always fail. It should fail on pre 1803 builds for example. - return SupportedFeatures{}, errors.Wrap(err, "failed to query HCN version number: this is expected on pre 1803 builds.") - } - features.Acl = AclFeatures{ - AclAddressLists: isFeatureSupported(globals.Version, HNSVersion1803), - AclNoHostRulePriority: isFeatureSupported(globals.Version, HNSVersion1803), - AclPortRanges: isFeatureSupported(globals.Version, HNSVersion1803), - AclRuleId: isFeatureSupported(globals.Version, HNSVersion1803), - } - - features.Api = ApiSupport{ - V2: isFeatureSupported(globals.Version, V2ApiSupport), - V1: true, // HNSCall is still available. 
- } - - features.RemoteSubnet = isFeatureSupported(globals.Version, RemoteSubnetVersion) - features.HostRoute = isFeatureSupported(globals.Version, HostRouteVersion) - features.DSR = isFeatureSupported(globals.Version, DSRVersion) - features.Slash32EndpointPrefixes = isFeatureSupported(globals.Version, Slash32EndpointPrefixesVersion) - features.AclSupportForProtocol252 = isFeatureSupported(globals.Version, AclSupportForProtocol252Version) - features.SessionAffinity = isFeatureSupported(globals.Version, SessionAffinityVersion) - features.IPv6DualStack = isFeatureSupported(globals.Version, IPv6DualStackVersion) - features.SetPolicy = isFeatureSupported(globals.Version, SetPolicyVersion) - features.VxlanPort = isFeatureSupported(globals.Version, VxlanPortVersion) - features.L4Proxy = isFeatureSupported(globals.Version, L4ProxyPolicyVersion) - features.L4WfpProxy = isFeatureSupported(globals.Version, L4WfpProxyPolicyVersion) - features.TierAcl = isFeatureSupported(globals.Version, TierAclPolicyVersion) - - logrus.WithFields(logrus.Fields{ - "version": fmt.Sprintf("%+v", globals.Version), - "supportedFeatures": fmt.Sprintf("%+v", features), - }).Info("HCN feature check") - - return features, nil -} - -func isFeatureSupported(currentVersion Version, versionsSupported VersionRanges) bool { - isFeatureSupported := false - - for _, versionRange := range versionsSupported { - isFeatureSupported = isFeatureSupported || isFeatureInRange(currentVersion, versionRange) - } - - return isFeatureSupported -} - -func isFeatureInRange(currentVersion Version, versionRange VersionRange) bool { - if currentVersion.Major < versionRange.MinVersion.Major { - return false - } - if currentVersion.Major > versionRange.MaxVersion.Major { - return false - } - if currentVersion.Major == versionRange.MinVersion.Major && currentVersion.Minor < versionRange.MinVersion.Minor { - return false - } - if currentVersion.Major == versionRange.MaxVersion.Major && currentVersion.Minor > 
versionRange.MaxVersion.Minor { - return false - } - return true -} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go deleted file mode 100644 index 7ec5b58..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go +++ /dev/null @@ -1,795 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package hcn - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") - modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") - modcomputenetwork = windows.NewLazySystemDLL("computenetwork.dll") - - procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") - procHNSCall = modvmcompute.NewProc("HNSCall") - procHcnEnumerateNetworks = modcomputenetwork.NewProc("HcnEnumerateNetworks") - procHcnCreateNetwork = modcomputenetwork.NewProc("HcnCreateNetwork") - procHcnOpenNetwork = modcomputenetwork.NewProc("HcnOpenNetwork") - procHcnModifyNetwork = modcomputenetwork.NewProc("HcnModifyNetwork") - procHcnQueryNetworkProperties = modcomputenetwork.NewProc("HcnQueryNetworkProperties") - procHcnDeleteNetwork = modcomputenetwork.NewProc("HcnDeleteNetwork") - procHcnCloseNetwork = modcomputenetwork.NewProc("HcnCloseNetwork") - procHcnEnumerateEndpoints = modcomputenetwork.NewProc("HcnEnumerateEndpoints") - procHcnCreateEndpoint = modcomputenetwork.NewProc("HcnCreateEndpoint") - procHcnOpenEndpoint = modcomputenetwork.NewProc("HcnOpenEndpoint") - procHcnModifyEndpoint = modcomputenetwork.NewProc("HcnModifyEndpoint") - procHcnQueryEndpointProperties = modcomputenetwork.NewProc("HcnQueryEndpointProperties") - procHcnDeleteEndpoint = modcomputenetwork.NewProc("HcnDeleteEndpoint") - procHcnCloseEndpoint = modcomputenetwork.NewProc("HcnCloseEndpoint") - procHcnEnumerateNamespaces = modcomputenetwork.NewProc("HcnEnumerateNamespaces") - procHcnCreateNamespace = modcomputenetwork.NewProc("HcnCreateNamespace") - procHcnOpenNamespace = modcomputenetwork.NewProc("HcnOpenNamespace") - procHcnModifyNamespace = modcomputenetwork.NewProc("HcnModifyNamespace") - procHcnQueryNamespaceProperties = modcomputenetwork.NewProc("HcnQueryNamespaceProperties") - procHcnDeleteNamespace = modcomputenetwork.NewProc("HcnDeleteNamespace") - procHcnCloseNamespace = modcomputenetwork.NewProc("HcnCloseNamespace") - procHcnEnumerateLoadBalancers = 
modcomputenetwork.NewProc("HcnEnumerateLoadBalancers") - procHcnCreateLoadBalancer = modcomputenetwork.NewProc("HcnCreateLoadBalancer") - procHcnOpenLoadBalancer = modcomputenetwork.NewProc("HcnOpenLoadBalancer") - procHcnModifyLoadBalancer = modcomputenetwork.NewProc("HcnModifyLoadBalancer") - procHcnQueryLoadBalancerProperties = modcomputenetwork.NewProc("HcnQueryLoadBalancerProperties") - procHcnDeleteLoadBalancer = modcomputenetwork.NewProc("HcnDeleteLoadBalancer") - procHcnCloseLoadBalancer = modcomputenetwork.NewProc("HcnCloseLoadBalancer") - procHcnEnumerateSdnRoutes = modcomputenetwork.NewProc("HcnEnumerateSdnRoutes") - procHcnCreateSdnRoute = modcomputenetwork.NewProc("HcnCreateSdnRoute") - procHcnOpenSdnRoute = modcomputenetwork.NewProc("HcnOpenSdnRoute") - procHcnModifySdnRoute = modcomputenetwork.NewProc("HcnModifySdnRoute") - procHcnQuerySdnRouteProperties = modcomputenetwork.NewProc("HcnQuerySdnRouteProperties") - procHcnDeleteSdnRoute = modcomputenetwork.NewProc("HcnDeleteSdnRoute") - procHcnCloseSdnRoute = modcomputenetwork.NewProc("HcnCloseSdnRoute") -) - -func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { - r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func _hnsCall(method string, path string, object string, response **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(method) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(object) - if hr != nil { - return - } - return __hnsCall(_p0, _p1, _p2, response) -} - -func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { - if hr = procHNSCall.Find(); hr != nil { - return - } - r0, _, _ := 
syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnEnumerateNetworks(_p0, networks, result) -} - -func _hcnEnumerateNetworks(query *uint16, networks **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateNetworks.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnEnumerateNetworks.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(networks)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnCreateNetwork(id, _p0, network, result) -} - -func _hcnCreateNetwork(id *_guid, settings *uint16, network *hcnNetwork, result **uint16) (hr error) { - if hr = procHcnCreateNetwork.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnCreateNetwork.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) { - if hr = procHcnOpenNetwork.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnOpenNetwork.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(network)), 
uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnModifyNetwork(network, _p0, result) -} - -func _hcnModifyNetwork(network hcnNetwork, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyNetwork.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifyNetwork.Addr(), 3, uintptr(network), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnQueryNetworkProperties(network, _p0, properties, result) -} - -func _hcnQueryNetworkProperties(network hcnNetwork, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryNetworkProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnQueryNetworkProperties.Addr(), 4, uintptr(network), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnDeleteNetwork(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteNetwork.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnDeleteNetwork.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func 
hcnCloseNetwork(network hcnNetwork) (hr error) { - if hr = procHcnCloseNetwork.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnCloseNetwork.Addr(), 1, uintptr(network), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnEnumerateEndpoints(_p0, endpoints, result) -} - -func _hcnEnumerateEndpoints(query *uint16, endpoints **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateEndpoints.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnEnumerateEndpoints.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(endpoints)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnCreateEndpoint(network, id, _p0, endpoint, result) -} - -func _hcnCreateEndpoint(network hcnNetwork, id *_guid, settings *uint16, endpoint *hcnEndpoint, result **uint16) (hr error) { - if hr = procHcnCreateEndpoint.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnCreateEndpoint.Addr(), 5, uintptr(network), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) { - if hr = procHcnOpenEndpoint.Find(); hr != nil { - return - } - r0, _, _ := 
syscall.Syscall(procHcnOpenEndpoint.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnModifyEndpoint(endpoint, _p0, result) -} - -func _hcnModifyEndpoint(endpoint hcnEndpoint, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyEndpoint.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifyEndpoint.Addr(), 3, uintptr(endpoint), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnQueryEndpointProperties(endpoint, _p0, properties, result) -} - -func _hcnQueryEndpointProperties(endpoint hcnEndpoint, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryEndpointProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnQueryEndpointProperties.Addr(), 4, uintptr(endpoint), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteEndpoint.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnDeleteEndpoint.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) 
- if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) { - if hr = procHcnCloseEndpoint.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnCloseEndpoint.Addr(), 1, uintptr(endpoint), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnEnumerateNamespaces(_p0, namespaces, result) -} - -func _hcnEnumerateNamespaces(query *uint16, namespaces **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateNamespaces.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnEnumerateNamespaces.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(namespaces)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnCreateNamespace(id, _p0, namespace, result) -} - -func _hcnCreateNamespace(id *_guid, settings *uint16, namespace *hcnNamespace, result **uint16) (hr error) { - if hr = procHcnCreateNamespace.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnCreateNamespace.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr 
error) { - if hr = procHcnOpenNamespace.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnOpenNamespace.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnModifyNamespace(namespace, _p0, result) -} - -func _hcnModifyNamespace(namespace hcnNamespace, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyNamespace.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifyNamespace.Addr(), 3, uintptr(namespace), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnQueryNamespaceProperties(namespace, _p0, properties, result) -} - -func _hcnQueryNamespaceProperties(namespace hcnNamespace, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryNamespaceProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnQueryNamespaceProperties.Addr(), 4, uintptr(namespace), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnDeleteNamespace(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteNamespace.Find(); hr != nil { - return - } - r0, _, _ := 
syscall.Syscall(procHcnDeleteNamespace.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCloseNamespace(namespace hcnNamespace) (hr error) { - if hr = procHcnCloseNamespace.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnCloseNamespace.Addr(), 1, uintptr(namespace), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnEnumerateLoadBalancers(_p0, loadBalancers, result) -} - -func _hcnEnumerateLoadBalancers(query *uint16, loadBalancers **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateLoadBalancers.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnEnumerateLoadBalancers.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(loadBalancers)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnCreateLoadBalancer(id, _p0, loadBalancer, result) -} - -func _hcnCreateLoadBalancer(id *_guid, settings *uint16, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { - if hr = procHcnCreateLoadBalancer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnCreateLoadBalancer.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { 
- if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { - if hr = procHcnOpenLoadBalancer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnOpenLoadBalancer.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnModifyLoadBalancer(loadBalancer, _p0, result) -} - -func _hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyLoadBalancer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifyLoadBalancer.Addr(), 3, uintptr(loadBalancer), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnQueryLoadBalancerProperties(loadBalancer, _p0, properties, result) -} - -func _hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryLoadBalancerProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnQueryLoadBalancerProperties.Addr(), 4, uintptr(loadBalancer), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if 
int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteLoadBalancer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnDeleteLoadBalancer.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) { - if hr = procHcnCloseLoadBalancer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnCloseLoadBalancer.Addr(), 1, uintptr(loadBalancer), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnEnumerateRoutes(_p0, routes, result) -} - -func _hcnEnumerateRoutes(query *uint16, routes **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateSdnRoutes.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnEnumerateSdnRoutes.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(routes)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnCreateRoute(id, _p0, route, result) -} - -func _hcnCreateRoute(id *_guid, settings *uint16, route *hcnRoute, result **uint16) (hr error) { - if hr = procHcnCreateSdnRoute.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnCreateSdnRoute.Addr(), 
4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) { - if hr = procHcnOpenSdnRoute.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnOpenSdnRoute.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnModifyRoute(route, _p0, result) -} - -func _hcnModifyRoute(route hcnRoute, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifySdnRoute.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifySdnRoute.Addr(), 3, uintptr(route), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnQueryRouteProperties(route, _p0, properties, result) -} - -func _hcnQueryRouteProperties(route hcnRoute, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQuerySdnRouteProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnQuerySdnRouteProperties.Addr(), 4, uintptr(route), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - 
if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnDeleteRoute(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteSdnRoute.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnDeleteSdnRoute.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCloseRoute(route hcnRoute) (hr error) { - if hr = procHcnCloseSdnRoute.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnCloseSdnRoute.Addr(), 1, uintptr(route), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/github.com/Microsoft/hcsshim/hcsshim.go deleted file mode 100644 index ceb3ac8..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hcsshim.go +++ /dev/null @@ -1,28 +0,0 @@ -// Shim for the Host Compute Service (HCS) to manage Windows Server -// containers and Hyper-V containers. 
- -package hcsshim - -import ( - "syscall" - - "github.com/Microsoft/hcsshim/internal/hcserror" -) - -//go:generate go run mksyscall_windows.go -output zsyscall_windows.go hcsshim.go - -//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId - -const ( - // Specific user-visible exit codes - WaitErrExecFailed = 32767 - - ERROR_GEN_FAILURE = hcserror.ERROR_GEN_FAILURE - ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115) - WSAEINVAL = syscall.Errno(10022) - - // Timeout on wait calls - TimeoutInfinite = 0xFFFFFFFF -) - -type HcsError = hcserror.HcsError diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go deleted file mode 100644 index 9e00594..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go +++ /dev/null @@ -1,118 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -// HNSEndpoint represents a network endpoint in HNS -type HNSEndpoint = hns.HNSEndpoint - -// HNSEndpointStats represent the stats for an networkendpoint in HNS -type HNSEndpointStats = hns.EndpointStats - -// Namespace represents a Compartment. 
-type Namespace = hns.Namespace - -//SystemType represents the type of the system on which actions are done -type SystemType string - -// SystemType const -const ( - ContainerType SystemType = "Container" - VirtualMachineType SystemType = "VirtualMachine" - HostType SystemType = "Host" -) - -// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type EndpointAttachDetachRequest = hns.EndpointAttachDetachRequest - -// EndpointResquestResponse is object to get the endpoint request response -type EndpointResquestResponse = hns.EndpointResquestResponse - -// HNSEndpointRequest makes a HNS call to modify/query a network endpoint -func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { - return hns.HNSEndpointRequest(method, path, request) -} - -// HNSListEndpointRequest makes a HNS call to query the list of available endpoints -func HNSListEndpointRequest() ([]HNSEndpoint, error) { - return hns.HNSListEndpointRequest() -} - -// HotAttachEndpoint makes a HCS Call to attach the endpoint to the container -func HotAttachEndpoint(containerID string, endpointID string) error { - endpoint, err := GetHNSEndpointByID(endpointID) - if err != nil { - return err - } - isAttached, err := endpoint.IsAttached(containerID) - if isAttached { - return err - } - return modifyNetworkEndpoint(containerID, endpointID, Add) -} - -// HotDetachEndpoint makes a HCS Call to detach the endpoint from the container -func HotDetachEndpoint(containerID string, endpointID string) error { - endpoint, err := GetHNSEndpointByID(endpointID) - if err != nil { - return err - } - isAttached, err := endpoint.IsAttached(containerID) - if !isAttached { - return err - } - return modifyNetworkEndpoint(containerID, endpointID, Remove) -} - -// ModifyContainer corresponding to the container id, by sending a request -func modifyContainer(id string, request 
*ResourceModificationRequestResponse) error { - container, err := OpenContainer(id) - if err != nil { - if IsNotExist(err) { - return ErrComputeSystemDoesNotExist - } - return getInnerError(err) - } - defer container.Close() - err = container.Modify(request) - if err != nil { - if IsNotSupported(err) { - return ErrPlatformNotSupported - } - return getInnerError(err) - } - - return nil -} - -func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error { - requestMessage := &ResourceModificationRequestResponse{ - Resource: Network, - Request: request, - Data: endpointID, - } - err := modifyContainer(containerID, requestMessage) - - if err != nil { - return err - } - - return nil -} - -// GetHNSEndpointByID get the Endpoint by ID -func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { - return hns.GetHNSEndpointByID(endpointID) -} - -// GetHNSEndpointByName gets the endpoint filtered by Name -func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { - return hns.GetHNSEndpointByName(endpointName) -} - -// GetHNSEndpointStats gets the endpoint stats by ID -func GetHNSEndpointStats(endpointName string) (*HNSEndpointStats, error) { - return hns.GetHNSEndpointStats(endpointName) -} diff --git a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go deleted file mode 100644 index 2b53819..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go +++ /dev/null @@ -1,16 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -type HNSGlobals = hns.HNSGlobals -type HNSVersion = hns.HNSVersion - -var ( - HNSVersion1803 = hns.HNSVersion1803 -) - -func GetHNSGlobals() (*HNSGlobals, error) { - return hns.GetHNSGlobals() -} diff --git a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go deleted file mode 100644 index f775fa1..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go 
+++ /dev/null @@ -1,36 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -// Subnet is assoicated with a network and represents a list -// of subnets available to the network -type Subnet = hns.Subnet - -// MacPool is assoicated with a network and represents a list -// of macaddresses available to the network -type MacPool = hns.MacPool - -// HNSNetwork represents a network in HNS -type HNSNetwork = hns.HNSNetwork - -// HNSNetworkRequest makes a call into HNS to update/query a single network -func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { - return hns.HNSNetworkRequest(method, path, request) -} - -// HNSListNetworkRequest makes a HNS call to query the list of available networks -func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { - return hns.HNSListNetworkRequest(method, path, request) -} - -// GetHNSNetworkByID -func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) { - return hns.GetHNSNetworkByID(networkID) -} - -// GetHNSNetworkName filtered by Name -func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { - return hns.GetHNSNetworkByName(networkName) -} diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go deleted file mode 100644 index 00ab263..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go +++ /dev/null @@ -1,60 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -// Type of Request Support in ModifySystem -type PolicyType = hns.PolicyType - -// RequestType const -const ( - Nat = hns.Nat - ACL = hns.ACL - PA = hns.PA - VLAN = hns.VLAN - VSID = hns.VSID - VNet = hns.VNet - L2Driver = hns.L2Driver - Isolation = hns.Isolation - QOS = hns.QOS - OutboundNat = hns.OutboundNat - ExternalLoadBalancer = hns.ExternalLoadBalancer - Route = hns.Route - Proxy = hns.Proxy -) - -type ProxyPolicy = hns.ProxyPolicy - -type NatPolicy = hns.NatPolicy - 
-type QosPolicy = hns.QosPolicy - -type IsolationPolicy = hns.IsolationPolicy - -type VlanPolicy = hns.VlanPolicy - -type VsidPolicy = hns.VsidPolicy - -type PaPolicy = hns.PaPolicy - -type OutboundNatPolicy = hns.OutboundNatPolicy - -type ActionType = hns.ActionType -type DirectionType = hns.DirectionType -type RuleType = hns.RuleType - -const ( - Allow = hns.Allow - Block = hns.Block - - In = hns.In - Out = hns.Out - - Host = hns.Host - Switch = hns.Switch -) - -type ACLPolicy = hns.ACLPolicy - -type Policy = hns.Policy diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go deleted file mode 100644 index 55aaa4a..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go +++ /dev/null @@ -1,47 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -// RoutePolicy is a structure defining schema for Route based Policy -type RoutePolicy = hns.RoutePolicy - -// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy -type ELBPolicy = hns.ELBPolicy - -// LBPolicy is a structure defining schema for LoadBalancing based Policy -type LBPolicy = hns.LBPolicy - -// PolicyList is a structure defining schema for Policy list request -type PolicyList = hns.PolicyList - -// HNSPolicyListRequest makes a call into HNS to update/query a single network -func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { - return hns.HNSPolicyListRequest(method, path, request) -} - -// HNSListPolicyListRequest gets all the policy list -func HNSListPolicyListRequest() ([]PolicyList, error) { - return hns.HNSListPolicyListRequest() -} - -// PolicyListRequest makes a HNS call to modify/query a network policy list -func PolicyListRequest(method, path, request string) (*PolicyList, error) { - return hns.PolicyListRequest(method, path, request) -} - -// GetPolicyListByID get the policy list by ID -func GetPolicyListByID(policyListID string) (*PolicyList, 
error) { - return hns.GetPolicyListByID(policyListID) -} - -// AddLoadBalancer policy list for the specified endpoints -func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { - return hns.AddLoadBalancer(endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) -} - -// AddRoute adds route policy list for the specified endpoints -func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { - return hns.AddRoute(endpoints, destinationPrefix, nextHop, encapEnabled) -} diff --git a/vendor/github.com/Microsoft/hcsshim/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/hnssupport.go deleted file mode 100644 index 6940524..0000000 --- a/vendor/github.com/Microsoft/hcsshim/hnssupport.go +++ /dev/null @@ -1,13 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -type HNSSupportedFeatures = hns.HNSSupportedFeatures - -type HNSAclFeatures = hns.HNSAclFeatures - -func GetHNSSupportedFeatures() HNSSupportedFeatures { - return hns.GetHNSSupportedFeatures() -} diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go deleted file mode 100644 index 300eb59..0000000 --- a/vendor/github.com/Microsoft/hcsshim/interface.go +++ /dev/null @@ -1,114 +0,0 @@ -package hcsshim - -import ( - "io" - "time" - - "github.com/Microsoft/hcsshim/internal/hcs/schema1" -) - -// ProcessConfig is used as both the input of Container.CreateProcess -// and to convert the parameters to JSON for passing onto the HCS -type ProcessConfig = schema1.ProcessConfig - -type Layer = schema1.Layer -type MappedDir = schema1.MappedDir -type MappedPipe = schema1.MappedPipe -type HvRuntime = schema1.HvRuntime -type MappedVirtualDisk = schema1.MappedVirtualDisk - -// AssignedDevice represents a device that has been directly assigned to a container 
-// -// NOTE: Support added in RS5 -type AssignedDevice = schema1.AssignedDevice - -// ContainerConfig is used as both the input of CreateContainer -// and to convert the parameters to JSON for passing onto the HCS -type ContainerConfig = schema1.ContainerConfig - -type ComputeSystemQuery = schema1.ComputeSystemQuery - -// Container represents a created (but not necessarily running) container. -type Container interface { - // Start synchronously starts the container. - Start() error - - // Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. - Shutdown() error - - // Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. - Terminate() error - - // Waits synchronously waits for the container to shutdown or terminate. - Wait() error - - // WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It - // returns false if timeout occurs. - WaitTimeout(time.Duration) error - - // Pause pauses the execution of a container. - Pause() error - - // Resume resumes the execution of a container. - Resume() error - - // HasPendingUpdates returns true if the container has updates pending to install. - HasPendingUpdates() (bool, error) - - // Statistics returns statistics for a container. - Statistics() (Statistics, error) - - // ProcessList returns details for the processes in a container. - ProcessList() ([]ProcessListItem, error) - - // MappedVirtualDisks returns virtual disks mapped to a utility VM, indexed by controller - MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) - - // CreateProcess launches a new process within the container. - CreateProcess(c *ProcessConfig) (Process, error) - - // OpenProcess gets an interface to an existing process within the container. - OpenProcess(pid int) (Process, error) - - // Close cleans up any state associated with the container but does not terminate or wait for it. 
- Close() error - - // Modify the System - Modify(config *ResourceModificationRequestResponse) error -} - -// Process represents a running or exited process. -type Process interface { - // Pid returns the process ID of the process within the container. - Pid() int - - // Kill signals the process to terminate but does not wait for it to finish terminating. - Kill() error - - // Wait waits for the process to exit. - Wait() error - - // WaitTimeout waits for the process to exit or the duration to elapse. It returns - // false if timeout occurs. - WaitTimeout(time.Duration) error - - // ExitCode returns the exit code of the process. The process must have - // already terminated. - ExitCode() (int, error) - - // ResizeConsole resizes the console of the process. - ResizeConsole(width, height uint16) error - - // Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing - // these pipes does not close the underlying pipes; it should be possible to - // call this multiple times to get multiple interfaces. - Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) - - // CloseStdin closes the write side of the stdin pipe so that the process is - // notified on the read side that there is no more data in stdin. - CloseStdin() error - - // Close cleans up any state associated with the process but does not kill - // or wait on it. - Close() error -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go b/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go deleted file mode 100644 index 4a4fcea..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go +++ /dev/null @@ -1,110 +0,0 @@ -package cni - -import ( - "errors" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/regstate" -) - -const ( - cniRoot = "cni" - cniKey = "cfg" -) - -// PersistedNamespaceConfig is the registry version of the `NamespaceID` to UVM -// map. 
-type PersistedNamespaceConfig struct { - namespaceID string - stored bool - - ContainerID string - HostUniqueID guid.GUID -} - -// NewPersistedNamespaceConfig creates an in-memory namespace config that can be -// persisted to the registry. -func NewPersistedNamespaceConfig(namespaceID, containerID string, containerHostUniqueID guid.GUID) *PersistedNamespaceConfig { - return &PersistedNamespaceConfig{ - namespaceID: namespaceID, - ContainerID: containerID, - HostUniqueID: containerHostUniqueID, - } -} - -// LoadPersistedNamespaceConfig loads a persisted config from the registry that matches -// `namespaceID`. If not found returns `regstate.NotFoundError` -func LoadPersistedNamespaceConfig(namespaceID string) (*PersistedNamespaceConfig, error) { - sk, err := regstate.Open(cniRoot, false) - if err != nil { - return nil, err - } - defer sk.Close() - - pnc := PersistedNamespaceConfig{ - namespaceID: namespaceID, - stored: true, - } - if err := sk.Get(namespaceID, cniKey, &pnc); err != nil { - return nil, err - } - return &pnc, nil -} - -// Store stores or updates the in-memory config to its registry state. If the -// store failes returns the store error. -func (pnc *PersistedNamespaceConfig) Store() error { - if pnc.namespaceID == "" { - return errors.New("invalid namespaceID ''") - } - if pnc.ContainerID == "" { - return errors.New("invalid containerID ''") - } - empty := guid.GUID{} - if pnc.HostUniqueID == empty { - return errors.New("invalid containerHostUniqueID 'empy'") - } - sk, err := regstate.Open(cniRoot, false) - if err != nil { - return err - } - defer sk.Close() - - if pnc.stored { - if err := sk.Set(pnc.namespaceID, cniKey, pnc); err != nil { - return err - } - } else { - if err := sk.Create(pnc.namespaceID, cniKey, pnc); err != nil { - return err - } - } - pnc.stored = true - return nil -} - -// Remove removes any persisted state associated with this config. If the config -// is not found in the registery `Remove` returns no error. 
-func (pnc *PersistedNamespaceConfig) Remove() error { - if pnc.stored { - sk, err := regstate.Open(cniRoot, false) - if err != nil { - if regstate.IsNotFoundError(err) { - pnc.stored = false - return nil - } - return err - } - defer sk.Close() - - if err := sk.Remove(pnc.namespaceID); err != nil { - if regstate.IsNotFoundError(err) { - pnc.stored = false - return nil - } - return err - } - } - pnc.stored = false - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go deleted file mode 100644 index 27a62a7..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go +++ /dev/null @@ -1,91 +0,0 @@ -package cow - -import ( - "context" - "io" - - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// Process is the interface for an OS process running in a container or utility VM. -type Process interface { - // Close releases resources associated with the process and closes the - // writer and readers returned by Stdio. Depending on the implementation, - // this may also terminate the process. - Close() error - // CloseStdin causes the process's stdin handle to receive EOF/EPIPE/whatever - // is appropriate to indicate that no more data is available. - CloseStdin(ctx context.Context) error - // CloseStdout closes the stdout connection to the process. It is used to indicate - // that we are done receiving output on the shim side. - CloseStdout(ctx context.Context) error - // CloseStderr closes the stderr connection to the process. It is used to indicate - // that we are done receiving output on the shim side. - CloseStderr(ctx context.Context) error - // Pid returns the process ID. - Pid() int - // Stdio returns the stdio streams for a process. These may be nil if a stream - // was not requested during CreateProcess. 
- Stdio() (_ io.Writer, _ io.Reader, _ io.Reader) - // ResizeConsole resizes the virtual terminal associated with the process. - ResizeConsole(ctx context.Context, width, height uint16) error - // Kill sends a SIGKILL or equivalent signal to the process and returns whether - // the signal was delivered. It does not wait for the process to terminate. - Kill(ctx context.Context) (bool, error) - // Signal sends a signal to the process and returns whether the signal was - // delivered. The input is OS specific (either - // guestrequest.SignalProcessOptionsWCOW or - // guestrequest.SignalProcessOptionsLCOW). It does not wait for the process - // to terminate. - Signal(ctx context.Context, options interface{}) (bool, error) - // Wait waits for the process to complete, or for a connection to the process to be - // terminated by some error condition (including calling Close). - Wait() error - // ExitCode returns the exit code of the process. Returns an error if the process is - // not running. - ExitCode() (int, error) -} - -// ProcessHost is the interface for creating processes. -type ProcessHost interface { - // CreateProcess creates a process. The configuration is host specific - // (either hcsschema.ProcessParameters or lcow.ProcessParameters). - CreateProcess(ctx context.Context, config interface{}) (Process, error) - // OS returns the host's operating system, "linux" or "windows". - OS() string - // IsOCI specifies whether this is an OCI-compliant process host. If true, - // then the configuration passed to CreateProcess should have an OCI process - // spec (or nil if this is the initial process in an OCI container). - // Otherwise, it should have the HCS-specific process parameters. - IsOCI() bool -} - -// Container is the interface for container objects, either running on the host or -// in a utility VM. -type Container interface { - ProcessHost - // Close releases the resources associated with the container. 
Depending on - // the implementation, this may also terminate the container. - Close() error - // ID returns the container ID. - ID() string - // Properties returns the requested container properties targeting a V1 schema container. - Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) - // PropertiesV2 returns the requested container properties targeting a V2 schema container. - PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) - // Start starts a container. - Start(ctx context.Context) error - // Shutdown sends a shutdown request to the container (but does not wait for - // the shutdown to complete). - Shutdown(ctx context.Context) error - // Terminate sends a terminate request to the container (but does not wait - // for the terminate to complete). - Terminate(ctx context.Context) error - // Wait waits for the container to terminate, or for the connection to the - // container to be terminated by some error condition (including calling - // Close). 
- Wait() error - // Modify sends a request to modify container resources - Modify(ctx context.Context, config interface{}) error -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go deleted file mode 100644 index d13772b..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go +++ /dev/null @@ -1,161 +0,0 @@ -package hcs - -import ( - "fmt" - "sync" - "syscall" - - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/vmcompute" - "github.com/sirupsen/logrus" -) - -var ( - nextCallback uintptr - callbackMap = map[uintptr]*notificationWatcherContext{} - callbackMapLock = sync.RWMutex{} - - notificationWatcherCallback = syscall.NewCallback(notificationWatcher) - - // Notifications for HCS_SYSTEM handles - hcsNotificationSystemExited hcsNotification = 0x00000001 - hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002 - hcsNotificationSystemStartCompleted hcsNotification = 0x00000003 - hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004 - hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005 - hcsNotificationSystemCrashReport hcsNotification = 0x00000006 - hcsNotificationSystemSiloJobCreated hcsNotification = 0x00000007 - hcsNotificationSystemSaveCompleted hcsNotification = 0x00000008 - hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009 - hcsNotificationSystemShutdownFailed hcsNotification = 0x0000000A - hcsNotificationSystemGetPropertiesCompleted hcsNotification = 0x0000000B - hcsNotificationSystemModifyCompleted hcsNotification = 0x0000000C - hcsNotificationSystemCrashInitiated hcsNotification = 0x0000000D - hcsNotificationSystemGuestConnectionClosed hcsNotification = 0x0000000E - - // Notifications for HCS_PROCESS handles - hcsNotificationProcessExited hcsNotification = 0x00010000 - - // Common notifications - 
hcsNotificationInvalid hcsNotification = 0x00000000 - hcsNotificationServiceDisconnect hcsNotification = 0x01000000 -) - -type hcsNotification uint32 - -func (hn hcsNotification) String() string { - switch hn { - case hcsNotificationSystemExited: - return "SystemExited" - case hcsNotificationSystemCreateCompleted: - return "SystemCreateCompleted" - case hcsNotificationSystemStartCompleted: - return "SystemStartCompleted" - case hcsNotificationSystemPauseCompleted: - return "SystemPauseCompleted" - case hcsNotificationSystemResumeCompleted: - return "SystemResumeCompleted" - case hcsNotificationSystemCrashReport: - return "SystemCrashReport" - case hcsNotificationSystemSiloJobCreated: - return "SystemSiloJobCreated" - case hcsNotificationSystemSaveCompleted: - return "SystemSaveCompleted" - case hcsNotificationSystemRdpEnhancedModeStateChanged: - return "SystemRdpEnhancedModeStateChanged" - case hcsNotificationSystemShutdownFailed: - return "SystemShutdownFailed" - case hcsNotificationSystemGetPropertiesCompleted: - return "SystemGetPropertiesCompleted" - case hcsNotificationSystemModifyCompleted: - return "SystemModifyCompleted" - case hcsNotificationSystemCrashInitiated: - return "SystemCrashInitiated" - case hcsNotificationSystemGuestConnectionClosed: - return "SystemGuestConnectionClosed" - case hcsNotificationProcessExited: - return "ProcessExited" - case hcsNotificationInvalid: - return "Invalid" - case hcsNotificationServiceDisconnect: - return "ServiceDisconnect" - default: - return fmt.Sprintf("Unknown: %d", hn) - } -} - -type notificationChannel chan error - -type notificationWatcherContext struct { - channels notificationChannels - handle vmcompute.HcsCallback - - systemID string - processID int -} - -type notificationChannels map[hcsNotification]notificationChannel - -func newSystemChannels() notificationChannels { - channels := make(notificationChannels) - for _, notif := range []hcsNotification{ - hcsNotificationServiceDisconnect, - 
hcsNotificationSystemExited, - hcsNotificationSystemCreateCompleted, - hcsNotificationSystemStartCompleted, - hcsNotificationSystemPauseCompleted, - hcsNotificationSystemResumeCompleted, - hcsNotificationSystemSaveCompleted, - } { - channels[notif] = make(notificationChannel, 1) - } - return channels -} - -func newProcessChannels() notificationChannels { - channels := make(notificationChannels) - for _, notif := range []hcsNotification{ - hcsNotificationServiceDisconnect, - hcsNotificationProcessExited, - } { - channels[notif] = make(notificationChannel, 1) - } - return channels -} - -func closeChannels(channels notificationChannels) { - for _, c := range channels { - close(c) - } -} - -func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr { - var result error - if int32(notificationStatus) < 0 { - result = interop.Win32FromHresult(notificationStatus) - } - - callbackMapLock.RLock() - context := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if context == nil { - return 0 - } - - log := logrus.WithFields(logrus.Fields{ - "notification-type": notificationType.String(), - "system-id": context.systemID, - }) - if context.processID != 0 { - log.Data[logfields.ProcessID] = context.processID - } - log.Debug("HCS notification") - - if channel, ok := context.channels[notificationType]; ok { - channel <- result - } - - return 0 -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go deleted file mode 100644 index 644f0ab..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go +++ /dev/null @@ -1,327 +0,0 @@ -package hcs - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net" - "syscall" - - "github.com/Microsoft/hcsshim/internal/log" -) - -var ( - // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists - 
ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e) - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrElementNotFound = syscall.Errno(0x490) - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrNotSupported = syscall.Errno(0x32) - - // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported - // decimal -2147024883 / hex 0x8007000d - ErrInvalidData = syscall.Errno(0xd) - - // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed - ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed") - - // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method - ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed") - - // ErrInvalidNotificationType is an error encountered when an invalid notification type is used - ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type") - - // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation - ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation") - - // ErrTimeout is an error encountered when waiting on a notification times out - ErrTimeout = errors.New("hcsshim: timeout waiting for notification") - - // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for - // a different expected notification - ErrUnexpectedContainerExit = errors.New("unexpected container exit") - - // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service - // is lost while waiting for a notification - ErrUnexpectedProcessAbort = errors.New("lost communication with compute service") - - // ErrUnexpectedValue is an error encountered when hcs returns 
an invalid value - ErrUnexpectedValue = errors.New("unexpected value returned from hcs") - - // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container - ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110) - - // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously - ErrVmcomputeOperationPending = syscall.Errno(0xC0370103) - - // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation - ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105) - - // ErrProcNotFound is an error encountered when a procedure look up fails. - ErrProcNotFound = syscall.Errno(0x7f) - - // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 - // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3. - ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5) - - // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management - ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d) - - // ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message - ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b) - - // ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly - ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106) - - // ErrNotSupported is an error encountered when hcs doesn't support the request - ErrPlatformNotSupported = errors.New("unsupported platform request") -) - -type ErrorEvent struct { - Message string `json:"Message,omitempty"` // Fully formated error message - StackTrace string `json:"StackTrace,omitempty"` // Stack trace in string form - Provider string `json:"Provider,omitempty"` - EventID uint16 
`json:"EventId,omitempty"` - Flags uint32 `json:"Flags,omitempty"` - Source string `json:"Source,omitempty"` - //Data []EventData `json:"Data,omitempty"` // Omit this as HCS doesn't encode this well. It's more confusing to include. It is however logged in debug mode (see processHcsResult function) -} - -type hcsResult struct { - Error int32 - ErrorMessage string - ErrorEvents []ErrorEvent `json:"ErrorEvents,omitempty"` -} - -func (ev *ErrorEvent) String() string { - evs := "[Event Detail: " + ev.Message - if ev.StackTrace != "" { - evs += " Stack Trace: " + ev.StackTrace - } - if ev.Provider != "" { - evs += " Provider: " + ev.Provider - } - if ev.EventID != 0 { - evs = fmt.Sprintf("%s EventID: %d", evs, ev.EventID) - } - if ev.Flags != 0 { - evs = fmt.Sprintf("%s flags: %d", evs, ev.Flags) - } - if ev.Source != "" { - evs += " Source: " + ev.Source - } - evs += "]" - return evs -} - -func processHcsResult(ctx context.Context, resultJSON string) []ErrorEvent { - if resultJSON != "" { - result := &hcsResult{} - if err := json.Unmarshal([]byte(resultJSON), result); err != nil { - log.G(ctx).WithError(err).Warning("Could not unmarshal HCS result") - return nil - } - return result.ErrorEvents - } - return nil -} - -type HcsError struct { - Op string - Err error - Events []ErrorEvent -} - -var _ net.Error = &HcsError{} - -func (e *HcsError) Error() string { - s := e.Op + ": " + e.Err.Error() - for _, ev := range e.Events { - s += "\n" + ev.String() - } - return s -} - -func (e *HcsError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() -} - -func (e *HcsError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() -} - -// ProcessError is an error encountered in HCS during an operation on a Process object -type ProcessError struct { - SystemID string - Pid int - Op string - Err error - Events []ErrorEvent -} - -var _ net.Error = &ProcessError{} - -// SystemError is an error encountered in HCS during an operation on a 
Container object -type SystemError struct { - ID string - Op string - Err error - Events []ErrorEvent -} - -var _ net.Error = &SystemError{} - -func (e *SystemError) Error() string { - s := e.Op + " " + e.ID + ": " + e.Err.Error() - for _, ev := range e.Events { - s += "\n" + ev.String() - } - return s -} - -func (e *SystemError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() -} - -func (e *SystemError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() -} - -func makeSystemError(system *System, op string, err error, events []ErrorEvent) error { - // Don't double wrap errors - if _, ok := err.(*SystemError); ok { - return err - } - return &SystemError{ - ID: system.ID(), - Op: op, - Err: err, - Events: events, - } -} - -func (e *ProcessError) Error() string { - s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error()) - for _, ev := range e.Events { - s += "\n" + ev.String() - } - return s -} - -func (e *ProcessError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() -} - -func (e *ProcessError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() -} - -func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error { - // Don't double wrap errors - if _, ok := err.(*ProcessError); ok { - return err - } - return &ProcessError{ - Pid: process.Pid(), - SystemID: process.SystemID(), - Op: op, - Err: err, - Events: events, - } -} - -// IsNotExist checks if an error is caused by the Container or Process not existing. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. 
-func IsNotExist(err error) bool { - err = getInnerError(err) - return err == ErrComputeSystemDoesNotExist || - err == ErrElementNotFound -} - -// IsAlreadyClosed checks if an error is caused by the Container or Process having been -// already closed by a call to the Close() method. -func IsAlreadyClosed(err error) bool { - err = getInnerError(err) - return err == ErrAlreadyClosed -} - -// IsPending returns a boolean indicating whether the error is that -// the requested operation is being completed in the background. -func IsPending(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationPending -} - -// IsTimeout returns a boolean indicating whether the error is caused by -// a timeout waiting for the operation to complete. -func IsTimeout(err error) bool { - if err, ok := err.(net.Error); ok && err.Timeout() { - return true - } - err = getInnerError(err) - return err == ErrTimeout -} - -// IsAlreadyStopped returns a boolean indicating whether the error is caused by -// a Container or Process being already stopped. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. 
-func IsAlreadyStopped(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeAlreadyStopped || - err == ErrElementNotFound -} - -// IsNotSupported returns a boolean indicating whether the error is caused by -// unsupported platform requests -// Note: Currently Unsupported platform requests can be mean either -// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage -// is thrown from the Platform -func IsNotSupported(err error) bool { - err = getInnerError(err) - // If Platform doesn't recognize or support the request sent, below errors are seen - return err == ErrVmcomputeInvalidJSON || - err == ErrInvalidData || - err == ErrNotSupported || - err == ErrVmcomputeUnknownMessage -} - -// IsOperationInvalidState returns true when err is caused by -// `ErrVmcomputeOperationInvalidState`. -func IsOperationInvalidState(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationInvalidState -} - -// IsAccessIsDenied returns true when err is caused by -// `ErrVmcomputeOperationAccessIsDenied`. 
-func IsAccessIsDenied(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationAccessIsDenied -} - -func getInnerError(err error) error { - switch pe := err.(type) { - case nil: - return nil - case *HcsError: - err = pe.Err - case *SystemError: - err = pe.Err - case *ProcessError: - err = pe.Err - } - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go deleted file mode 100644 index 8f20346..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ /dev/null @@ -1,521 +0,0 @@ -package hcs - -import ( - "context" - "encoding/json" - "io" - "sync" - "syscall" - "time" - - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/vmcompute" - "go.opencensus.io/trace" -) - -// ContainerError is an error encountered in HCS -type Process struct { - handleLock sync.RWMutex - handle vmcompute.HcsProcess - processID int - system *System - hasCachedStdio bool - stdioLock sync.Mutex - stdin io.WriteCloser - stdout io.ReadCloser - stderr io.ReadCloser - callbackNumber uintptr - - closedWaitOnce sync.Once - waitBlock chan struct{} - exitCode int - waitError error -} - -func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process { - return &Process{ - handle: process, - processID: processID, - system: computeSystem, - waitBlock: make(chan struct{}), - } -} - -type processModifyRequest struct { - Operation string - ConsoleSize *consoleSize `json:",omitempty"` - CloseHandle *closeHandle `json:",omitempty"` -} - -type consoleSize struct { - Height uint16 - Width uint16 -} - -type closeHandle struct { - Handle string -} - -type processStatus struct { - ProcessID uint32 - Exited bool - ExitCode uint32 - LastWaitResult int32 -} - -const stdIn string = "StdIn" - -const ( - modifyConsoleSize string = "ConsoleSize" - modifyCloseHandle string = 
"CloseHandle" -) - -// Pid returns the process ID of the process within the container. -func (process *Process) Pid() int { - return process.processID -} - -// SystemID returns the ID of the process's compute system. -func (process *Process) SystemID() string { - return process.system.ID() -} - -func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) { - switch err { - case nil: - return true, nil - case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: - select { - case <-process.waitBlock: - // The process exit notification has already arrived. - default: - // The process should be gone, but we have not received the notification. - // After a second, force unblock the process wait to work around a possible - // deadlock in the HCS. - go func() { - time.Sleep(time.Second) - process.closedWaitOnce.Do(func() { - log.G(ctx).WithError(err).Warn("force unblocking process waits") - process.exitCode = -1 - process.waitError = err - close(process.waitBlock) - }) - }() - } - return false, nil - default: - return false, err - } -} - -// Signal signals the process with `options`. -// -// For LCOW `guestrequest.SignalProcessOptionsLCOW`. -// -// For WCOW `guestrequest.SignalProcessOptionsWCOW`. 
-func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::Signal" - - if process.handle == 0 { - return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - optionsb, err := json.Marshal(options) - if err != nil { - return false, err - } - - resultJSON, err := vmcompute.HcsSignalProcess(ctx, process.handle, string(optionsb)) - events := processHcsResult(ctx, resultJSON) - delivered, err := process.processSignalResult(ctx, err) - if err != nil { - err = makeProcessError(process, operation, err, events) - } - return delivered, err -} - -// Kill signals the process to terminate but does not wait for it to finish terminating. -func (process *Process) Kill(ctx context.Context) (bool, error) { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::Kill" - - if process.handle == 0 { - return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle) - events := processHcsResult(ctx, resultJSON) - delivered, err := process.processSignalResult(ctx, err) - if err != nil { - err = makeProcessError(process, operation, err, events) - } - return delivered, err -} - -// waitBackground waits for the process exit notification. Once received sets -// `process.waitError` (if any) and unblocks all `Wait` calls. -// -// This MUST be called exactly once per `process.handle` but `Wait` is safe to -// call multiple times. 
func (process *Process) waitBackground() {
	operation := "hcs::Process::waitBackground"
	ctx, span := trace.StartSpan(context.Background(), operation)
	defer span.End()
	span.AddAttributes(
		trace.StringAttribute("cid", process.SystemID()),
		trace.Int64Attribute("pid", int64(process.processID)))

	var (
		err            error
		exitCode       = -1 // published as-is when the real exit code cannot be determined
		propertiesJSON string
		resultJSON     string
	)

	err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil)
	if err != nil {
		err = makeProcessError(process, operation, err, nil)
		log.G(ctx).WithError(err).Error("failed wait")
	} else {
		process.handleLock.RLock()
		defer process.handleLock.RUnlock()

		// Make sure we didn't race with Close() here
		if process.handle != 0 {
			propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle)
			events := processHcsResult(ctx, resultJSON)
			if err != nil {
				err = makeProcessError(process, operation, err, events) //nolint:ineffassign
			} else {
				properties := &processStatus{}
				err = json.Unmarshal([]byte(propertiesJSON), properties)
				if err != nil {
					err = makeProcessError(process, operation, err, nil) //nolint:ineffassign
				} else {
					if properties.LastWaitResult != 0 {
						// HCS reported a wait failure; leave exitCode at -1.
						log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result")
					} else {
						exitCode = int(properties.ExitCode)
					}
				}
			}
		}
	}
	log.G(ctx).WithField("exitCode", exitCode).Debug("process exited")

	// closedWaitOnce guards the publish: Close() may also fire it, and the
	// exit state must only ever be set (and waitBlock closed) once.
	process.closedWaitOnce.Do(func() {
		process.exitCode = exitCode
		process.waitError = err
		close(process.waitBlock)
	})
	oc.SetSpanStatus(span, err)
}

// Wait waits for the process to exit. If the process has already exited returns
// the previous error (if any).
func (process *Process) Wait() error {
	<-process.waitBlock
	return process.waitError
}

// ResizeConsole resizes the console of the process.
-func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::ResizeConsole" - - if process.handle == 0 { - return makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - modifyRequest := processModifyRequest{ - Operation: modifyConsoleSize, - ConsoleSize: &consoleSize{ - Height: height, - Width: width, - }, - } - - modifyRequestb, err := json.Marshal(modifyRequest) - if err != nil { - return err - } - - resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return makeProcessError(process, operation, err, events) - } - - return nil -} - -// ExitCode returns the exit code of the process. The process must have -// already terminated. -func (process *Process) ExitCode() (int, error) { - select { - case <-process.waitBlock: - if process.waitError != nil { - return -1, process.waitError - } - return process.exitCode, nil - default: - return -1, makeProcessError(process, "hcs::Process::ExitCode", ErrInvalidProcessState, nil) - } -} - -// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing -// these pipes does not close the underlying pipes. Once returned, these pipes -// are the responsibility of the caller to close. 
-func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) { - operation := "hcs::Process::StdioLegacy" - ctx, span := trace.StartSpan(context.Background(), operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - if process.handle == 0 { - return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - if process.hasCachedStdio { - stdin, stdout, stderr := process.stdin, process.stdout, process.stderr - process.stdin, process.stdout, process.stderr = nil, nil, nil - process.hasCachedStdio = false - return stdin, stdout, stderr, nil - } - - processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, nil, nil, makeProcessError(process, operation, err, events) - } - - pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) - if err != nil { - return nil, nil, nil, makeProcessError(process, operation, err, nil) - } - - return pipes[0], pipes[1], pipes[2], nil -} - -// Stdio returns the stdin, stdout, and stderr pipes, respectively. -// To close them, close the process handle. -func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) { - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - return process.stdin, process.stdout, process.stderr -} - -// CloseStdin closes the write side of the stdin pipe so that the process is -// notified on the read side that there is no more data in stdin. 
-func (process *Process) CloseStdin(ctx context.Context) error { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::CloseStdin" - - if process.handle == 0 { - return makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - modifyRequest := processModifyRequest{ - Operation: modifyCloseHandle, - CloseHandle: &closeHandle{ - Handle: stdIn, - }, - } - - modifyRequestb, err := json.Marshal(modifyRequest) - if err != nil { - return err - } - - resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return makeProcessError(process, operation, err, events) - } - - process.stdioLock.Lock() - if process.stdin != nil { - process.stdin.Close() - process.stdin = nil - } - process.stdioLock.Unlock() - - return nil -} - -func (process *Process) CloseStdout(ctx context.Context) (err error) { - ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.Lock() - defer process.handleLock.Unlock() - - if process.handle == 0 { - return nil - } - - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - if process.stdout != nil { - process.stdout.Close() - process.stdout = nil - } - return nil -} - -func (process *Process) CloseStderr(ctx context.Context) (err error) { - ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.Lock() - defer process.handleLock.Unlock() - - if process.handle == 0 { - return nil - } 
- - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - if process.stderr != nil { - process.stderr.Close() - process.stderr = nil - - } - return nil -} - -// Close cleans up any state associated with the process but does not kill -// or wait on it. -func (process *Process) Close() (err error) { - operation := "hcs::Process::Close" - ctx, span := trace.StartSpan(context.Background(), operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.Lock() - defer process.handleLock.Unlock() - - // Don't double free this - if process.handle == 0 { - return nil - } - - process.stdioLock.Lock() - if process.stdin != nil { - process.stdin.Close() - process.stdin = nil - } - if process.stdout != nil { - process.stdout.Close() - process.stdout = nil - } - if process.stderr != nil { - process.stderr.Close() - process.stderr = nil - } - process.stdioLock.Unlock() - - if err = process.unregisterCallback(ctx); err != nil { - return makeProcessError(process, operation, err, nil) - } - - if err = vmcompute.HcsCloseProcess(ctx, process.handle); err != nil { - return makeProcessError(process, operation, err, nil) - } - - process.handle = 0 - process.closedWaitOnce.Do(func() { - process.exitCode = -1 - process.waitError = ErrAlreadyClosed - close(process.waitBlock) - }) - - return nil -} - -func (process *Process) registerCallback(ctx context.Context) error { - callbackContext := ¬ificationWatcherContext{ - channels: newProcessChannels(), - systemID: process.SystemID(), - processID: process.processID, - } - - callbackMapLock.Lock() - callbackNumber := nextCallback - nextCallback++ - callbackMap[callbackNumber] = callbackContext - callbackMapLock.Unlock() - - callbackHandle, err := vmcompute.HcsRegisterProcessCallback(ctx, process.handle, notificationWatcherCallback, callbackNumber) - if err != nil { - 
return err - } - callbackContext.handle = callbackHandle - process.callbackNumber = callbackNumber - - return nil -} - -func (process *Process) unregisterCallback(ctx context.Context) error { - callbackNumber := process.callbackNumber - - callbackMapLock.RLock() - callbackContext := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if callbackContext == nil { - return nil - } - - handle := callbackContext.handle - - if handle == 0 { - return nil - } - - // vmcompute.HcsUnregisterProcessCallback has its own synchronization to - // wait for all callbacks to complete. We must NOT hold the callbackMapLock. - err := vmcompute.HcsUnregisterProcessCallback(ctx, handle) - if err != nil { - return err - } - - closeChannels(callbackContext.channels) - - callbackMapLock.Lock() - delete(callbackMap, callbackNumber) - callbackMapLock.Unlock() - - handle = 0 //nolint:ineffassign - - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go deleted file mode 100644 index b621c55..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go +++ /dev/null @@ -1,250 +0,0 @@ -package schema1 - -import ( - "encoding/json" - "time" - - "github.com/Microsoft/go-winio/pkg/guid" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// ProcessConfig is used as both the input of Container.CreateProcess -// and to convert the parameters to JSON for passing onto the HCS -type ProcessConfig struct { - ApplicationName string `json:",omitempty"` - CommandLine string `json:",omitempty"` - CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows - User string `json:",omitempty"` - WorkingDirectory string `json:",omitempty"` - Environment map[string]string `json:",omitempty"` - EmulateConsole bool `json:",omitempty"` - CreateStdInPipe bool `json:",omitempty"` - CreateStdOutPipe bool `json:",omitempty"` - CreateStdErrPipe 
bool `json:",omitempty"` - ConsoleSize [2]uint `json:",omitempty"` - CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows - OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows -} - -type Layer struct { - ID string - Path string -} - -type MappedDir struct { - HostPath string - ContainerPath string - ReadOnly bool - BandwidthMaximum uint64 - IOPSMaximum uint64 - CreateInUtilityVM bool - // LinuxMetadata - Support added in 1803/RS4+. - LinuxMetadata bool `json:",omitempty"` -} - -type MappedPipe struct { - HostPath string - ContainerPipeName string -} - -type HvRuntime struct { - ImagePath string `json:",omitempty"` - SkipTemplate bool `json:",omitempty"` - LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM - LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM - LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode - BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD - WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD -} - -type MappedVirtualDisk struct { - HostPath string `json:",omitempty"` // Path to VHD on the host - ContainerPath string // Platform-specific mount point path in the container - CreateInUtilityVM bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing" - AttachOnly bool `json:",omitempty"` -} - -// AssignedDevice represents a device that has been directly assigned to a container -// -// NOTE: Support added in RS5 -type AssignedDevice struct { - // InterfaceClassGUID of the device to assign to container. 
- InterfaceClassGUID string `json:"InterfaceClassGuid,omitempty"` -} - -// ContainerConfig is used as both the input of CreateContainer -// and to convert the parameters to JSON for passing onto the HCS -type ContainerConfig struct { - SystemType string // HCS requires this to be hard-coded to "Container" - Name string // Name of the container. We use the docker ID. - Owner string `json:",omitempty"` // The management platform that created this container - VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID} - IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows - LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID - Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID - Credentials string `json:",omitempty"` // Credentials information - ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container. - ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares. - ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit. 
- StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS - StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second - StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller - MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes - HostName string `json:",omitempty"` // Hostname - MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts) - MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes - HvPartition bool // True if it a Hyper-V Container - NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with. - EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container - HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM - Servicing bool `json:",omitempty"` // True if this container is for servicing - AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution - DNSSearchList string `json:",omitempty"` // Comma seperated list of DNS suffixes to use for name resolution - ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise. - TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed - MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start - AssignedDevices []AssignedDevice `json:",omitempty"` // Array of devices to assign. 
NOTE: Support added in RS5 -} - -type ComputeSystemQuery struct { - IDs []string `json:"Ids,omitempty"` - Types []string `json:",omitempty"` - Names []string `json:",omitempty"` - Owners []string `json:",omitempty"` -} - -type PropertyType string - -const ( - PropertyTypeStatistics PropertyType = "Statistics" // V1 and V2 - PropertyTypeProcessList PropertyType = "ProcessList" // V1 and V2 - PropertyTypeMappedVirtualDisk PropertyType = "MappedVirtualDisk" // Not supported in V2 schema call - PropertyTypeGuestConnection PropertyType = "GuestConnection" // V1 and V2. Nil return from HCS before RS5 -) - -type PropertyQuery struct { - PropertyTypes []PropertyType `json:",omitempty"` -} - -// ContainerProperties holds the properties for a container and the processes running in that container -type ContainerProperties struct { - ID string `json:"Id"` - State string - Name string - SystemType string - RuntimeOSType string `json:"RuntimeOsType,omitempty"` - Owner string - SiloGUID string `json:"SiloGuid,omitempty"` - RuntimeID guid.GUID `json:"RuntimeId,omitempty"` - IsRuntimeTemplate bool `json:",omitempty"` - RuntimeImagePath string `json:",omitempty"` - Stopped bool `json:",omitempty"` - ExitType string `json:",omitempty"` - AreUpdatesPending bool `json:",omitempty"` - ObRoot string `json:",omitempty"` - Statistics Statistics `json:",omitempty"` - ProcessList []ProcessListItem `json:",omitempty"` - MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"` - GuestConnectionInfo GuestConnectionInfo `json:",omitempty"` -} - -// MemoryStats holds the memory statistics for a container -type MemoryStats struct { - UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` - UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` - UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` -} - -// ProcessorStats holds the processor statistics for a container -type ProcessorStats struct { - 
TotalRuntime100ns uint64 `json:",omitempty"` - RuntimeUser100ns uint64 `json:",omitempty"` - RuntimeKernel100ns uint64 `json:",omitempty"` -} - -// StorageStats holds the storage statistics for a container -type StorageStats struct { - ReadCountNormalized uint64 `json:",omitempty"` - ReadSizeBytes uint64 `json:",omitempty"` - WriteCountNormalized uint64 `json:",omitempty"` - WriteSizeBytes uint64 `json:",omitempty"` -} - -// NetworkStats holds the network statistics for a container -type NetworkStats struct { - BytesReceived uint64 `json:",omitempty"` - BytesSent uint64 `json:",omitempty"` - PacketsReceived uint64 `json:",omitempty"` - PacketsSent uint64 `json:",omitempty"` - DroppedPacketsIncoming uint64 `json:",omitempty"` - DroppedPacketsOutgoing uint64 `json:",omitempty"` - EndpointId string `json:",omitempty"` - InstanceId string `json:",omitempty"` -} - -// Statistics is the structure returned by a statistics call on a container -type Statistics struct { - Timestamp time.Time `json:",omitempty"` - ContainerStartTime time.Time `json:",omitempty"` - Uptime100ns uint64 `json:",omitempty"` - Memory MemoryStats `json:",omitempty"` - Processor ProcessorStats `json:",omitempty"` - Storage StorageStats `json:",omitempty"` - Network []NetworkStats `json:",omitempty"` -} - -// ProcessList is the structure of an item returned by a ProcessList call on a container -type ProcessListItem struct { - CreateTimestamp time.Time `json:",omitempty"` - ImageName string `json:",omitempty"` - KernelTime100ns uint64 `json:",omitempty"` - MemoryCommitBytes uint64 `json:",omitempty"` - MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"` - MemoryWorkingSetSharedBytes uint64 `json:",omitempty"` - ProcessId uint32 `json:",omitempty"` - UserTime100ns uint64 `json:",omitempty"` -} - -// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container -type MappedVirtualDiskController struct { - MappedVirtualDisks map[int]MappedVirtualDisk 
`json:",omitempty"` -} - -// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM -type GuestDefinedCapabilities struct { - NamespaceAddRequestSupported bool `json:",omitempty"` - SignalProcessSupported bool `json:",omitempty"` - DumpStacksSupported bool `json:",omitempty"` - DeleteContainerStateSupported bool `json:",omitempty"` - UpdateContainerSupported bool `json:",omitempty"` -} - -// GuestConnectionInfo is the structure of an iterm return by a GuestConnection call on a utility VM -type GuestConnectionInfo struct { - SupportedSchemaVersions []hcsschema.Version `json:",omitempty"` - ProtocolVersion uint32 `json:",omitempty"` - GuestDefinedCapabilities GuestDefinedCapabilities `json:",omitempty"` -} - -// Type of Request Support in ModifySystem -type RequestType string - -// Type of Resource Support in ModifySystem -type ResourceType string - -// RequestType const -const ( - Add RequestType = "Add" - Remove RequestType = "Remove" - Network ResourceType = "Network" -) - -// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type ResourceModificationRequestResponse struct { - Resource ResourceType `json:"ResourceType"` - Data interface{} `json:"Settings"` - Request RequestType `json:"RequestType,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go deleted file mode 100644 index bcfeb34..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package 
hcsschema - -type Attachment struct { - Type_ string `json:"Type,omitempty"` - - Path string `json:"Path,omitempty"` - - IgnoreFlushes bool `json:"IgnoreFlushes,omitempty"` - - CachingMode string `json:"CachingMode,omitempty"` - - NoWriteHardening bool `json:"NoWriteHardening,omitempty"` - - DisableExpansionOptimization bool `json:"DisableExpansionOptimization,omitempty"` - - IgnoreRelativeLocator bool `json:"IgnoreRelativeLocator,omitempty"` - - CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"` - - ReadOnly bool `json:"ReadOnly,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go deleted file mode 100644 index ecbbed4..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go +++ /dev/null @@ -1,13 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Battery struct { -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go deleted file mode 100644 index c1ea395..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CacheQueryStatsResponse struct { - L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"` - - L3TotalBwBytes int32 `json:"L3TotalBwBytes,omitempty"` - - L3LocalBwBytes int32 
`json:"L3LocalBwBytes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go deleted file mode 100644 index ca75277..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Chipset struct { - Uefi *Uefi `json:"Uefi,omitempty"` - - IsNumLockDisabled bool `json:"IsNumLockDisabled,omitempty"` - - BaseBoardSerialNumber string `json:"BaseBoardSerialNumber,omitempty"` - - ChassisSerialNumber string `json:"ChassisSerialNumber,omitempty"` - - ChassisAssetTag string `json:"ChassisAssetTag,omitempty"` - - UseUtc bool `json:"UseUtc,omitempty"` - - // LinuxKernelDirect - Added in v2.2 Builds >=181117 - LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go deleted file mode 100644 index b4f9c31..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CloseHandle struct { - Handle string `json:"Handle,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go deleted file mode 100644 index 8bf8cab..0000000 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port. -type ComPort struct { - NamedPipe string `json:"NamedPipe,omitempty"` - - OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go deleted file mode 100644 index 10cea67..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ComputeSystem struct { - Owner string `json:"Owner,omitempty"` - - SchemaVersion *Version `json:"SchemaVersion,omitempty"` - - HostingSystemId string `json:"HostingSystemId,omitempty"` - - HostedSystem interface{} `json:"HostedSystem,omitempty"` - - Container *Container `json:"Container,omitempty"` - - VirtualMachine *VirtualMachine `json:"VirtualMachine,omitempty"` - - ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go deleted file mode 100644 index 1d5dfe6..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go +++ /dev/null 
@@ -1,72 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -import ( - "net/http" -) - -// contextKeys are used to identify the type of value in the context. -// Since these are string, it is possible to get a short description of the -// context key for logging and debugging using key.String(). - -type contextKey string - -func (c contextKey) String() string { - return "auth " + string(c) -} - -var ( - // ContextOAuth2 takes a oauth2.TokenSource as authentication for the request. - ContextOAuth2 = contextKey("token") - - // ContextBasicAuth takes BasicAuth as authentication for the request. - ContextBasicAuth = contextKey("basic") - - // ContextAccessToken takes a string oauth2 access token as authentication for the request. - ContextAccessToken = contextKey("accesstoken") - - // ContextAPIKey takes an APIKey as authentication for the request - ContextAPIKey = contextKey("apikey") -) - -// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth -type BasicAuth struct { - UserName string `json:"userName,omitempty"` - Password string `json:"password,omitempty"` -} - -// APIKey provides API key based authentication to a request passed via context using ContextAPIKey -type APIKey struct { - Key string - Prefix string -} - -type Configuration struct { - BasePath string `json:"basePath,omitempty"` - Host string `json:"host,omitempty"` - Scheme string `json:"scheme,omitempty"` - DefaultHeader map[string]string `json:"defaultHeader,omitempty"` - UserAgent string `json:"userAgent,omitempty"` - HTTPClient *http.Client -} - -func NewConfiguration() *Configuration { - cfg := &Configuration{ - BasePath: "https://localhost", - DefaultHeader: make(map[string]string), - UserAgent: "Swagger-Codegen/2.1.0/go", - } - 
return cfg -} - -func (c *Configuration) AddDefaultHeader(key string, value string) { - c.DefaultHeader[key] = value -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go deleted file mode 100644 index 68aa04a..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ConsoleSize struct { - Height int32 `json:"Height,omitempty"` - - Width int32 `json:"Width,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go deleted file mode 100644 index 4fb2310..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Container struct { - GuestOs *GuestOs `json:"GuestOs,omitempty"` - - Storage *Storage `json:"Storage,omitempty"` - - MappedDirectories []MappedDirectory `json:"MappedDirectories,omitempty"` - - MappedPipes []MappedPipe `json:"MappedPipes,omitempty"` - - Memory *Memory `json:"Memory,omitempty"` - - Processor *Processor `json:"Processor,omitempty"` - - Networking *Networking `json:"Networking,omitempty"` - - HvSocket *HvSocket `json:"HvSocket,omitempty"` - - ContainerCredentialGuard *ContainerCredentialGuardState `json:"ContainerCredentialGuard,omitempty"` - - RegistryChanges *RegistryChanges 
`json:"RegistryChanges,omitempty"` - - AssignedDevices []Device `json:"AssignedDevices,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go deleted file mode 100644 index 495c6eb..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardAddInstanceRequest struct { - Id string `json:"Id,omitempty"` - CredentialSpec string `json:"CredentialSpec,omitempty"` - Transport string `json:"Transport,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go deleted file mode 100644 index 1ed4c00..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardHvSocketServiceConfig struct { - ServiceId string `json:"ServiceId,omitempty"` - ServiceConfig *HvSocketServiceConfig `json:"ServiceConfig,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go deleted file mode 100644 index d7ebd0f..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardInstance struct { - Id string `json:"Id,omitempty"` - CredentialGuard *ContainerCredentialGuardState `json:"CredentialGuard,omitempty"` - HvSocketConfig *ContainerCredentialGuardHvSocketServiceConfig `json:"HvSocketConfig,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go deleted file mode 100644 index 71005b0..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardModifyOperation string - -const ( - AddInstance ContainerCredentialGuardModifyOperation = "AddInstance" - RemoveInstance ContainerCredentialGuardModifyOperation = "RemoveInstance" -) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go deleted file mode 100644 index 952cda4..0000000 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardOperationRequest struct { - Operation ContainerCredentialGuardModifyOperation `json:"Operation,omitempty"` - OperationDetails interface{} `json:"OperationDetails,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go deleted file mode 100644 index 32e5a3b..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardRemoveInstanceRequest struct { - Id string `json:"Id,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go deleted file mode 100644 index 0f8f644..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen 
(https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardState struct { - - // Authentication cookie for calls to a Container Credential Guard instance. - Cookie string `json:"Cookie,omitempty"` - - // Name of the RPC endpoint of the Container Credential Guard instance. - RpcEndpoint string `json:"RpcEndpoint,omitempty"` - - // Transport used for the configured Container Credential Guard instance. - Transport string `json:"Transport,omitempty"` - - // Credential spec used for the configured Container Credential Guard instance. - CredentialSpec string `json:"CredentialSpec,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go deleted file mode 100644 index ea306fa..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardSystemInfo struct { - Instances []ContainerCredentialGuardInstance `json:"Instances,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go deleted file mode 100644 index 1fd7ca5..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen 
(https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// memory usage as viewed from within the container -type ContainerMemoryInformation struct { - TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"` - - TotalUsage int32 `json:"TotalUsage,omitempty"` - - CommittedBytes int32 `json:"CommittedBytes,omitempty"` - - SharedCommittedBytes int32 `json:"SharedCommittedBytes,omitempty"` - - CommitLimitBytes int32 `json:"CommitLimitBytes,omitempty"` - - PeakCommitmentBytes int32 `json:"PeakCommitmentBytes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go deleted file mode 100644 index 90332a5..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// CPU groups allow Hyper-V administrators to better manage and allocate the host's CPU resources across guest virtual machines -type CpuGroup struct { - Id string `json:"Id,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go deleted file mode 100644 index 8794961..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CpuGroupAffinity struct { - LogicalProcessorCount int32 
`json:"LogicalProcessorCount,omitempty"` - LogicalProcessors []int32 `json:"LogicalProcessors,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go deleted file mode 100644 index f1a28cd..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CpuGroupConfig struct { - GroupId string `json:"GroupId,omitempty"` - Affinity *CpuGroupAffinity `json:"Affinity,omitempty"` - GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"` - // Hypervisor CPU group IDs exposed to clients - HypervisorGroupId int32 `json:"HypervisorGroupId,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go deleted file mode 100644 index 3ace0cc..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Structure used to return cpu groups for a Service property query -type CpuGroupConfigurations struct { - CpuGroups []CpuGroupConfig `json:"CpuGroups,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go deleted 
file mode 100644 index 7d89780..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CPUGroupOperation string - -const ( - CreateGroup CPUGroupOperation = "CreateGroup" - DeleteGroup CPUGroupOperation = "DeleteGroup" - SetProperty CPUGroupOperation = "SetProperty" -) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go deleted file mode 100644 index bbad6a2..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CpuGroupProperty struct { - PropertyCode uint32 `json:"PropertyCode,omitempty"` - PropertyValue uint32 `json:"PropertyValue,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go deleted file mode 100644 index 91a8278..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Create group 
operation settings -type CreateGroupOperation struct { - GroupId string `json:"GroupId,omitempty"` - LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` - LogicalProcessors []uint32 `json:"LogicalProcessors,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go deleted file mode 100644 index 134bd98..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Delete group operation settings -type DeleteGroupOperation struct { - GroupId string `json:"GroupId,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go deleted file mode 100644 index 107cadd..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type DeviceType string - -const ( - ClassGUID DeviceType = "ClassGuid" - DeviceInstance DeviceType = "DeviceInstance" - GPUMirror DeviceType = "GpuMirror" -) - -type Device struct { - // The type of device to assign to the container. - Type DeviceType `json:"Type,omitempty"` - // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid. 
- InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` - // The location path of the device to assign to the container. Only used when Type is DeviceInstance. - LocationPath string `json:"LocationPath,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go deleted file mode 100644 index e985d96..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Devices struct { - ComPorts map[string]ComPort `json:"ComPorts,omitempty"` - - Scsi map[string]Scsi `json:"Scsi,omitempty"` - - VirtualPMem *VirtualPMemController `json:"VirtualPMem,omitempty"` - - NetworkAdapters map[string]NetworkAdapter `json:"NetworkAdapters,omitempty"` - - VideoMonitor *VideoMonitor `json:"VideoMonitor,omitempty"` - - Keyboard *Keyboard `json:"Keyboard,omitempty"` - - Mouse *Mouse `json:"Mouse,omitempty"` - - HvSocket *HvSocket2 `json:"HvSocket,omitempty"` - - EnhancedModeVideo *EnhancedModeVideo `json:"EnhancedModeVideo,omitempty"` - - GuestCrashReporting *GuestCrashReporting `json:"GuestCrashReporting,omitempty"` - - VirtualSmb *VirtualSmb `json:"VirtualSmb,omitempty"` - - Plan9 *Plan9 `json:"Plan9,omitempty"` - - Battery *Battery `json:"Battery,omitempty"` - - FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"` - - SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"` - - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. 
- VirtualPci map[string]VirtualPciDevice `json:",omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go deleted file mode 100644 index 85450c4..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type EnhancedModeVideo struct { - ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go deleted file mode 100644 index fe86cab..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type FlexibleIoDevice struct { - EmulatorId string `json:"EmulatorId,omitempty"` - - HostingModel string `json:"HostingModel,omitempty"` - - Configuration []string `json:"Configuration,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go deleted file mode 100644 index 7db2949..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen 
https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type GuestConnection struct { - - // Use Vsock rather than Hyper-V sockets to communicate with the guest service. - UseVsock bool `json:"UseVsock,omitempty"` - - // Don't disconnect the guest connection when pausing the virtual machine. - UseConnectedSuspend bool `json:"UseConnectedSuspend,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go deleted file mode 100644 index 8a369ba..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Information about the guest. -type GuestConnectionInfo struct { - - // Each schema version x.y stands for the range of versions a.b where a==x and b<=y. This list comes from the SupportedSchemaVersions field in GcsCapabilities. 
- SupportedSchemaVersions []Version `json:"SupportedSchemaVersions,omitempty"` - - ProtocolVersion int32 `json:"ProtocolVersion,omitempty"` - - GuestDefinedCapabilities *interface{} `json:"GuestDefinedCapabilities,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go deleted file mode 100644 index af82800..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type GuestCrashReporting struct { - WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go deleted file mode 100644 index 8838519..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type GuestOs struct { - HostName string `json:"HostName,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go deleted file mode 100644 index ef1eec8..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger 
Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type GuestState struct { - - // The path to an existing file uses for persistent guest state storage. An empty string indicates the system should initialize new transient, in-memory guest state. - GuestStateFilePath string `json:"GuestStateFilePath,omitempty"` - - // The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state. - RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"` - - // If true, the guest state and runtime state files will be used as templates to populate transient, in-memory state instead of using the files as persistent backing store. - ForceTransientState bool `json:"ForceTransientState,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go deleted file mode 100644 index 2238ce5..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Structure used to request a service processor modification -type HostProcessorModificationRequest struct { - Operation CPUGroupOperation `json:"Operation,omitempty"` - OperationDetails interface{} `json:"OperationDetails,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go deleted file mode 
100644 index ea3084b..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type HostedSystem struct { - SchemaVersion *Version `json:"SchemaVersion,omitempty"` - - Container *Container `json:"Container,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go deleted file mode 100644 index 23b2ee9..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type HvSocket struct { - Config *HvSocketSystemConfig `json:"Config,omitempty"` - - EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go deleted file mode 100644 index a017691..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// HvSocket configuration for a VM -type HvSocket2 struct { - HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"` 
-} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go deleted file mode 100644 index 84c11b9..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// This class defines address settings applied to a VM -// by the GCS every time a VM starts or restores. -type HvSocketAddress struct { - LocalAddress string `json:"LocalAddress,omitempty"` - ParentAddress string `json:"ParentAddress,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go deleted file mode 100644 index ecd9f7f..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type HvSocketServiceConfig struct { - - // SDDL string that HvSocket will check before allowing a host process to bind to this specific service. If not specified, defaults to the system DefaultBindSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. - BindSecurityDescriptor string `json:"BindSecurityDescriptor,omitempty"` - - // SDDL string that HvSocket will check before allowing a host process to connect to this specific service. 
If not specified, defaults to the system DefaultConnectSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. - ConnectSecurityDescriptor string `json:"ConnectSecurityDescriptor,omitempty"` - - // If true, HvSocket will process wildcard binds for this service/system combination. Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors - AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"` - - // Disabled controls whether the HvSocket service is accepting connection requests. - // This set to true will make the service refuse all incoming connections as well as cancel - // any connections already established. The service itself will still be active however - // and can be re-enabled at a future time. - Disabled bool `json:"Disabled,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go deleted file mode 100644 index 69f4f9d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1. -type HvSocketSystemConfig struct { - - // SDDL string that HvSocket will check before allowing a host process to bind to an unlisted service for this specific container/VM (not wildcard binds). 
- DefaultBindSecurityDescriptor string `json:"DefaultBindSecurityDescriptor,omitempty"` - - // SDDL string that HvSocket will check before allowing a host process to connect to an unlisted service in the VM/container. - DefaultConnectSecurityDescriptor string `json:"DefaultConnectSecurityDescriptor,omitempty"` - - ServiceTable map[string]HvSocketServiceConfig `json:"ServiceTable,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go deleted file mode 100644 index a614d63..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type InterruptModerationName string - -// The valid interrupt moderation modes for I/O virtualization (IOV) offloading. 
-const ( - DefaultName InterruptModerationName = "Default" - AdaptiveName InterruptModerationName = "Adaptive" - OffName InterruptModerationName = "Off" - LowName InterruptModerationName = "Low" - MediumName InterruptModerationName = "Medium" - HighName InterruptModerationName = "High" -) - -type InterruptModerationValue uint32 - -const ( - DefaultValue InterruptModerationValue = iota - AdaptiveValue - OffValue - LowValue InterruptModerationValue = 100 - MediumValue InterruptModerationValue = 200 - HighValue InterruptModerationValue = 300 -) - -var InterruptModerationValueToName = map[InterruptModerationValue]InterruptModerationName{ - DefaultValue: DefaultName, - AdaptiveValue: AdaptiveName, - OffValue: OffName, - LowValue: LowName, - MediumValue: MediumName, - HighValue: HighName, -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go deleted file mode 100644 index 2a55cc3..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type IovSettings struct { - // The weight assigned to this port for I/O virtualization (IOV) offloading. - // Setting this to 0 disables IOV offloading. - OffloadWeight *uint32 `json:"OffloadWeight,omitempty"` - - // The number of queue pairs requested for this port for I/O virtualization (IOV) offloading. - QueuePairsRequested *uint32 `json:"QueuePairsRequested,omitempty"` - - // The interrupt moderation mode for I/O virtualization (IOV) offloading. 
- InterruptModeration *InterruptModerationName `json:"InterruptModeration,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go deleted file mode 100644 index 3d3fa3b..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go +++ /dev/null @@ -1,13 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Keyboard struct { -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go deleted file mode 100644 index 176c49d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Layer struct { - Id string `json:"Id,omitempty"` - - Path string `json:"Path,omitempty"` - - PathType string `json:"PathType,omitempty"` - - // Unspecified defaults to Enabled - Cache string `json:"Cache,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go deleted file mode 100644 index 0ab6c28..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.2 - * Generated by: Swagger 
Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type LinuxKernelDirect struct { - KernelFilePath string `json:"KernelFilePath,omitempty"` - - InitRdPath string `json:"InitRdPath,omitempty"` - - KernelCmdLine string `json:"KernelCmdLine,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go deleted file mode 100644 index 2e3aa5e..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type LogicalProcessor struct { - LpIndex uint32 `json:"LpIndex,omitempty"` - NodeNumber uint8 `json:"NodeNumber,omitempty"` - PackageId uint32 `json:"PackageId,omitempty"` - CoreId uint32 `json:"CoreId,omitempty"` - RootVpIndex int32 `json:"RootVpIndex,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go deleted file mode 100644 index 9b86a40..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type MappedDirectory struct { - HostPath string `json:"HostPath,omitempty"` - - HostPathType string `json:"HostPathType,omitempty"` - - ContainerPath string `json:"ContainerPath,omitempty"` - - ReadOnly bool `json:"ReadOnly,omitempty"` -} 
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go deleted file mode 100644 index 208074e..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type MappedPipe struct { - ContainerPipeName string `json:"ContainerPipeName,omitempty"` - - HostPath string `json:"HostPath,omitempty"` - - HostPathType string `json:"HostPathType,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go deleted file mode 100644 index 30749c6..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Memory struct { - SizeInMB uint64 `json:"SizeInMB,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go deleted file mode 100644 index 71224c7..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type 
Memory2 struct { - SizeInMB uint64 `json:"SizeInMB,omitempty"` - - AllowOvercommit bool `json:"AllowOvercommit,omitempty"` - - EnableHotHint bool `json:"EnableHotHint,omitempty"` - - EnableColdHint bool `json:"EnableColdHint,omitempty"` - - EnableEpf bool `json:"EnableEpf,omitempty"` - - // EnableDeferredCommit is private in the schema. If regenerated need to add back. - EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` - - // EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed - // to the VM, allowing it to trim non-zeroed pages from the working set (if supported by - // the guest operating system). - EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` - - // LowMmioGapInMB is the low MMIO region allocated below 4GB. - // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` - - // HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and - // size). - // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` - - // HighMmioGapInMB is the high MMIO region. - // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. 
- HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go deleted file mode 100644 index 811779b..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type MemoryInformationForVm struct { - VirtualNodeCount uint32 `json:"VirtualNodeCount,omitempty"` - - VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"` - - VirtualNodes []VirtualNodeInfo `json:"VirtualNodes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go deleted file mode 100644 index 906ba59..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Memory runtime statistics -type MemoryStats struct { - MemoryUsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` - - MemoryUsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` - - MemoryUsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go deleted file mode 100644 index 1384ed8..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ModificationRequest struct { - PropertyType PropertyType `json:"PropertyType,omitempty"` - Settings interface{} `json:"Settings,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go deleted file mode 100644 index d29455a..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ModifySettingRequest struct { - ResourcePath string `json:"ResourcePath,omitempty"` - - RequestType string `json:"RequestType,omitempty"` - - Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated - - GuestRequest interface{} `json:"GuestRequest,omitempty"` // NOTE: Swagger generated as *interface{}. 
Locally updated -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go deleted file mode 100644 index ccf8b93..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go +++ /dev/null @@ -1,13 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Mouse struct { -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go deleted file mode 100644 index 7408abd..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type NetworkAdapter struct { - EndpointId string `json:"EndpointId,omitempty"` - MacAddress string `json:"MacAddress,omitempty"` - // The I/O virtualization (IOV) offloading configuration. 
- IovSettings *IovSettings `json:"IovSettings,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go deleted file mode 100644 index e5ea187..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Networking struct { - AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"` - - DnsSearchList string `json:"DnsSearchList,omitempty"` - - NetworkSharedContainerName string `json:"NetworkSharedContainerName,omitempty"` - - // Guid in windows; string in linux - Namespace string `json:"Namespace,omitempty"` - - NetworkAdapters []string `json:"NetworkAdapters,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go deleted file mode 100644 index d96c950..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Notification data that is indicated to components running in the Virtual Machine. 
-type PauseNotification struct { - Reason string `json:"Reason,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go deleted file mode 100644 index 21707a8..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Options for HcsPauseComputeSystem -type PauseOptions struct { - SuspensionLevel string `json:"SuspensionLevel,omitempty"` - - HostedNotification *PauseNotification `json:"HostedNotification,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go deleted file mode 100644 index 29d8c80..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Plan9 struct { - Shares []Plan9Share `json:"Shares,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go deleted file mode 100644 index 41f8fde..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: 
Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Plan9Share struct { - Name string `json:"Name,omitempty"` - - // The name by which the guest operation system can access this share, via the aname parameter in the Plan9 protocol. - AccessName string `json:"AccessName,omitempty"` - - Path string `json:"Path,omitempty"` - - Port int32 `json:"Port,omitempty"` - - // Flags are marked private. Until they are exported correctly - // - // ReadOnly 0x00000001 - // LinuxMetadata 0x00000004 - // CaseSensitive 0x00000008 - Flags int32 `json:"Flags,omitempty"` - - ReadOnly bool `json:"ReadOnly,omitempty"` - - UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` - - AllowedFiles []string `json:"AllowedFiles,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go deleted file mode 100644 index e9a662d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -import ( - "time" -) - -// Information about a process running in a container -type ProcessDetails struct { - ProcessId int32 `json:"ProcessId,omitempty"` - - ImageName string `json:"ImageName,omitempty"` - - CreateTimestamp time.Time `json:"CreateTimestamp,omitempty"` - - UserTime100ns int32 `json:"UserTime100ns,omitempty"` - - KernelTime100ns int32 `json:"KernelTime100ns,omitempty"` - - MemoryCommitBytes int32 `json:"MemoryCommitBytes,omitempty"` - - MemoryWorkingSetPrivateBytes int32 `json:"MemoryWorkingSetPrivateBytes,omitempty"` - - MemoryWorkingSetSharedBytes int32 
`json:"MemoryWorkingSetSharedBytes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go deleted file mode 100644 index e4ed095..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Passed to HcsRpc_ModifyProcess -type ProcessModifyRequest struct { - Operation string `json:"Operation,omitempty"` - - ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"` - - CloseHandle *CloseHandle `json:"CloseHandle,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go deleted file mode 100644 index 82b0d05..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ProcessParameters struct { - ApplicationName string `json:"ApplicationName,omitempty"` - - CommandLine string `json:"CommandLine,omitempty"` - - // optional alternative to CommandLine, currently only supported by Linux GCS - CommandArgs []string `json:"CommandArgs,omitempty"` - - User string `json:"User,omitempty"` - - WorkingDirectory string `json:"WorkingDirectory,omitempty"` - - Environment map[string]string `json:"Environment,omitempty"` - - // if set, will run as 
low-privilege process - RestrictedToken bool `json:"RestrictedToken,omitempty"` - - // if set, ignore StdErrPipe - EmulateConsole bool `json:"EmulateConsole,omitempty"` - - CreateStdInPipe bool `json:"CreateStdInPipe,omitempty"` - - CreateStdOutPipe bool `json:"CreateStdOutPipe,omitempty"` - - CreateStdErrPipe bool `json:"CreateStdErrPipe,omitempty"` - - // height then width - ConsoleSize []int32 `json:"ConsoleSize,omitempty"` - - // if set, find an existing session for the user and create the process in it - UseExistingLogin bool `json:"UseExistingLogin,omitempty"` - - // if set, use the legacy console instead of conhost - UseLegacyConsole bool `json:"UseLegacyConsole,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go deleted file mode 100644 index ad9a4fa..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Status of a process running in a container -type ProcessStatus struct { - ProcessId int32 `json:"ProcessId,omitempty"` - - Exited bool `json:"Exited,omitempty"` - - ExitCode int32 `json:"ExitCode,omitempty"` - - LastWaitResult int32 `json:"LastWaitResult,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go deleted file mode 100644 index bb24e88..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - 
* API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Processor struct { - Count int32 `json:"Count,omitempty"` - - Maximum int32 `json:"Maximum,omitempty"` - - Weight int32 `json:"Weight,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go deleted file mode 100644 index c64f335..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.5 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Processor2 struct { - Count int32 `json:"Count,omitempty"` - - Limit int32 `json:"Limit,omitempty"` - - Weight int32 `json:"Weight,omitempty"` - - ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` - - // An optional object that configures the CPU Group to which a Virtual Machine is going to bind to. 
- CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go deleted file mode 100644 index 6157e25..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// CPU runtime statistics -type ProcessorStats struct { - TotalRuntime100ns uint64 `json:"TotalRuntime100ns,omitempty"` - - RuntimeUser100ns uint64 `json:"RuntimeUser100ns,omitempty"` - - RuntimeKernel100ns uint64 `json:"RuntimeKernel100ns,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go deleted file mode 100644 index 885156e..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ProcessorTopology struct { - LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` - LogicalProcessors []LogicalProcessor `json:"LogicalProcessors,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go deleted file mode 100644 index 17558cb..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go +++ /dev/null @@ -1,54 +0,0 
@@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -import ( - v1 "github.com/containerd/cgroups/stats/v1" -) - -type Properties struct { - Id string `json:"Id,omitempty"` - - SystemType string `json:"SystemType,omitempty"` - - RuntimeOsType string `json:"RuntimeOsType,omitempty"` - - Name string `json:"Name,omitempty"` - - Owner string `json:"Owner,omitempty"` - - RuntimeId string `json:"RuntimeId,omitempty"` - - RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"` - - State string `json:"State,omitempty"` - - Stopped bool `json:"Stopped,omitempty"` - - ExitType string `json:"ExitType,omitempty"` - - Memory *MemoryInformationForVm `json:"Memory,omitempty"` - - Statistics *Statistics `json:"Statistics,omitempty"` - - ProcessList []ProcessDetails `json:"ProcessList,omitempty"` - - TerminateOnLastHandleClosed bool `json:"TerminateOnLastHandleClosed,omitempty"` - - HostingSystemId string `json:"HostingSystemId,omitempty"` - - SharedMemoryRegionInfo []SharedMemoryRegionInfo `json:"SharedMemoryRegionInfo,omitempty"` - - GuestConnectionInfo *GuestConnectionInfo `json:"GuestConnectionInfo,omitempty"` - - // Metrics is not part of the API for HCS but this is used for LCOW v2 to - // return the full cgroup metrics from the guest. 
- Metrics *v1.Metrics `json:"LCOWMetrics,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go deleted file mode 100644 index d6d80df..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// By default the basic properties will be returned. This query provides a way to request specific properties. -type PropertyQuery struct { - PropertyTypes []PropertyType `json:"PropertyTypes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go deleted file mode 100644 index 98f2c96..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type PropertyType string - -const ( - PTMemory PropertyType = "Memory" - PTGuestMemory PropertyType = "GuestMemory" - PTStatistics PropertyType = "Statistics" - PTProcessList PropertyType = "ProcessList" - PTTerminateOnLastHandleClosed PropertyType = "TerminateOnLastHandleClosed" - PTSharedMemoryRegion PropertyType = "SharedMemoryRegion" - PTContainerCredentialGuard PropertyType = "ContainerCredentialGuard" // This field is not generated by swagger. This was added manually. 
- PTGuestConnection PropertyType = "GuestConnection" - PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus" - PTProcessorTopology PropertyType = "ProcessorTopology" - PTCPUGroup PropertyType = "CpuGroup" -) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go deleted file mode 100644 index 8d5f5c1..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type RdpConnectionOptions struct { - AccessSids []string `json:"AccessSids,omitempty"` - - NamedPipe string `json:"NamedPipe,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go deleted file mode 100644 index 006906f..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type RegistryChanges struct { - AddValues []RegistryValue `json:"AddValues,omitempty"` - - DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go deleted file mode 100644 index 26fde99..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go 
+++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type RegistryKey struct { - Hive string `json:"Hive,omitempty"` - - Name string `json:"Name,omitempty"` - - Volatile bool `json:"Volatile,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go deleted file mode 100644 index 3f20317..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type RegistryValue struct { - Key *RegistryKey `json:"Key,omitempty"` - - Name string `json:"Name,omitempty"` - - Type_ string `json:"Type,omitempty"` - - // One and only one value type must be set. 
- StringValue string `json:"StringValue,omitempty"` - - BinaryValue string `json:"BinaryValue,omitempty"` - - DWordValue int32 `json:"DWordValue,omitempty"` - - QWordValue int32 `json:"QWordValue,omitempty"` - - // Only used if RegistryValueType is CustomType The data is in BinaryValue - CustomType int32 `json:"CustomType,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go deleted file mode 100644 index 778ff58..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type RestoreState struct { - - // The path to the save state file to restore the system from. - SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` - - // The ID of the template system to clone this new system off of. An empty string indicates the system should not be cloned from a template. - TemplateSystemId string `json:"TemplateSystemId,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go deleted file mode 100644 index e55fa1d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type SaveOptions struct { - - // The type of save operation to be performed. 
- SaveType string `json:"SaveType,omitempty"` - - // The path to the file that will container the saved state. - SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go deleted file mode 100644 index bf253a4..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Scsi struct { - - // Map of attachments, where the key is the integer LUN number on the controller. - Attachments map[string]Attachment `json:"Attachments,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go deleted file mode 100644 index b8142ca..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -import "encoding/json" - -type ServiceProperties struct { - // Changed Properties field to []json.RawMessage from []interface{} to avoid having to - // remarshal sp.Properties[n] and unmarshal into the type(s) we want. 
- Properties []json.RawMessage `json:"Properties,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go deleted file mode 100644 index df9baa9..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type SharedMemoryConfiguration struct { - Regions []SharedMemoryRegion `json:"Regions,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go deleted file mode 100644 index 825b718..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type SharedMemoryRegion struct { - SectionName string `json:"SectionName,omitempty"` - - StartOffset int32 `json:"StartOffset,omitempty"` - - Length int32 `json:"Length,omitempty"` - - AllowGuestWrite bool `json:"AllowGuestWrite,omitempty"` - - HiddenFromGuest bool `json:"HiddenFromGuest,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go deleted file mode 100644 index f67b08e..0000000 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type SharedMemoryRegionInfo struct { - SectionName string `json:"SectionName,omitempty"` - - GuestPhysicalAddress int32 `json:"GuestPhysicalAddress,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go deleted file mode 100644 index 5eaf6a7..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Silo job information -type SiloProperties struct { - Enabled bool `json:"Enabled,omitempty"` - - JobName string `json:"JobName,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go deleted file mode 100644 index ba7a6b3..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go +++ /dev/null @@ -1,29 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -import ( - "time" -) - -// Runtime statistics for a container -type Statistics struct { - Timestamp time.Time 
`json:"Timestamp,omitempty"` - - ContainerStartTime time.Time `json:"ContainerStartTime,omitempty"` - - Uptime100ns uint64 `json:"Uptime100ns,omitempty"` - - Processor *ProcessorStats `json:"Processor,omitempty"` - - Memory *MemoryStats `json:"Memory,omitempty"` - - Storage *StorageStats `json:"Storage,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go deleted file mode 100644 index 2627af9..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Storage struct { - - // List of layers that describe the parent hierarchy for a container's storage. These layers combined together, presented as a disposable and/or committable working storage, are used by the container to record all changes done to the parent layers. - Layers []Layer `json:"Layers,omitempty"` - - // Path that points to the scratch space of a container, where parent layers are combined together to present a new disposable and/or committable layer with the changes done during its runtime. 
- Path string `json:"Path,omitempty"` - - QoS *StorageQoS `json:"QoS,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go deleted file mode 100644 index 9c5e6eb..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type StorageQoS struct { - IopsMaximum int32 `json:"IopsMaximum,omitempty"` - - BandwidthMaximum int32 `json:"BandwidthMaximum,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go deleted file mode 100644 index 4f042ff..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Storage runtime statistics -type StorageStats struct { - ReadCountNormalized uint64 `json:"ReadCountNormalized,omitempty"` - - ReadSizeBytes uint64 `json:"ReadSizeBytes,omitempty"` - - WriteCountNormalized uint64 `json:"WriteCountNormalized,omitempty"` - - WriteSizeBytes uint64 `json:"WriteSizeBytes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go deleted file mode 100644 index 8348699..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go +++ 
/dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Topology struct { - Memory *Memory2 `json:"Memory,omitempty"` - - Processor *Processor2 `json:"Processor,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go deleted file mode 100644 index 0e48ece..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Uefi struct { - EnableDebugger bool `json:"EnableDebugger,omitempty"` - - SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"` - - BootThis *UefiBootEntry `json:"BootThis,omitempty"` - - Console string `json:"Console,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go deleted file mode 100644 index 3ab409d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type UefiBootEntry struct { - DeviceType string `json:"DeviceType,omitempty"` - - DevicePath string `json:"DevicePath,omitempty"` - - DiskNumber int32 
`json:"DiskNumber,omitempty"` - - OptionalData string `json:"OptionalData,omitempty"` - - VmbFsRootPath string `json:"VmbFsRootPath,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go deleted file mode 100644 index 2abfccc..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Version struct { - Major int32 `json:"Major,omitempty"` - - Minor int32 `json:"Minor,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go deleted file mode 100644 index ec5d0fb..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VideoMonitor struct { - HorizontalResolution int32 `json:"HorizontalResolution,omitempty"` - - VerticalResolution int32 `json:"VerticalResolution,omitempty"` - - ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go deleted file mode 100644 index 2d22b1b..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * HCS 
API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualMachine struct { - - // StopOnReset is private in the schema. If regenerated need to put back. - StopOnReset bool `json:"StopOnReset,omitempty"` - - Chipset *Chipset `json:"Chipset,omitempty"` - - ComputeTopology *Topology `json:"ComputeTopology,omitempty"` - - Devices *Devices `json:"Devices,omitempty"` - - GuestState *GuestState `json:"GuestState,omitempty"` - - RestoreState *RestoreState `json:"RestoreState,omitempty"` - - RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` - - StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` - - GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go deleted file mode 100644 index 91a3c83..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualNodeInfo struct { - VirtualNodeIndex int32 `json:"VirtualNodeIndex,omitempty"` - - PhysicalNodeNumber int32 `json:"PhysicalNodeNumber,omitempty"` - - VirtualProcessorCount int32 `json:"VirtualProcessorCount,omitempty"` - - MemoryUsageInPages int32 `json:"MemoryUsageInPages,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go deleted file 
mode 100644 index f5b7f3e..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualPMemController struct { - Devices map[string]VirtualPMemDevice `json:"Devices,omitempty"` - - MaximumCount uint32 `json:"MaximumCount,omitempty"` - - MaximumSizeBytes uint64 `json:"MaximumSizeBytes,omitempty"` - - Backing string `json:"Backing,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go deleted file mode 100644 index 70cf2d9..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualPMemDevice struct { - HostPath string `json:"HostPath,omitempty"` - - ReadOnly bool `json:"ReadOnly,omitempty"` - - ImageFormat string `json:"ImageFormat,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go deleted file mode 100644 index f5e0590..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.3 - * Generated by: Swagger 
Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// TODO: This is pre-release support in schema 2.3. Need to add build number -// docs when a public build with this is out. -type VirtualPciDevice struct { - Functions []VirtualPciFunction `json:",omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go deleted file mode 100644 index cedb7d1..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.3 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// TODO: This is pre-release support in schema 2.3. Need to add build number -// docs when a public build with this is out. 
-type VirtualPciFunction struct { - DeviceInstancePath string `json:",omitempty"` - - VirtualFunction uint16 `json:",omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go deleted file mode 100644 index 362df36..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualSmb struct { - Shares []VirtualSmbShare `json:"Shares,omitempty"` - - DirectFileMappingInMB int64 `json:"DirectFileMappingInMB,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go deleted file mode 100644 index 915e9b6..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualSmbShare struct { - Name string `json:"Name,omitempty"` - - Path string `json:"Path,omitempty"` - - AllowedFiles []string `json:"AllowedFiles,omitempty"` - - Options *VirtualSmbShareOptions `json:"Options,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go deleted file mode 100644 index 75196bd..0000000 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualSmbShareOptions struct { - ReadOnly bool `json:"ReadOnly,omitempty"` - - // convert exclusive access to shared read access - ShareRead bool `json:"ShareRead,omitempty"` - - // all opens will use cached I/O - CacheIo bool `json:"CacheIo,omitempty"` - - // disable oplock support - NoOplocks bool `json:"NoOplocks,omitempty"` - - // Acquire the backup privilege when attempting to open - TakeBackupPrivilege bool `json:"TakeBackupPrivilege,omitempty"` - - // Use the identity of the share root when opening - UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` - - // disable Direct Mapping - NoDirectmap bool `json:"NoDirectmap,omitempty"` - - // disable Byterange locks - NoLocks bool `json:"NoLocks,omitempty"` - - // disable Directory CHange Notifications - NoDirnotify bool `json:"NoDirnotify,omitempty"` - - // share is use for VM shared memory - VmSharedMemory bool `json:"VmSharedMemory,omitempty"` - - // allow access only to the files specified in AllowedFiles - RestrictFileAccess bool `json:"RestrictFileAccess,omitempty"` - - // disable all oplocks except Level II - ForceLevelIIOplocks bool `json:"ForceLevelIIOplocks,omitempty"` - - // Allow the host to reparse this base layer - ReparseBaseLayer bool `json:"ReparseBaseLayer,omitempty"` - - // Enable pseudo-oplocks - PseudoOplocks bool `json:"PseudoOplocks,omitempty"` - - // All opens will use non-cached IO - NonCacheIo bool `json:"NonCacheIo,omitempty"` - - // Enable pseudo directory change notifications - PseudoDirnotify bool `json:"PseudoDirnotify,omitempty"` - - // Block directory enumeration, renames, and 
deletes. - SingleFileMapping bool `json:"SingleFileMapping,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go deleted file mode 100644 index 8e1836d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VmMemory struct { - AvailableMemory int32 `json:"AvailableMemory,omitempty"` - - AvailableMemoryBuffer int32 `json:"AvailableMemoryBuffer,omitempty"` - - ReservedMemory uint64 `json:"ReservedMemory,omitempty"` - - AssignedMemory uint64 `json:"AssignedMemory,omitempty"` - - SlpActive bool `json:"SlpActive,omitempty"` - - BalancingEnabled bool `json:"BalancingEnabled,omitempty"` - - DmOperationInProgress bool `json:"DmOperationInProgress,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go deleted file mode 100644 index de1b9cf..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// ProcessorLimits is used when modifying processor scheduling limits of a virtual machine. -type ProcessorLimits struct { - // Maximum amount of host CPU resources that the virtual machine can use. 
- Limit uint64 `json:"Limit,omitempty"` - // Value describing the relative priority of this virtual machine compared to other virtual machines. - Weight uint64 `json:"Weight,omitempty"` - // Minimum amount of host CPU resources that the virtual machine is guaranteed. - Reservation uint64 `json:"Reservation,omitempty"` - // Provides the target maximum CPU frequency, in MHz, for a virtual machine. - MaximumFrequencyMHz uint32 `json:"MaximumFrequencyMHz,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go deleted file mode 100644 index 8ed7e56..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type WindowsCrashReporting struct { - DumpFileName string `json:"DumpFileName,omitempty"` - - MaxDumpSize int64 `json:"MaxDumpSize,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go deleted file mode 100644 index a634dfc..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go +++ /dev/null @@ -1,49 +0,0 @@ -package hcs - -import ( - "context" - "encoding/json" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/vmcompute" -) - -// GetServiceProperties returns properties of the host compute service. 
-func GetServiceProperties(ctx context.Context, q hcsschema.PropertyQuery) (*hcsschema.ServiceProperties, error) { - operation := "hcs::GetServiceProperties" - - queryb, err := json.Marshal(q) - if err != nil { - return nil, err - } - propertiesJSON, resultJSON, err := vmcompute.HcsGetServiceProperties(ctx, string(queryb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, &HcsError{Op: operation, Err: err, Events: events} - } - - if propertiesJSON == "" { - return nil, ErrUnexpectedValue - } - properties := &hcsschema.ServiceProperties{} - if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { - return nil, err - } - return properties, nil -} - -// ModifyServiceSettings modifies settings of the host compute service. -func ModifyServiceSettings(ctx context.Context, settings hcsschema.ModificationRequest) error { - operation := "hcs::ModifyServiceSettings" - - settingsJSON, err := json.Marshal(settings) - if err != nil { - return err - } - resultJSON, err := vmcompute.HcsModifyServiceSettings(ctx, string(settingsJSON)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return &HcsError{Op: operation, Err: err, Events: events} - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go deleted file mode 100644 index 75499c9..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ /dev/null @@ -1,637 +0,0 @@ -package hcs - -import ( - "context" - "encoding/json" - "errors" - "strings" - "sync" - "syscall" - - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/timeout" - "github.com/Microsoft/hcsshim/internal/vmcompute" - "go.opencensus.io/trace" -) - -type 
System struct { - handleLock sync.RWMutex - handle vmcompute.HcsSystem - id string - callbackNumber uintptr - - closedWaitOnce sync.Once - waitBlock chan struct{} - waitError error - exitError error - os, typ string -} - -func newSystem(id string) *System { - return &System{ - id: id, - waitBlock: make(chan struct{}), - } -} - -// CreateComputeSystem creates a new compute system with the given configuration but does not start it. -func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) { - operation := "hcs::CreateComputeSystem" - - // hcsCreateComputeSystemContext is an async operation. Start the outer span - // here to measure the full create time. - ctx, span := trace.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", id)) - - computeSystem := newSystem(id) - - hcsDocumentB, err := json.Marshal(hcsDocumentInterface) - if err != nil { - return nil, err - } - - hcsDocument := string(hcsDocumentB) - - var ( - identity syscall.Handle - resultJSON string - createError error - ) - computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity) - if createError == nil || IsPending(createError) { - defer func() { - if err != nil { - computeSystem.Close() - } - }() - if err = computeSystem.registerCallback(ctx); err != nil { - // Terminate the compute system if it still exists. We're okay to - // ignore a failure here. - _ = computeSystem.Terminate(ctx) - return nil, makeSystemError(computeSystem, operation, err, nil) - } - } - - events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) - if err != nil { - if err == ErrTimeout { - // Terminate the compute system if it still exists. We're okay to - // ignore a failure here. 
- _ = computeSystem.Terminate(ctx) - } - return nil, makeSystemError(computeSystem, operation, err, events) - } - go computeSystem.waitBackground() - if err = computeSystem.getCachedProperties(ctx); err != nil { - return nil, err - } - return computeSystem, nil -} - -// OpenComputeSystem opens an existing compute system by ID. -func OpenComputeSystem(ctx context.Context, id string) (*System, error) { - operation := "hcs::OpenComputeSystem" - - computeSystem := newSystem(id) - handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - computeSystem.handle = handle - defer func() { - if err != nil { - computeSystem.Close() - } - }() - if err = computeSystem.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - go computeSystem.waitBackground() - if err = computeSystem.getCachedProperties(ctx); err != nil { - return nil, err - } - return computeSystem, nil -} - -func (computeSystem *System) getCachedProperties(ctx context.Context) error { - props, err := computeSystem.Properties(ctx) - if err != nil { - return err - } - computeSystem.typ = strings.ToLower(props.SystemType) - computeSystem.os = strings.ToLower(props.RuntimeOSType) - if computeSystem.os == "" && computeSystem.typ == "container" { - // Pre-RS5 HCS did not return the OS, but it only supported containers - // that ran Windows. - computeSystem.os = "windows" - } - return nil -} - -// OS returns the operating system of the compute system, "linux" or "windows". -func (computeSystem *System) OS() string { - return computeSystem.os -} - -// IsOCI returns whether processes in the compute system should be created via -// OCI. 
-func (computeSystem *System) IsOCI() bool { - return computeSystem.os == "linux" && computeSystem.typ == "container" -} - -// GetComputeSystems gets a list of the compute systems on the system that match the query -func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) { - operation := "hcs::GetComputeSystems" - - queryb, err := json.Marshal(q) - if err != nil { - return nil, err - } - - computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, &HcsError{Op: operation, Err: err, Events: events} - } - - if computeSystemsJSON == "" { - return nil, ErrUnexpectedValue - } - computeSystems := []schema1.ContainerProperties{} - if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil { - return nil, err - } - - return computeSystems, nil -} - -// Start synchronously starts the computeSystem. -func (computeSystem *System) Start(ctx context.Context) (err error) { - operation := "hcs::System::Start" - - // hcsStartComputeSystemContext is an async operation. Start the outer span - // here to measure the full start time. - ctx, span := trace.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} - -// ID returns the compute system's identifier. 
-func (computeSystem *System) ID() string { - return computeSystem.id -} - -// Shutdown requests a compute system shutdown. -func (computeSystem *System) Shutdown(ctx context.Context) error { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Shutdown" - - if computeSystem.handle == 0 { - return nil - } - - resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") - events := processHcsResult(ctx, resultJSON) - switch err { - case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: - default: - return makeSystemError(computeSystem, operation, err, events) - } - return nil -} - -// Terminate requests a compute system terminate. -func (computeSystem *System) Terminate(ctx context.Context) error { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Terminate" - - if computeSystem.handle == 0 { - return nil - } - - resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") - events := processHcsResult(ctx, resultJSON) - switch err { - case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: - default: - return makeSystemError(computeSystem, operation, err, events) - } - return nil -} - -// waitBackground waits for the compute system exit notification. Once received -// sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls. -// -// This MUST be called exactly once per `computeSystem.handle` but `Wait` is -// safe to call multiple times. 
-func (computeSystem *System) waitBackground() { - operation := "hcs::System::waitBackground" - ctx, span := trace.StartSpan(context.Background(), operation) - defer span.End() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - switch err { - case nil: - log.G(ctx).Debug("system exited") - case ErrVmcomputeUnexpectedExit: - log.G(ctx).Debug("unexpected system exit") - computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) - err = nil - default: - err = makeSystemError(computeSystem, operation, err, nil) - } - computeSystem.closedWaitOnce.Do(func() { - computeSystem.waitError = err - close(computeSystem.waitBlock) - }) - oc.SetSpanStatus(span, err) -} - -// Wait synchronously waits for the compute system to shutdown or terminate. If -// the compute system has already exited returns the previous error (if any). -func (computeSystem *System) Wait() error { - <-computeSystem.waitBlock - return computeSystem.waitError -} - -// ExitError returns an error describing the reason the compute system terminated. -func (computeSystem *System) ExitError() error { - select { - case <-computeSystem.waitBlock: - if computeSystem.waitError != nil { - return computeSystem.waitError - } - return computeSystem.exitError - default: - return errors.New("container not exited") - } -} - -// Properties returns the requested container properties targeting a V1 schema container. 
-func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Properties" - - queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types}) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - - if propertiesJSON == "" { - return nil, ErrUnexpectedValue - } - properties := &schema1.ContainerProperties{} - if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - return properties, nil -} - -// PropertiesV2 returns the requested container properties targeting a V2 schema container. 
-func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::PropertiesV2" - - queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types}) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - - if propertiesJSON == "" { - return nil, ErrUnexpectedValue - } - properties := &hcsschema.Properties{} - if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - return properties, nil -} - -// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5. -func (computeSystem *System) Pause(ctx context.Context) (err error) { - operation := "hcs::System::Pause" - - // hcsPauseComputeSystemContext is an async peration. Start the outer span - // here to measure the full pause time. 
- ctx, span := trace.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} - -// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5. -func (computeSystem *System) Resume(ctx context.Context) (err error) { - operation := "hcs::System::Resume" - - // hcsResumeComputeSystemContext is an async operation. Start the outer span - // here to measure the full restore time. - ctx, span := trace.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} - -// Save the compute system -func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) { - operation := "hcs::System::Save" - - // hcsSaveComputeSystemContext is an async peration. 
Start the outer span - // here to measure the full save time. - ctx, span := trace.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - saveOptions, err := json.Marshal(options) - if err != nil { - return err - } - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - result, err := vmcompute.HcsSaveComputeSystem(ctx, computeSystem.handle, string(saveOptions)) - events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, hcsNotificationSystemSaveCompleted, &timeout.SystemSave) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} - -func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return nil, nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - configurationb, err := json.Marshal(c) - if err != nil { - return nil, nil, makeSystemError(computeSystem, operation, err, nil) - } - - configuration := string(configurationb) - processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, nil, makeSystemError(computeSystem, operation, err, events) - } - - log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid") - return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil -} - -// CreateProcess launches a new process within the computeSystem. 
-func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) { - operation := "hcs::System::CreateProcess" - process, processInfo, err := computeSystem.createProcess(ctx, operation, c) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - process.Close() - } - }() - - pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - process.stdin = pipes[0] - process.stdout = pipes[1] - process.stderr = pipes[2] - process.hasCachedStdio = true - - if err = process.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - go process.waitBackground() - - return process, nil -} - -// OpenProcess gets an interface to an existing process within the computeSystem. -func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::OpenProcess" - - if computeSystem.handle == 0 { - return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - - process := newProcess(processHandle, pid, computeSystem) - if err = process.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - go process.waitBackground() - - return process, nil -} - -// Close cleans up any state associated with the compute system but does not terminate or wait for it. 
-func (computeSystem *System) Close() (err error) { - operation := "hcs::System::Close" - ctx, span := trace.StartSpan(context.Background(), operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.Lock() - defer computeSystem.handleLock.Unlock() - - // Don't double free this - if computeSystem.handle == 0 { - return nil - } - - if err = computeSystem.unregisterCallback(ctx); err != nil { - return makeSystemError(computeSystem, operation, err, nil) - } - - err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle) - if err != nil { - return makeSystemError(computeSystem, operation, err, nil) - } - - computeSystem.handle = 0 - computeSystem.closedWaitOnce.Do(func() { - computeSystem.waitError = ErrAlreadyClosed - close(computeSystem.waitBlock) - }) - - return nil -} - -func (computeSystem *System) registerCallback(ctx context.Context) error { - callbackContext := ¬ificationWatcherContext{ - channels: newSystemChannels(), - systemID: computeSystem.id, - } - - callbackMapLock.Lock() - callbackNumber := nextCallback - nextCallback++ - callbackMap[callbackNumber] = callbackContext - callbackMapLock.Unlock() - - callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber) - if err != nil { - return err - } - callbackContext.handle = callbackHandle - computeSystem.callbackNumber = callbackNumber - - return nil -} - -func (computeSystem *System) unregisterCallback(ctx context.Context) error { - callbackNumber := computeSystem.callbackNumber - - callbackMapLock.RLock() - callbackContext := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if callbackContext == nil { - return nil - } - - handle := callbackContext.handle - - if handle == 0 { - return nil - } - - // hcsUnregisterComputeSystemCallback has its own syncronization - // to wait for all callbacks to 
complete. We must NOT hold the callbackMapLock. - err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle) - if err != nil { - return err - } - - closeChannels(callbackContext.channels) - - callbackMapLock.Lock() - delete(callbackMap, callbackNumber) - callbackMapLock.Unlock() - - handle = 0 //nolint:ineffassign - - return nil -} - -// Modify the System by sending a request to HCS -func (computeSystem *System) Modify(ctx context.Context, config interface{}) error { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Modify" - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - requestBytes, err := json.Marshal(config) - if err != nil { - return err - } - - requestJSON := string(requestBytes) - resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go deleted file mode 100644 index 3342e5b..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go +++ /dev/null @@ -1,62 +0,0 @@ -package hcs - -import ( - "context" - "io" - "syscall" - - "github.com/Microsoft/go-winio" - diskutil "github.com/Microsoft/go-winio/vhd" - "github.com/Microsoft/hcsshim/computestorage" - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles -// if there is an error. 
-func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { - fs := make([]io.ReadWriteCloser, len(hs)) - for i, h := range hs { - if h != syscall.Handle(0) { - if err == nil { - fs[i], err = winio.MakeOpenFile(h) - } - if err != nil { - syscall.Close(h) - } - } - } - if err != nil { - for _, f := range fs { - if f != nil { - f.Close() - } - } - return nil, err - } - return fs, nil -} - -// CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`. -func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) { - if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil { - return errors.Wrap(err, "failed to create VHD") - } - - vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone) - if err != nil { - return errors.Wrap(err, "failed to open VHD") - } - defer func() { - err2 := windows.CloseHandle(windows.Handle(vhd)) - if err == nil { - err = errors.Wrap(err2, "failed to close VHD") - } - }() - - if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(vhd)); err != nil { - return errors.Wrap(err, "failed to format VHD") - } - - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go deleted file mode 100644 index db4e14f..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go +++ /dev/null @@ -1,68 +0,0 @@ -package hcs - -import ( - "context" - "time" - - "github.com/Microsoft/hcsshim/internal/log" -) - -func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) { - events := processHcsResult(ctx, resultJSON) - if IsPending(err) { - return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout) - } - - return events, err -} - -func waitForNotification(ctx 
context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { - callbackMapLock.RLock() - if _, ok := callbackMap[callbackNumber]; !ok { - callbackMapLock.RUnlock() - log.G(ctx).WithField("callbackNumber", callbackNumber).Error("failed to waitForNotification: callbackNumber does not exist in callbackMap") - return ErrHandleClose - } - channels := callbackMap[callbackNumber].channels - callbackMapLock.RUnlock() - - expectedChannel := channels[expectedNotification] - if expectedChannel == nil { - log.G(ctx).WithField("type", expectedNotification).Error("unknown notification type in waitForNotification") - return ErrInvalidNotificationType - } - - var c <-chan time.Time - if timeout != nil { - timer := time.NewTimer(*timeout) - c = timer.C - defer timer.Stop() - } - - select { - case err, ok := <-expectedChannel: - if !ok { - return ErrHandleClose - } - return err - case err, ok := <-channels[hcsNotificationSystemExited]: - if !ok { - return ErrHandleClose - } - // If the expected notification is hcsNotificationSystemExited which of the two selects - // chosen is random. 
Return the raw error if hcsNotificationSystemExited is expected - if channels[hcsNotificationSystemExited] == expectedChannel { - return err - } - return ErrUnexpectedContainerExit - case _, ok := <-channels[hcsNotificationServiceDisconnect]: - if !ok { - return ErrHandleClose - } - // hcsNotificationServiceDisconnect should never be an expected notification - // it does not need the same handling as hcsNotificationSystemExited - return ErrUnexpectedProcessAbort - case <-c: - return ErrTimeout - } -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go deleted file mode 100644 index 921c2c8..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go +++ /dev/null @@ -1,47 +0,0 @@ -package hcserror - -import ( - "fmt" - "syscall" -) - -const ERROR_GEN_FAILURE = syscall.Errno(31) - -type HcsError struct { - title string - rest string - Err error -} - -func (e *HcsError) Error() string { - s := e.title - if len(s) > 0 && s[len(s)-1] != ' ' { - s += " " - } - s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, Win32FromError(e.Err)) - if e.rest != "" { - if e.rest[0] != ' ' { - s += " " - } - s += e.rest - } - return s -} - -func New(err error, title, rest string) error { - // Pass through DLL errors directly since they do not originate from HCS. 
- if _, ok := err.(*syscall.DLLError); ok { - return err - } - return &HcsError{title, rest, err} -} - -func Win32FromError(err error) uint32 { - if herr, ok := err.(*HcsError); ok { - return Win32FromError(herr.Err) - } - if code, ok := err.(syscall.Errno); ok { - return uint32(code) - } - return uint32(ERROR_GEN_FAILURE) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go deleted file mode 100644 index b2e475f..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go +++ /dev/null @@ -1,23 +0,0 @@ -package hns - -import "fmt" - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hns.go - -//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? - -type EndpointNotFoundError struct { - EndpointName string -} - -func (e EndpointNotFoundError) Error() string { - return fmt.Sprintf("Endpoint %s not found", e.EndpointName) -} - -type NetworkNotFoundError struct { - NetworkName string -} - -func (e NetworkNotFoundError) Error() string { - return fmt.Sprintf("Network %s not found", e.NetworkName) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go deleted file mode 100644 index 262714b..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go +++ /dev/null @@ -1,337 +0,0 @@ -package hns - -import ( - "encoding/json" - "net" - "strings" - - "github.com/sirupsen/logrus" -) - -// HNSEndpoint represents a network endpoint in HNS -type HNSEndpoint struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - VirtualNetwork string `json:",omitempty"` - VirtualNetworkName string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` - MacAddress string `json:",omitempty"` - IPAddress net.IP `json:",omitempty"` - IPv6Address net.IP `json:",omitempty"` - DNSSuffix string 
`json:",omitempty"` - DNSServerList string `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - GatewayAddressV6 string `json:",omitempty"` - EnableInternalDNS bool `json:",omitempty"` - DisableICC bool `json:",omitempty"` - PrefixLength uint8 `json:",omitempty"` - IPv6PrefixLength uint8 `json:",omitempty"` - IsRemoteEndpoint bool `json:",omitempty"` - EnableLowMetric bool `json:",omitempty"` - Namespace *Namespace `json:",omitempty"` - EncapOverhead uint16 `json:",omitempty"` - SharedContainers []string `json:",omitempty"` -} - -//SystemType represents the type of the system on which actions are done -type SystemType string - -// SystemType const -const ( - ContainerType SystemType = "Container" - VirtualMachineType SystemType = "VirtualMachine" - HostType SystemType = "Host" -) - -// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type EndpointAttachDetachRequest struct { - ContainerID string `json:"ContainerId,omitempty"` - SystemType SystemType `json:"SystemType"` - CompartmentID uint16 `json:"CompartmentId,omitempty"` - VirtualNICName string `json:"VirtualNicName,omitempty"` -} - -// EndpointResquestResponse is object to get the endpoint request response -type EndpointResquestResponse struct { - Success bool - Error string -} - -// EndpointStats is the object that has stats for a given endpoint -type EndpointStats struct { - BytesReceived uint64 `json:"BytesReceived"` - BytesSent uint64 `json:"BytesSent"` - DroppedPacketsIncoming uint64 `json:"DroppedPacketsIncoming"` - DroppedPacketsOutgoing uint64 `json:"DroppedPacketsOutgoing"` - EndpointID string `json:"EndpointId"` - InstanceID string `json:"InstanceId"` - PacketsReceived uint64 `json:"PacketsReceived"` - PacketsSent uint64 `json:"PacketsSent"` -} - -// HNSEndpointRequest makes a HNS call to modify/query a network endpoint -func HNSEndpointRequest(method, path, 
request string) (*HNSEndpoint, error) { - endpoint := &HNSEndpoint{} - err := hnsCall(method, "/endpoints/"+path, request, &endpoint) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -// HNSListEndpointRequest makes a HNS call to query the list of available endpoints -func HNSListEndpointRequest() ([]HNSEndpoint, error) { - var endpoint []HNSEndpoint - err := hnsCall("GET", "/endpoints/", "", &endpoint) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -// hnsEndpointStatsRequest makes a HNS call to query the stats for a given endpoint ID -func hnsEndpointStatsRequest(id string) (*EndpointStats, error) { - var stats EndpointStats - err := hnsCall("GET", "/endpointstats/"+id, "", &stats) - if err != nil { - return nil, err - } - - return &stats, nil -} - -// GetHNSEndpointByID get the Endpoint by ID -func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { - return HNSEndpointRequest("GET", endpointID, "") -} - -// GetHNSEndpointStats get the stats for a n Endpoint by ID -func GetHNSEndpointStats(endpointID string) (*EndpointStats, error) { - return hnsEndpointStatsRequest(endpointID) -} - -// GetHNSEndpointByName gets the endpoint filtered by Name -func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { - hnsResponse, err := HNSListEndpointRequest() - if err != nil { - return nil, err - } - for _, hnsEndpoint := range hnsResponse { - if hnsEndpoint.Name == endpointName { - return &hnsEndpoint, nil - } - } - return nil, EndpointNotFoundError{EndpointName: endpointName} -} - -type endpointAttachInfo struct { - SharedContainers json.RawMessage `json:",omitempty"` -} - -func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) { - attachInfo := endpointAttachInfo{} - err := hnsCall("GET", "/endpoints/"+endpoint.Id, "", &attachInfo) - - // Return false allows us to just return the err - if err != nil { - return false, err - } - - if 
strings.Contains(strings.ToLower(string(attachInfo.SharedContainers)), strings.ToLower(vID)) { - return true, nil - } - - return false, nil - -} - -// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods -func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) { - operation := "Create" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - jsonString, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - return HNSEndpointRequest("POST", "", string(jsonString)) -} - -// Delete Endpoint by sending EndpointRequest to HNS -func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) { - operation := "Delete" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - return HNSEndpointRequest("DELETE", endpoint.Id, "") -} - -// Update Endpoint -func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) { - operation := "Update" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - jsonString, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint) - - return endpoint, err -} - -// ApplyACLPolicy applies a set of ACL Policies on the Endpoint -func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { - operation := "ApplyACLPolicy" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - for _, policy := range policies { - if policy == nil { - continue - } - jsonString, err := json.Marshal(policy) - if err != nil { - return err - } - endpoint.Policies = append(endpoint.Policies, jsonString) - } - - _, err := endpoint.Update() - return err -} - -// ApplyProxyPolicy applies a set of Proxy Policies on the Endpoint -func (endpoint *HNSEndpoint) ApplyProxyPolicy(policies ...*ProxyPolicy) error { - operation := 
"ApplyProxyPolicy" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - for _, policy := range policies { - if policy == nil { - continue - } - jsonString, err := json.Marshal(policy) - if err != nil { - return err - } - endpoint.Policies = append(endpoint.Policies, jsonString) - } - - _, err := endpoint.Update() - return err -} - -// ContainerAttach attaches an endpoint to container -func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { - operation := "ContainerAttach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - ContainerID: containerID, - CompartmentID: compartmentID, - SystemType: ContainerType, - } - response := &EndpointResquestResponse{} - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) -} - -// ContainerDetach detaches an endpoint from container -func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error { - operation := "ContainerDetach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - ContainerID: containerID, - SystemType: ContainerType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} - -// HostAttach attaches a nic on the host -func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error { - operation := "HostAttach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - CompartmentID: compartmentID, - SystemType: HostType, - } - response := 
&EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) - -} - -// HostDetach detaches a nic on the host -func (endpoint *HNSEndpoint) HostDetach() error { - operation := "HostDetach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - SystemType: HostType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} - -// VirtualMachineNICAttach attaches a endpoint to a virtual machine -func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error { - operation := "VirtualMachineNicAttach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - VirtualNICName: virtualMachineNICName, - SystemType: VirtualMachineType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) -} - -// VirtualMachineNICDetach detaches a endpoint from a virtual machine -func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error { - operation := "VirtualMachineNicDetach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - SystemType: VirtualMachineType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go deleted file mode 100644 index 2df4a57..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go +++ /dev/null @@ -1,49 +0,0 @@ -package hns - -import ( - "encoding/json" - "fmt" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) { - var responseBuffer *uint16 - logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) - - err := _hnsCall(method, path, request, &responseBuffer) - if err != nil { - return nil, hcserror.New(err, "hnsCall ", "") - } - response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) - - hnsresponse := &hnsResponse{} - if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { - return nil, err - } - return hnsresponse, nil -} - -func hnsCall(method, path, request string, returnResponse interface{}) error { - hnsresponse, err := hnsCallRawResponse(method, path, request) - if err != nil { - return fmt.Errorf("failed during hnsCallRawResponse: %v", err) - } - if !hnsresponse.Success { - return fmt.Errorf("hns failed with error : %s", hnsresponse.Error) - } - - if len(hnsresponse.Output) == 0 { - return nil - } - - logrus.Debugf("Network Response : %s", hnsresponse.Output) - err = json.Unmarshal(hnsresponse.Output, returnResponse) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go deleted file mode 100644 index a8d8cc5..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go +++ /dev/null @@ -1,28 +0,0 @@ -package hns - -type HNSGlobals struct { - Version HNSVersion `json:"Version"` -} - -type HNSVersion struct { - Major int `json:"Major"` - Minor int 
`json:"Minor"` -} - -var ( - HNSVersion1803 = HNSVersion{Major: 7, Minor: 2} -) - -func GetHNSGlobals() (*HNSGlobals, error) { - var version HNSVersion - err := hnsCall("GET", "/globals/version", "", &version) - if err != nil { - return nil, err - } - - globals := &HNSGlobals{ - Version: version, - } - - return globals, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go deleted file mode 100644 index f12d3ab..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go +++ /dev/null @@ -1,141 +0,0 @@ -package hns - -import ( - "encoding/json" - "errors" - "github.com/sirupsen/logrus" - "net" -) - -// Subnet is assoicated with a network and represents a list -// of subnets available to the network -type Subnet struct { - AddressPrefix string `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` -} - -// MacPool is assoicated with a network and represents a list -// of macaddresses available to the network -type MacPool struct { - StartMacAddress string `json:",omitempty"` - EndMacAddress string `json:",omitempty"` -} - -// HNSNetwork represents a network in HNS -type HNSNetwork struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - Type string `json:",omitempty"` - NetworkAdapterName string `json:",omitempty"` - SourceMac string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` - MacPools []MacPool `json:",omitempty"` - Subnets []Subnet `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` - DNSServerCompartment uint32 `json:",omitempty"` - ManagementIP string `json:",omitempty"` - AutomaticDNS bool `json:",omitempty"` -} - -type hnsResponse struct { - Success bool - Error string - Output json.RawMessage -} - -// HNSNetworkRequest makes a call into HNS to update/query a single network -func 
HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { - var network HNSNetwork - err := hnsCall(method, "/networks/"+path, request, &network) - if err != nil { - return nil, err - } - - return &network, nil -} - -// HNSListNetworkRequest makes a HNS call to query the list of available networks -func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { - var network []HNSNetwork - err := hnsCall(method, "/networks/"+path, request, &network) - if err != nil { - return nil, err - } - - return network, nil -} - -// GetHNSNetworkByID -func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) { - return HNSNetworkRequest("GET", networkID, "") -} - -// GetHNSNetworkName filtered by Name -func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { - hsnnetworks, err := HNSListNetworkRequest("GET", "", "") - if err != nil { - return nil, err - } - for _, hnsnetwork := range hsnnetworks { - if hnsnetwork.Name == networkName { - return &hnsnetwork, nil - } - } - return nil, NetworkNotFoundError{NetworkName: networkName} -} - -// Create Network by sending NetworkRequest to HNS. 
-func (network *HNSNetwork) Create() (*HNSNetwork, error) { - operation := "Create" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s", network.Id) - - for _, subnet := range network.Subnets { - if (subnet.AddressPrefix != "") && (subnet.GatewayAddress == "") { - return nil, errors.New("network create error, subnet has address prefix but no gateway specified") - } - } - - jsonString, err := json.Marshal(network) - if err != nil { - return nil, err - } - return HNSNetworkRequest("POST", "", string(jsonString)) -} - -// Delete Network by sending NetworkRequest to HNS -func (network *HNSNetwork) Delete() (*HNSNetwork, error) { - operation := "Delete" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s", network.Id) - - return HNSNetworkRequest("DELETE", network.Id, "") -} - -// Creates an endpoint on the Network. -func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint { - return &HNSEndpoint{ - VirtualNetwork: network.Id, - IPAddress: ipAddress, - MacAddress: string(macAddress), - } -} - -func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { - operation := "CreateEndpoint" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id) - - endpoint.VirtualNetwork = network.Id - return endpoint.Create() -} - -func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { - operation := "CreateRemoteEndpoint" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s", network.Id) - endpoint.IsRemoteEndpoint = true - return network.CreateEndpoint(endpoint) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go deleted file mode 100644 index 6765aae..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go +++ /dev/null @@ -1,109 +0,0 
@@ -package hns - -// Type of Request Support in ModifySystem -type PolicyType string - -// RequestType const -const ( - Nat PolicyType = "NAT" - ACL PolicyType = "ACL" - PA PolicyType = "PA" - VLAN PolicyType = "VLAN" - VSID PolicyType = "VSID" - VNet PolicyType = "VNET" - L2Driver PolicyType = "L2Driver" - Isolation PolicyType = "Isolation" - QOS PolicyType = "QOS" - OutboundNat PolicyType = "OutBoundNAT" - ExternalLoadBalancer PolicyType = "ELB" - Route PolicyType = "ROUTE" - Proxy PolicyType = "PROXY" -) - -type NatPolicy struct { - Type PolicyType `json:"Type"` - Protocol string - InternalPort uint16 - ExternalPort uint16 -} - -type QosPolicy struct { - Type PolicyType `json:"Type"` - MaximumOutgoingBandwidthInBytes uint64 -} - -type IsolationPolicy struct { - Type PolicyType `json:"Type"` - VLAN uint - VSID uint - InDefaultIsolation bool -} - -type VlanPolicy struct { - Type PolicyType `json:"Type"` - VLAN uint -} - -type VsidPolicy struct { - Type PolicyType `json:"Type"` - VSID uint -} - -type PaPolicy struct { - Type PolicyType `json:"Type"` - PA string `json:"PA"` -} - -type OutboundNatPolicy struct { - Policy - VIP string `json:"VIP,omitempty"` - Exceptions []string `json:"ExceptionList,omitempty"` - Destinations []string `json:",omitempty"` -} - -type ProxyPolicy struct { - Type PolicyType `json:"Type"` - IP string `json:",omitempty"` - Port string `json:",omitempty"` - ExceptionList []string `json:",omitempty"` - Destination string `json:",omitempty"` - OutboundNat bool `json:",omitempty"` -} - -type ActionType string -type DirectionType string -type RuleType string - -const ( - Allow ActionType = "Allow" - Block ActionType = "Block" - - In DirectionType = "In" - Out DirectionType = "Out" - - Host RuleType = "Host" - Switch RuleType = "Switch" -) - -type ACLPolicy struct { - Type PolicyType `json:"Type"` - Id string `json:"Id,omitempty"` - Protocol uint16 - Protocols string `json:"Protocols,omitempty"` - InternalPort uint16 - Action ActionType - 
Direction DirectionType - LocalAddresses string - RemoteAddresses string - LocalPorts string `json:"LocalPorts,omitempty"` - LocalPort uint16 - RemotePorts string `json:"RemotePorts,omitempty"` - RemotePort uint16 - RuleType RuleType `json:"RuleType,omitempty"` - Priority uint16 - ServiceName string -} - -type Policy struct { - Type PolicyType `json:"Type"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go deleted file mode 100644 index 31322a6..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go +++ /dev/null @@ -1,201 +0,0 @@ -package hns - -import ( - "encoding/json" - - "github.com/sirupsen/logrus" -) - -// RoutePolicy is a structure defining schema for Route based Policy -type RoutePolicy struct { - Policy - DestinationPrefix string `json:"DestinationPrefix,omitempty"` - NextHop string `json:"NextHop,omitempty"` - EncapEnabled bool `json:"NeedEncap,omitempty"` -} - -// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy -type ELBPolicy struct { - LBPolicy - SourceVIP string `json:"SourceVIP,omitempty"` - VIPs []string `json:"VIPs,omitempty"` - ILB bool `json:"ILB,omitempty"` - DSR bool `json:"IsDSR,omitempty"` -} - -// LBPolicy is a structure defining schema for LoadBalancing based Policy -type LBPolicy struct { - Policy - Protocol uint16 `json:"Protocol,omitempty"` - InternalPort uint16 - ExternalPort uint16 -} - -// PolicyList is a structure defining schema for Policy list request -type PolicyList struct { - ID string `json:"ID,omitempty"` - EndpointReferences []string `json:"References,omitempty"` - Policies []json.RawMessage `json:"Policies,omitempty"` -} - -// HNSPolicyListRequest makes a call into HNS to update/query a single network -func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { - var policy PolicyList - err := hnsCall(method, "/policylists/"+path, request, &policy) - if 
err != nil { - return nil, err - } - - return &policy, nil -} - -// HNSListPolicyListRequest gets all the policy list -func HNSListPolicyListRequest() ([]PolicyList, error) { - var plist []PolicyList - err := hnsCall("GET", "/policylists/", "", &plist) - if err != nil { - return nil, err - } - - return plist, nil -} - -// PolicyListRequest makes a HNS call to modify/query a network policy list -func PolicyListRequest(method, path, request string) (*PolicyList, error) { - policylist := &PolicyList{} - err := hnsCall(method, "/policylists/"+path, request, &policylist) - if err != nil { - return nil, err - } - - return policylist, nil -} - -// GetPolicyListByID get the policy list by ID -func GetPolicyListByID(policyListID string) (*PolicyList, error) { - return PolicyListRequest("GET", policyListID, "") -} - -// Create PolicyList by sending PolicyListRequest to HNS. -func (policylist *PolicyList) Create() (*PolicyList, error) { - operation := "Create" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s", policylist.ID) - jsonString, err := json.Marshal(policylist) - if err != nil { - return nil, err - } - return PolicyListRequest("POST", "", string(jsonString)) -} - -// Delete deletes PolicyList -func (policylist *PolicyList) Delete() (*PolicyList, error) { - operation := "Delete" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s", policylist.ID) - - return PolicyListRequest("DELETE", policylist.ID, "") -} - -// AddEndpoint add an endpoint to a Policy List -func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { - operation := "AddEndpoint" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) - - _, err := policylist.Delete() - if err != nil { - return nil, err - } - - // Add Endpoint to the Existing List - policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) - - return 
policylist.Create() -} - -// RemoveEndpoint removes an endpoint from the Policy List -func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { - operation := "RemoveEndpoint" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) - - _, err := policylist.Delete() - if err != nil { - return nil, err - } - - elementToRemove := "/endpoints/" + endpoint.Id - - var references []string - - for _, endpointReference := range policylist.EndpointReferences { - if endpointReference == elementToRemove { - continue - } - references = append(references, endpointReference) - } - policylist.EndpointReferences = references - return policylist.Create() -} - -// AddLoadBalancer policy list for the specified endpoints -func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { - operation := "AddLoadBalancer" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) - - policylist := &PolicyList{} - - elbPolicy := &ELBPolicy{ - SourceVIP: sourceVIP, - ILB: isILB, - } - - if len(vip) > 0 { - elbPolicy.VIPs = []string{vip} - } - elbPolicy.Type = ExternalLoadBalancer - elbPolicy.Protocol = protocol - elbPolicy.InternalPort = internalPort - elbPolicy.ExternalPort = externalPort - - for _, endpoint := range endpoints { - policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) - } - - jsonString, err := json.Marshal(elbPolicy) - if err != nil { - return nil, err - } - policylist.Policies = append(policylist.Policies, jsonString) - return policylist.Create() -} - -// AddRoute adds route policy list for the specified endpoints -func AddRoute(endpoints []HNSEndpoint, 
destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { - operation := "AddRoute" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" destinationPrefix:%s", destinationPrefix) - - policylist := &PolicyList{} - - rPolicy := &RoutePolicy{ - DestinationPrefix: destinationPrefix, - NextHop: nextHop, - EncapEnabled: encapEnabled, - } - rPolicy.Type = Route - - for _, endpoint := range endpoints { - policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) - } - - jsonString, err := json.Marshal(rPolicy) - if err != nil { - return nil, err - } - - policylist.Policies = append(policylist.Policies, jsonString) - return policylist.Create() -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go deleted file mode 100644 index d5efba7..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go +++ /dev/null @@ -1,49 +0,0 @@ -package hns - -import ( - "github.com/sirupsen/logrus" -) - -type HNSSupportedFeatures struct { - Acl HNSAclFeatures `json:"ACL"` -} - -type HNSAclFeatures struct { - AclAddressLists bool `json:"AclAddressLists"` - AclNoHostRulePriority bool `json:"AclHostRulePriority"` - AclPortRanges bool `json:"AclPortRanges"` - AclRuleId bool `json:"AclRuleId"` -} - -func GetHNSSupportedFeatures() HNSSupportedFeatures { - var hnsFeatures HNSSupportedFeatures - - globals, err := GetHNSGlobals() - if err != nil { - // Expected on pre-1803 builds, all features will be false/unsupported - logrus.Debugf("Unable to obtain HNS globals: %s", err) - return hnsFeatures - } - - hnsFeatures.Acl = HNSAclFeatures{ - AclAddressLists: isHNSFeatureSupported(globals.Version, HNSVersion1803), - AclNoHostRulePriority: isHNSFeatureSupported(globals.Version, HNSVersion1803), - AclPortRanges: isHNSFeatureSupported(globals.Version, HNSVersion1803), - AclRuleId: isHNSFeatureSupported(globals.Version, 
HNSVersion1803), - } - - return hnsFeatures -} - -func isHNSFeatureSupported(currentVersion HNSVersion, minVersionSupported HNSVersion) bool { - if currentVersion.Major < minVersionSupported.Major { - return false - } - if currentVersion.Major > minVersionSupported.Major { - return true - } - if currentVersion.Minor < minVersionSupported.Minor { - return false - } - return true -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go deleted file mode 100644 index d3b04ee..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go +++ /dev/null @@ -1,111 +0,0 @@ -package hns - -import ( - "encoding/json" - "fmt" - "os" - "path" - "strings" -) - -type namespaceRequest struct { - IsDefault bool `json:",omitempty"` -} - -type namespaceEndpointRequest struct { - ID string `json:"Id"` -} - -type NamespaceResource struct { - Type string - Data json.RawMessage -} - -type namespaceResourceRequest struct { - Type string - Data interface{} -} - -type Namespace struct { - ID string - IsDefault bool `json:",omitempty"` - ResourceList []NamespaceResource `json:",omitempty"` - CompartmentId uint32 `json:",omitempty"` -} - -func issueNamespaceRequest(id *string, method, subpath string, request interface{}) (*Namespace, error) { - var err error - hnspath := "/namespaces/" - if id != nil { - hnspath = path.Join(hnspath, *id) - } - if subpath != "" { - hnspath = path.Join(hnspath, subpath) - } - var reqJSON []byte - if request != nil { - if reqJSON, err = json.Marshal(request); err != nil { - return nil, err - } - } - var ns Namespace - err = hnsCall(method, hnspath, string(reqJSON), &ns) - if err != nil { - if strings.Contains(err.Error(), "Element not found.") { - return nil, os.ErrNotExist - } - return nil, fmt.Errorf("%s %s: %s", method, hnspath, err) - } - return &ns, err -} - -func CreateNamespace() (string, error) { - req := namespaceRequest{} - ns, err := 
issueNamespaceRequest(nil, "POST", "", &req) - if err != nil { - return "", err - } - return ns.ID, nil -} - -func RemoveNamespace(id string) error { - _, err := issueNamespaceRequest(&id, "DELETE", "", nil) - return err -} - -func GetNamespaceEndpoints(id string) ([]string, error) { - ns, err := issueNamespaceRequest(&id, "GET", "", nil) - if err != nil { - return nil, err - } - var endpoints []string - for _, rsrc := range ns.ResourceList { - if rsrc.Type == "Endpoint" { - var endpoint namespaceEndpointRequest - err = json.Unmarshal(rsrc.Data, &endpoint) - if err != nil { - return nil, fmt.Errorf("unmarshal endpoint: %s", err) - } - endpoints = append(endpoints, endpoint.ID) - } - } - return endpoints, nil -} - -func AddNamespaceEndpoint(id string, endpointID string) error { - resource := namespaceResourceRequest{ - Type: "Endpoint", - Data: namespaceEndpointRequest{endpointID}, - } - _, err := issueNamespaceRequest(&id, "POST", "addresource", &resource) - return err -} - -func RemoveNamespaceEndpoint(id string, endpointID string) error { - resource := namespaceResourceRequest{ - Type: "Endpoint", - Data: namespaceEndpointRequest{endpointID}, - } - _, err := issueNamespaceRequest(&id, "POST", "removeresource", &resource) - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go deleted file mode 100644 index 204633a..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go +++ /dev/null @@ -1,76 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package hns - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. 
-const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") - - procHNSCall = modvmcompute.NewProc("HNSCall") -) - -func _hnsCall(method string, path string, object string, response **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(method) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(object) - if hr != nil { - return - } - return __hnsCall(_p0, _p1, _p2, response) -} - -func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { - if hr = procHNSCall.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go deleted file mode 100644 index 922f7c6..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go +++ /dev/null @@ -1,23 +0,0 @@ -package interop - -import ( - "syscall" - "unsafe" -) - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go - -//sys coTaskMemFree(buffer unsafe.Pointer) = 
api_ms_win_core_com_l1_1_0.CoTaskMemFree - -func ConvertAndFreeCoTaskMemString(buffer *uint16) string { - str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:]) - coTaskMemFree(unsafe.Pointer(buffer)) - return str -} - -func Win32FromHresult(hr uintptr) syscall.Errno { - if hr&0x1fff0000 == 0x00070000 { - return syscall.Errno(hr & 0xffff) - } - return syscall.Errno(hr) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go deleted file mode 100644 index 12b0c71..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go +++ /dev/null @@ -1,48 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package interop - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modapi_ms_win_core_com_l1_1_0 = windows.NewLazySystemDLL("api-ms-win-core-com-l1-1-0.dll") - - procCoTaskMemFree = modapi_ms_win_core_com_l1_1_0.NewProc("CoTaskMemFree") -) - -func coTaskMemFree(buffer unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0) - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/g.go b/vendor/github.com/Microsoft/hcsshim/internal/log/g.go deleted file mode 100644 index ba6b1a4..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/g.go +++ /dev/null @@ -1,23 +0,0 @@ -package log - -import ( - "context" - - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -// G returns a `logrus.Entry` with the `TraceID, SpanID` from `ctx` if `ctx` -// contains an OpenCensus `trace.Span`. -func G(ctx context.Context) *logrus.Entry { - span := trace.FromContext(ctx) - if span != nil { - sctx := span.SpanContext() - return logrus.WithFields(logrus.Fields{ - "traceID": sctx.TraceID.String(), - "spanID": sctx.SpanID.String(), - // "parentSpanID": TODO: JTERRY75 - Try to convince OC to export this? - }) - } - return logrus.NewEntry(logrus.StandardLogger()) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go deleted file mode 100644 index cf2c166..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go +++ /dev/null @@ -1,32 +0,0 @@ -package logfields - -const ( - // Identifiers - - ContainerID = "cid" - UVMID = "uvm-id" - ProcessID = "pid" - - // Common Misc - - // Timeout represents an operation timeout. 
- Timeout = "timeout" - JSON = "json" - - // Keys/values - - Field = "field" - OCIAnnotation = "oci-annotation" - Value = "value" - - // Golang type's - - ExpectedType = "expected-type" - Bool = "bool" - Uint32 = "uint32" - Uint64 = "uint64" - - // runhcs - - VMShimOperation = "vmshim-op" -) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go b/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go deleted file mode 100644 index e5b8b85..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go +++ /dev/null @@ -1,24 +0,0 @@ -package longpath - -import ( - "path/filepath" - "strings" -) - -// LongAbs makes a path absolute and returns it in NT long path form. -func LongAbs(path string) (string, error) { - if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) { - return path, nil - } - if !filepath.IsAbs(path) { - absPath, err := filepath.Abs(path) - if err != nil { - return "", err - } - path = absPath - } - if strings.HasPrefix(path, `\\`) { - return `\\?\UNC\` + path[2:], nil - } - return `\\?\` + path, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go b/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go deleted file mode 100644 index 7e95efb..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go +++ /dev/null @@ -1,52 +0,0 @@ -package mergemaps - -import "encoding/json" - -// Merge recursively merges map `fromMap` into map `ToMap`. Any pre-existing values -// in ToMap are overwritten. Values in fromMap are added to ToMap. 
-// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang -func Merge(fromMap, ToMap interface{}) interface{} { - switch fromMap := fromMap.(type) { - case map[string]interface{}: - ToMap, ok := ToMap.(map[string]interface{}) - if !ok { - return fromMap - } - for keyToMap, valueToMap := range ToMap { - if valueFromMap, ok := fromMap[keyToMap]; ok { - fromMap[keyToMap] = Merge(valueFromMap, valueToMap) - } else { - fromMap[keyToMap] = valueToMap - } - } - case nil: - // merge(nil, map[string]interface{...}) -> map[string]interface{...} - ToMap, ok := ToMap.(map[string]interface{}) - if ok { - return ToMap - } - } - return fromMap -} - -// MergeJSON merges the contents of a JSON string into an object representation, -// returning a new object suitable for translating to JSON. -func MergeJSON(object interface{}, additionalJSON []byte) (interface{}, error) { - if len(additionalJSON) == 0 { - return object, nil - } - objectJSON, err := json.Marshal(object) - if err != nil { - return nil, err - } - var objectMap, newMap map[string]interface{} - err = json.Unmarshal(objectJSON, &objectMap) - if err != nil { - return nil, err - } - err = json.Unmarshal(additionalJSON, &newMap) - if err != nil { - return nil, err - } - return Merge(newMap, objectMap), nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go deleted file mode 100644 index f428bda..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go +++ /dev/null @@ -1,43 +0,0 @@ -package oc - -import ( - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -var _ = (trace.Exporter)(&LogrusExporter{}) - -// LogrusExporter is an OpenCensus `trace.Exporter` that exports -// `trace.SpanData` to logrus output. -type LogrusExporter struct { -} - -// ExportSpan exports `s` based on the the following rules: -// -// 1. 
All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`, -// `s.ParentSpanID` for correlation -// -// 2. Any calls to .Annotate will not be supported. -// -// 3. The span itself will be written at `logrus.InfoLevel` unless -// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel` -// providing `s.Status.Message` as the error value. -func (le *LogrusExporter) ExportSpan(s *trace.SpanData) { - // Combine all span annotations with traceID, spanID, parentSpanID - baseEntry := logrus.WithFields(logrus.Fields(s.Attributes)) - baseEntry.Data["traceID"] = s.TraceID.String() - baseEntry.Data["spanID"] = s.SpanID.String() - baseEntry.Data["parentSpanID"] = s.ParentSpanID.String() - baseEntry.Data["startTime"] = s.StartTime - baseEntry.Data["endTime"] = s.EndTime - baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String() - baseEntry.Data["name"] = s.Name - baseEntry.Time = s.StartTime - - level := logrus.InfoLevel - if s.Status.Code != 0 { - level = logrus.ErrorLevel - baseEntry.Data[logrus.ErrorKey] = s.Status.Message - } - baseEntry.Log(level, "Span") -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go deleted file mode 100644 index fee4765..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go +++ /dev/null @@ -1,17 +0,0 @@ -package oc - -import ( - "go.opencensus.io/trace" -) - -// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If -// `err` is `nil` assumes `trace.StatusCodeOk`. 
-func SetSpanStatus(span *trace.Span, err error) { - status := trace.Status{} - if err != nil { - // TODO: JTERRY75 - Handle errors in a non-generic way - status.Code = trace.StatusCodeUnknown - status.Message = err.Error() - } - span.SetStatus(status) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go b/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go deleted file mode 100644 index 6086c1d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go +++ /dev/null @@ -1,288 +0,0 @@ -package regstate - -import ( - "encoding/json" - "fmt" - "net/url" - "os" - "path/filepath" - "reflect" - "syscall" - - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/registry" -) - -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go regstate.go - -//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW - -const ( - _REG_OPTION_VOLATILE = 1 - - _REG_OPENED_EXISTING_KEY = 2 -) - -type Key struct { - registry.Key - Name string -} - -var localMachine = &Key{registry.LOCAL_MACHINE, "HKEY_LOCAL_MACHINE"} -var localUser = &Key{registry.CURRENT_USER, "HKEY_CURRENT_USER"} - -var rootPath = `SOFTWARE\Microsoft\runhcs` - -type NotFoundError struct { - Id string -} - -func (err *NotFoundError) Error() string { - return fmt.Sprintf("ID '%s' was not found", err.Id) -} - -func IsNotFoundError(err error) bool { - _, ok := err.(*NotFoundError) - return ok -} - -type NoStateError struct { - ID string - Key string -} - -func (err *NoStateError) Error() string { - return fmt.Sprintf("state '%s' is not present for ID '%s'", err.Key, err.ID) -} - -func createVolatileKey(k *Key, path string, access uint32) (newk *Key, openedExisting bool, err error) { - var ( - h syscall.Handle - d uint32 - ) - fullpath := 
filepath.Join(k.Name, path) - pathPtr, _ := windows.UTF16PtrFromString(path) - err = regCreateKeyEx(syscall.Handle(k.Key), pathPtr, 0, nil, _REG_OPTION_VOLATILE, access, nil, &h, &d) - if err != nil { - return nil, false, &os.PathError{Op: "RegCreateKeyEx", Path: fullpath, Err: err} - } - return &Key{registry.Key(h), fullpath}, d == _REG_OPENED_EXISTING_KEY, nil -} - -func hive(perUser bool) *Key { - r := localMachine - if perUser { - r = localUser - } - return r -} - -func Open(root string, perUser bool) (*Key, error) { - k, _, err := createVolatileKey(hive(perUser), rootPath, registry.ALL_ACCESS) - if err != nil { - return nil, err - } - defer k.Close() - - k2, _, err := createVolatileKey(k, url.PathEscape(root), registry.ALL_ACCESS) - if err != nil { - return nil, err - } - return k2, nil -} - -func RemoveAll(root string, perUser bool) error { - k, err := hive(perUser).open(rootPath) - if err != nil { - return err - } - defer k.Close() - r, err := k.open(url.PathEscape(root)) - if err != nil { - return err - } - defer r.Close() - ids, err := r.Enumerate() - if err != nil { - return err - } - for _, id := range ids { - err = r.Remove(id) - if err != nil { - return err - } - } - r.Close() - return k.Remove(root) -} - -func (k *Key) Close() error { - err := k.Key.Close() - k.Key = 0 - return err -} - -func (k *Key) Enumerate() ([]string, error) { - escapedIDs, err := k.ReadSubKeyNames(0) - if err != nil { - return nil, err - } - var ids []string - for _, e := range escapedIDs { - id, err := url.PathUnescape(e) - if err == nil { - ids = append(ids, id) - } - } - return ids, nil -} - -func (k *Key) open(name string) (*Key, error) { - fullpath := filepath.Join(k.Name, name) - nk, err := registry.OpenKey(k.Key, name, registry.ALL_ACCESS) - if err != nil { - return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err} - } - return &Key{nk, fullpath}, nil -} - -func (k *Key) openid(id string) (*Key, error) { - escaped := url.PathEscape(id) - fullpath := 
filepath.Join(k.Name, escaped) - nk, err := k.open(escaped) - if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { - return nil, &NotFoundError{id} - } - if err != nil { - return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err} - } - return nk, nil -} - -func (k *Key) Remove(id string) error { - escaped := url.PathEscape(id) - err := registry.DeleteKey(k.Key, escaped) - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NotFoundError{id} - } - return &os.PathError{Op: "RegDeleteKey", Path: filepath.Join(k.Name, escaped), Err: err} - } - return nil -} - -func (k *Key) set(id string, create bool, key string, state interface{}) error { - var sk *Key - var err error - if create { - var existing bool - eid := url.PathEscape(id) - sk, existing, err = createVolatileKey(k, eid, registry.ALL_ACCESS) - if err != nil { - return err - } - defer sk.Close() - if existing { - sk.Close() - return fmt.Errorf("container %s already exists", id) - } - } else { - sk, err = k.openid(id) - if err != nil { - return err - } - defer sk.Close() - } - switch reflect.TypeOf(state).Kind() { - case reflect.Bool: - v := uint32(0) - if state.(bool) { - v = 1 - } - err = sk.SetDWordValue(key, v) - case reflect.Int: - err = sk.SetQWordValue(key, uint64(state.(int))) - case reflect.String: - err = sk.SetStringValue(key, state.(string)) - default: - var js []byte - js, err = json.Marshal(state) - if err != nil { - return err - } - err = sk.SetBinaryValue(key, js) - } - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NoStateError{id, key} - } - return &os.PathError{Op: "RegSetValueEx", Path: sk.Name + ":" + key, Err: err} - } - return nil -} - -func (k *Key) Create(id, key string, state interface{}) error { - return k.set(id, true, key, state) -} - -func (k *Key) Set(id, key string, state interface{}) error { - return k.set(id, false, key, state) -} - -func (k *Key) Clear(id, key string) error { - sk, err := 
k.openid(id) - if err != nil { - return err - } - defer sk.Close() - err = sk.DeleteValue(key) - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NoStateError{id, key} - } - return &os.PathError{Op: "RegDeleteValue", Path: sk.Name + ":" + key, Err: err} - } - return nil -} - -func (k *Key) Get(id, key string, state interface{}) error { - sk, err := k.openid(id) - if err != nil { - return err - } - defer sk.Close() - - var js []byte - switch reflect.TypeOf(state).Elem().Kind() { - case reflect.Bool: - var v uint64 - v, _, err = sk.GetIntegerValue(key) - if err == nil { - *state.(*bool) = v != 0 - } - case reflect.Int: - var v uint64 - v, _, err = sk.GetIntegerValue(key) - if err == nil { - *state.(*int) = int(v) - } - case reflect.String: - var v string - v, _, err = sk.GetStringValue(key) - if err == nil { - *state.(*string) = string(v) - } - default: - js, _, err = sk.GetBinaryValue(key) - } - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NoStateError{id, key} - } - return &os.PathError{Op: "RegQueryValueEx", Path: sk.Name + ":" + key, Err: err} - } - if js != nil { - err = json.Unmarshal(js, state) - } - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go deleted file mode 100644 index 4e349ad..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package regstate - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. 
-func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - - procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") -) - -func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go deleted file mode 100644 index a161c20..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go +++ /dev/null @@ -1,71 +0,0 @@ -package runhcs - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "syscall" - "time" - - "github.com/Microsoft/go-winio/pkg/guid" -) - -// ContainerState represents the platform agnostic pieces relating to a -// running container's status and state -type ContainerState struct { - // Version is the OCI version for the container - Version string `json:"ociVersion"` - // ID is the container ID - ID string `json:"id"` - // InitProcessPid is the init process id in the parent namespace - InitProcessPid int `json:"pid"` - // Status is the current status of the container, running, paused, ... 
- Status string `json:"status"` - // Bundle is the path on the filesystem to the bundle - Bundle string `json:"bundle"` - // Rootfs is a path to a directory containing the container's root filesystem. - Rootfs string `json:"rootfs"` - // Created is the unix timestamp for the creation time of the container in UTC - Created time.Time `json:"created"` - // Annotations is the user defined annotations added to the config. - Annotations map[string]string `json:"annotations,omitempty"` - // The owner of the state directory (the owner of the container). - Owner string `json:"owner"` -} - -// GetErrorFromPipe returns reads from `pipe` and verifies if the operation -// returned success or error. If error converts that to an error and returns. If -// `p` is not nill will issue a `Kill` and `Wait` for exit. -func GetErrorFromPipe(pipe io.Reader, p *os.Process) error { - serr, err := ioutil.ReadAll(pipe) - if err != nil { - return err - } - - if bytes.Equal(serr, ShimSuccess) { - return nil - } - - extra := "" - if p != nil { - _ = p.Kill() - state, err := p.Wait() - if err != nil { - panic(err) - } - extra = fmt.Sprintf(", exit code %d", state.Sys().(syscall.WaitStatus).ExitCode) - } - if len(serr) == 0 { - return fmt.Errorf("unknown shim failure%s", extra) - } - - return errors.New(string(serr)) -} - -// VMPipePath returns the named pipe path for the vm shim. -func VMPipePath(hostUniqueID guid.GUID) string { - return SafePipePath("runhcs-vm-" + hostUniqueID.String()) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go deleted file mode 100644 index dcbb190..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go +++ /dev/null @@ -1,16 +0,0 @@ -package runhcs - -import "net/url" - -const ( - SafePipePrefix = `\\.\pipe\ProtectedPrefix\Administrators\` -) - -// ShimSuccess is the byte stream returned on a successful operation. 
-var ShimSuccess = []byte{0, 'O', 'K', 0} - -func SafePipePath(name string) string { - // Use a pipe in the Administrators protected prefixed to prevent malicious - // squatting. - return SafePipePrefix + url.PathEscape(name) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go deleted file mode 100644 index 2c8957b..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go +++ /dev/null @@ -1,43 +0,0 @@ -package runhcs - -import ( - "encoding/json" - - "github.com/Microsoft/go-winio" -) - -// VMRequestOp is an operation that can be issued to a VM shim. -type VMRequestOp string - -const ( - // OpCreateContainer is a create container request. - OpCreateContainer VMRequestOp = "create" - // OpSyncNamespace is a `cni.NamespaceTypeGuest` sync request with the UVM. - OpSyncNamespace VMRequestOp = "sync" - // OpUnmountContainer is a container unmount request. - OpUnmountContainer VMRequestOp = "unmount" - // OpUnmountContainerDiskOnly is a container unmount disk request. - OpUnmountContainerDiskOnly VMRequestOp = "unmount-disk" -) - -// VMRequest is an operation request that is issued to a VM shim. -type VMRequest struct { - ID string - Op VMRequestOp -} - -// IssueVMRequest issues a request to a shim at the given pipe. 
-func IssueVMRequest(pipepath string, req *VMRequest) error { - pipe, err := winio.DialPipe(pipepath, nil) - if err != nil { - return err - } - defer pipe.Close() - if err := json.NewEncoder(pipe).Encode(req); err != nil { - return err - } - if err := GetErrorFromPipe(pipe, nil); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go deleted file mode 100644 index 66b8d7e..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go +++ /dev/null @@ -1,375 +0,0 @@ -package safefile - -import ( - "errors" - "io" - "os" - "path/filepath" - "strings" - "syscall" - "unicode/utf16" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/longpath" - "github.com/Microsoft/hcsshim/internal/winapi" - - winio "github.com/Microsoft/go-winio" -) - -func OpenRoot(path string) (*os.File, error) { - longpath, err := longpath.LongAbs(path) - if err != nil { - return nil, err - } - return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING) -} - -func cleanGoStringRelativePath(path string) (string, error) { - path = filepath.Clean(path) - if strings.Contains(path, ":") { - // Since alternate data streams must follow the file they - // are attached to, finding one here (out of order) is invalid. 
- return "", errors.New("path contains invalid character `:`") - } - fspath := filepath.FromSlash(path) - if len(fspath) > 0 && fspath[0] == '\\' { - return "", errors.New("expected relative path") - } - return fspath, nil -} - -func ntRelativePath(path string) ([]uint16, error) { - fspath, err := cleanGoStringRelativePath(path) - if err != nil { - return nil, err - } - - path16 := utf16.Encode(([]rune)(fspath)) - if len(path16) > 32767 { - return nil, syscall.ENAMETOOLONG - } - - return path16, nil -} - -// openRelativeInternal opens a relative path from the given root, failing if -// any of the intermediate path components are reparse points. -func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { - var ( - h uintptr - iosb winapi.IOStatusBlock - oa winapi.ObjectAttributes - ) - - cleanRelativePath, err := cleanGoStringRelativePath(path) - if err != nil { - return nil, err - } - - if root == nil || root.Fd() == 0 { - return nil, errors.New("missing root directory") - } - - pathUnicode, err := winapi.NewUnicodeString(cleanRelativePath) - if err != nil { - return nil, err - } - - oa.Length = unsafe.Sizeof(oa) - oa.ObjectName = pathUnicode - oa.RootDirectory = uintptr(root.Fd()) - oa.Attributes = winapi.OBJ_DONT_REPARSE - status := winapi.NtCreateFile( - &h, - accessMask|syscall.SYNCHRONIZE, - &oa, - &iosb, - nil, - 0, - shareFlags, - createDisposition, - winapi.FILE_OPEN_FOR_BACKUP_INTENT|winapi.FILE_SYNCHRONOUS_IO_NONALERT|flags, - nil, - 0, - ) - if status != 0 { - return nil, winapi.RtlNtStatusToDosError(status) - } - - fullPath, err := longpath.LongAbs(filepath.Join(root.Name(), path)) - if err != nil { - syscall.Close(syscall.Handle(h)) - return nil, err - } - - return os.NewFile(h, fullPath), nil -} - -// OpenRelative opens a relative path from the given root, failing if -// any of the intermediate path components are reparse points. 
-func OpenRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { - f, err := openRelativeInternal(path, root, accessMask, shareFlags, createDisposition, flags) - if err != nil { - err = &os.PathError{Op: "open", Path: filepath.Join(root.Name(), path), Err: err} - } - return f, err -} - -// LinkRelative creates a hard link from oldname to newname (relative to oldroot -// and newroot), failing if any of the intermediate path components are reparse -// points. -func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error { - // Open the old file. - oldf, err := openRelativeInternal( - oldname, - oldroot, - syscall.FILE_WRITE_ATTRIBUTES, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - 0, - ) - if err != nil { - return &os.LinkError{Op: "link", Old: filepath.Join(oldroot.Name(), oldname), New: filepath.Join(newroot.Name(), newname), Err: err} - } - defer oldf.Close() - - // Open the parent of the new file. - var parent *os.File - parentPath := filepath.Dir(newname) - if parentPath != "." { - parent, err = openRelativeInternal( - parentPath, - newroot, - syscall.GENERIC_READ, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - winapi.FILE_DIRECTORY_FILE) - if err != nil { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err} - } - defer parent.Close() - - fi, err := winio.GetFileBasicInfo(parent) - if err != nil { - return err - } - if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: winapi.RtlNtStatusToDosError(winapi.STATUS_REPARSE_POINT_ENCOUNTERED)} - } - - } else { - parent = newroot - } - - // Issue an NT call to create the link. 
This will be safe because NT will - // not open any more directories to create the link, so it cannot walk any - // more reparse points. - newbase := filepath.Base(newname) - newbase16, err := ntRelativePath(newbase) - if err != nil { - return err - } - - size := int(unsafe.Offsetof(winapi.FileLinkInformation{}.FileName)) + len(newbase16)*2 - linkinfoBuffer := winapi.LocalAlloc(0, size) - defer winapi.LocalFree(linkinfoBuffer) - - linkinfo := (*winapi.FileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) - linkinfo.RootDirectory = parent.Fd() - linkinfo.FileNameLength = uint32(len(newbase16) * 2) - copy(winapi.Uint16BufferToSlice(&linkinfo.FileName[0], len(newbase16)), newbase16) - - var iosb winapi.IOStatusBlock - status := winapi.NtSetInformationFile( - oldf.Fd(), - &iosb, - linkinfoBuffer, - uint32(size), - winapi.FileLinkInformationClass, - ) - if status != 0 { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: winapi.RtlNtStatusToDosError(status)} - } - - return nil -} - -// deleteOnClose marks a file to be deleted when the handle is closed. -func deleteOnClose(f *os.File) error { - disposition := winapi.FileDispositionInformationEx{Flags: winapi.FILE_DISPOSITION_DELETE} - var iosb winapi.IOStatusBlock - status := winapi.NtSetInformationFile( - f.Fd(), - &iosb, - uintptr(unsafe.Pointer(&disposition)), - uint32(unsafe.Sizeof(disposition)), - winapi.FileDispositionInformationExClass, - ) - if status != 0 { - return winapi.RtlNtStatusToDosError(status) - } - return nil -} - -// clearReadOnly clears the readonly attribute on a file. 
-func clearReadOnly(f *os.File) error { - bi, err := winio.GetFileBasicInfo(f) - if err != nil { - return err - } - if bi.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY == 0 { - return nil - } - sbi := winio.FileBasicInfo{ - FileAttributes: bi.FileAttributes &^ syscall.FILE_ATTRIBUTE_READONLY, - } - if sbi.FileAttributes == 0 { - sbi.FileAttributes = syscall.FILE_ATTRIBUTE_NORMAL - } - return winio.SetFileBasicInfo(f, &sbi) -} - -// RemoveRelative removes a file or directory relative to a root, failing if any -// intermediate path components are reparse points. -func RemoveRelative(path string, root *os.File) error { - f, err := openRelativeInternal( - path, - root, - winapi.FILE_READ_ATTRIBUTES|winapi.FILE_WRITE_ATTRIBUTES|winapi.DELETE, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - winapi.FILE_OPEN_REPARSE_POINT) - if err == nil { - defer f.Close() - err = deleteOnClose(f) - if err == syscall.ERROR_ACCESS_DENIED { - // Maybe the file is marked readonly. Clear the bit and retry. - _ = clearReadOnly(f) - err = deleteOnClose(f) - } - } - if err != nil { - return &os.PathError{Op: "remove", Path: filepath.Join(root.Name(), path), Err: err} - } - return nil -} - -// RemoveAllRelative removes a directory tree relative to a root, failing if any -// intermediate path components are reparse points. -func RemoveAllRelative(path string, root *os.File) error { - fi, err := LstatRelative(path, root) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - fileAttributes := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes - if fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 || fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { - // If this is a reparse point, it can't have children. Simple remove will do. 
- err := RemoveRelative(path, root) - if err == nil || os.IsNotExist(err) { - return nil - } - return err - } - - // It is necessary to use os.Open as Readdirnames does not work with - // OpenRelative. This is safe because the above lstatrelative fails - // if the target is outside the root, and we know this is not a - // symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check. - fd, err := os.Open(filepath.Join(root.Name(), path)) - if err != nil { - if os.IsNotExist(err) { - // Race. It was deleted between the Lstat and Open. - // Return nil per RemoveAll's docs. - return nil - } - return err - } - - // Remove contents & return first error. - for { - names, err1 := fd.Readdirnames(100) - for _, name := range names { - err1 := RemoveAllRelative(path+string(os.PathSeparator)+name, root) - if err == nil { - err = err1 - } - } - if err1 == io.EOF { - break - } - // If Readdirnames returned an error, use it. - if err == nil { - err = err1 - } - if len(names) == 0 { - break - } - } - fd.Close() - - // Remove directory. - err1 := RemoveRelative(path, root) - if err1 == nil || os.IsNotExist(err1) { - return nil - } - if err == nil { - err = err1 - } - return err -} - -// MkdirRelative creates a directory relative to a root, failing if any -// intermediate path components are reparse points. -func MkdirRelative(path string, root *os.File) error { - f, err := openRelativeInternal( - path, - root, - 0, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_CREATE, - winapi.FILE_DIRECTORY_FILE) - if err == nil { - f.Close() - } else { - err = &os.PathError{Op: "mkdir", Path: filepath.Join(root.Name(), path), Err: err} - } - return err -} - -// LstatRelative performs a stat operation on a file relative to a root, failing -// if any intermediate path components are reparse points. 
-func LstatRelative(path string, root *os.File) (os.FileInfo, error) { - f, err := openRelativeInternal( - path, - root, - winapi.FILE_READ_ATTRIBUTES, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - winapi.FILE_OPEN_REPARSE_POINT) - if err != nil { - return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err} - } - defer f.Close() - return f.Stat() -} - -// EnsureNotReparsePointRelative validates that a given file (relative to a -// root) and all intermediate path components are not a reparse points. -func EnsureNotReparsePointRelative(path string, root *os.File) error { - // Perform an open with OBJ_DONT_REPARSE but without specifying FILE_OPEN_REPARSE_POINT. - f, err := OpenRelative( - path, - root, - 0, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - 0) - if err != nil { - return err - } - f.Close() - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go deleted file mode 100644 index eaf39fa..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go +++ /dev/null @@ -1,74 +0,0 @@ -package timeout - -import ( - "os" - "strconv" - "time" -) - -var ( - // defaultTimeout is the timeout for most operations that is not overridden. - defaultTimeout = 4 * time.Minute - - // defaultTimeoutTestdRetry is the retry loop timeout for testd to respond - // for a disk to come online in LCOW. - defaultTimeoutTestdRetry = 5 * time.Second -) - -// External variables for HCSShim consumers to use. 
-var ( - // SystemCreate is the timeout for creating a compute system - SystemCreate time.Duration = defaultTimeout - - // SystemStart is the timeout for starting a compute system - SystemStart time.Duration = defaultTimeout - - // SystemPause is the timeout for pausing a compute system - SystemPause time.Duration = defaultTimeout - - // SystemResume is the timeout for resuming a compute system - SystemResume time.Duration = defaultTimeout - - // SystemSave is the timeout for saving a compute system - SystemSave time.Duration = defaultTimeout - - // SyscallWatcher is the timeout before warning of a potential stuck platform syscall. - SyscallWatcher time.Duration = defaultTimeout - - // Tar2VHD is the timeout for the tar2vhd operation to complete - Tar2VHD time.Duration = defaultTimeout - - // ExternalCommandToStart is the timeout for external commands to start - ExternalCommandToStart = defaultTimeout - - // ExternalCommandToComplete is the timeout for external commands to complete. - // Generally this means copying data from their stdio pipes. 
- ExternalCommandToComplete = defaultTimeout - - // TestDRetryLoop is the timeout for testd retry loop when onlining a SCSI disk in LCOW - TestDRetryLoop = defaultTimeoutTestdRetry -) - -func init() { - SystemCreate = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMCREATE", SystemCreate) - SystemStart = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSTART", SystemStart) - SystemPause = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMPAUSE", SystemPause) - SystemResume = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMRESUME", SystemResume) - SystemSave = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSAVE", SystemSave) - SyscallWatcher = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSCALLWATCHER", SyscallWatcher) - Tar2VHD = durationFromEnvironment("HCSSHIM_TIMEOUT_TAR2VHD", Tar2VHD) - ExternalCommandToStart = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDSTART", ExternalCommandToStart) - ExternalCommandToComplete = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDCOMPLETE", ExternalCommandToComplete) - TestDRetryLoop = durationFromEnvironment("HCSSHIM_TIMEOUT_TESTDRETRYLOOP", TestDRetryLoop) -} - -func durationFromEnvironment(env string, defaultValue time.Duration) time.Duration { - envTimeout := os.Getenv(env) - if len(envTimeout) > 0 { - e, err := strconv.Atoi(envTimeout) - if err == nil && e > 0 { - return time.Second * time.Duration(e) - } - } - return defaultValue -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go deleted file mode 100644 index e7f114b..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go +++ /dev/null @@ -1,610 +0,0 @@ -package vmcompute - -import ( - gcontext "context" - "syscall" - "time" - - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/oc" - 
"github.com/Microsoft/hcsshim/internal/timeout" - "go.opencensus.io/trace" -) - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go vmcompute.go - -//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? -//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? -//sys hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? -//sys hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? -//sys hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? -//sys hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? -//sys hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? -//sys hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? -//sys hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? -//sys hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? -//sys hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? -//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings? 
-//sys hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? -//sys hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? -//sys hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsSaveComputeSystem? - -//sys hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? -//sys hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? -//sys hcsCloseProcess(process HcsProcess) (hr error) = vmcompute.HcsCloseProcess? -//sys hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? -//sys hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsSignalProcess? -//sys hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? -//sys hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? -//sys hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? -//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? -//sys hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? -//sys hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? 
- -// errVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously -const errVmcomputeOperationPending = syscall.Errno(0xC0370103) - -// HcsSystem is the handle associated with a created compute system. -type HcsSystem syscall.Handle - -// HcsProcess is the handle associated with a created process in a compute -// system. -type HcsProcess syscall.Handle - -// HcsCallback is the handle associated with the function to call when events -// occur. -type HcsCallback syscall.Handle - -// HcsProcessInformation is the structure used when creating or getting process -// info. -type HcsProcessInformation struct { - // ProcessId is the pid of the created process. - ProcessId uint32 - reserved uint32 //nolint:structcheck - // StdInput is the handle associated with the stdin of the process. - StdInput syscall.Handle - // StdOutput is the handle associated with the stdout of the process. - StdOutput syscall.Handle - // StdError is the handle associated with the stderr of the process. - StdError syscall.Handle -} - -func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error { - if timeout > 0 { - var cancel gcontext.CancelFunc - ctx, cancel = gcontext.WithTimeout(ctx, timeout) - defer cancel() - } - - done := make(chan error, 1) - go func() { - done <- f() - }() - select { - case <-ctx.Done(): - if ctx.Err() == gcontext.DeadlineExceeded { - log.G(ctx).WithField(logfields.Timeout, timeout). - Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. 
If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.") - } - return ctx.Err() - case err := <-done: - return err - } -} - -func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSystems, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsEnumerateComputeSystems") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("query", query)) - - return computeSystems, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - computeSystemsp *uint16 - resultp *uint16 - ) - err := hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) - if computeSystemsp != nil { - computeSystems = interop.ConvertAndFreeCoTaskMemString(computeSystemsp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration string, identity syscall.Handle) (computeSystem HcsSystem, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCreateComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes( - trace.StringAttribute("id", id), - trace.StringAttribute("configuration", configuration)) - - return computeSystem, result, execute(ctx, timeout.SystemCreate, func() error { - var resultp *uint16 - err := hcsCreateComputeSystem(id, configuration, identity, &computeSystem, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSystem, result string, hr 
error) { - ctx, span := trace.StartSpan(ctx, "HcsOpenComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return computeSystem, result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsOpenComputeSystem(id, &computeSystem, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCloseComputeSystem") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsCloseComputeSystem(computeSystem) - }) -} - -func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsStartComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SystemStart, func() error { - var resultp *uint16 - err := hcsStartComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsShutdownComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - 
- return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsShutdownComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsTerminateComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsTerminateComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsPauseComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SystemPause, func() error { - var resultp *uint16 - err := hcsPauseComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsResumeComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) 
- } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SystemResume, func() error { - var resultp *uint16 - err := hcsResumeComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem, propertyQuery string) (properties, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetComputeSystemProperties") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) - - return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - propertiesp *uint16 - resultp *uint16 - ) - err := hcsGetComputeSystemProperties(computeSystem, propertyQuery, &propertiesp, &resultp) - if propertiesp != nil { - properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, configuration string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsModifyComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("configuration", configuration)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsModifyComputeSystem(computeSystem, configuration, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func 
HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsModifyServiceSettings") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("settings", settings)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsModifyServiceSettings(settings, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsRegisterComputeSystemCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { - return hcsRegisterComputeSystemCallback(computeSystem, callback, context, &callbackHandle) - }) -} - -func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { - ctx, span := trace.StartSpan(ctx, "HcsUnregisterComputeSystemCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsUnregisterComputeSystemCallback(callbackHandle) - }) -} - -func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processParameters string) (processInformation HcsProcessInformation, process HcsProcess, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCreateProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("processParameters", processParameters)) - - return 
processInformation, process, result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsCreateProcess(computeSystem, processParameters, &processInformation, &process, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) (process HcsProcess, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsOpenProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.Int64Attribute("pid", int64(pid))) - - return process, result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsOpenProcess(computeSystem, pid, &process, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCloseProcess") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsCloseProcess(process) - }) -} - -func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsTerminateProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsTerminateProcess(process, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string) (result string, hr error) { - ctx, span := 
trace.StartSpan(ctx, "HcsSignalProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsSignalProcess(process, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInformation HcsProcessInformation, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetProcessInfo") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return processInformation, result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsGetProcessInfo(process, &processInformation, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processProperties, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetProcessProperties") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return processProperties, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - processPropertiesp *uint16 - resultp *uint16 - ) - err := hcsGetProcessProperties(process, &processPropertiesp, &resultp) - if processPropertiesp != nil { - processProperties = interop.ConvertAndFreeCoTaskMemString(processPropertiesp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsModifyProcess(ctx gcontext.Context, process 
HcsProcess, settings string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsModifyProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("settings", settings)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsModifyProcess(process, settings, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (properties, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetServiceProperties") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) - - return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - propertiesp *uint16 - resultp *uint16 - ) - err := hcsGetServiceProperties(propertyQuery, &propertiesp, &resultp) - if propertiesp != nil { - properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsRegisterProcessCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { - return hcsRegisterProcessCallback(process, callback, context, &callbackHandle) - }) -} - -func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { - ctx, span := trace.StartSpan(ctx, 
"HcsUnregisterProcessCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsUnregisterProcessCallback(callbackHandle) - }) -} - -func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsSaveComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsSaveComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go deleted file mode 100644 index cae5505..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go +++ /dev/null @@ -1,581 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package vmcompute - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") - - procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems") - procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem") - procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem") - procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem") - procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem") - procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem") - procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem") - procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem") - procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") - procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") - procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") - procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") - procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") - procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") - procHcsSaveComputeSystem = modvmcompute.NewProc("HcsSaveComputeSystem") - procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") - procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") - procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") - procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") - procHcsSignalProcess = modvmcompute.NewProc("HcsSignalProcess") - procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") - procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") - procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") - procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") - procHcsRegisterProcessCallback = 
modvmcompute.NewProc("HcsRegisterProcessCallback") - procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") -) - -func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcsEnumerateComputeSystems(_p0, computeSystems, result) -} - -func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) { - if hr = procHcsEnumerateComputeSystems.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(configuration) - if hr != nil { - return - } - return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result) -} - -func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { - if hr = procHcsCreateComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = 
syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _hcsOpenComputeSystem(_p0, computeSystem, result) -} - -func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16) (hr error) { - if hr = procHcsOpenComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { - if hr = procHcsCloseComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsStartComputeSystem(computeSystem, _p0, result) -} - -func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsStartComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsShutdownComputeSystem(computeSystem, _p0, result) -} - -func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) 
{ - if hr = procHcsShutdownComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsTerminateComputeSystem(computeSystem, _p0, result) -} - -func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsTerminateComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsPauseComputeSystem(computeSystem, _p0, result) -} - -func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsPauseComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return 
_hcsResumeComputeSystem(computeSystem, _p0, result) -} - -func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsResumeComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(propertyQuery) - if hr != nil { - return - } - return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) -} - -func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcsGetComputeSystemProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(configuration) - if hr != nil { - return - } - return _hcsModifyComputeSystem(computeSystem, _p0, result) -} - -func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, result **uint16) (hr error) { - if hr = procHcsModifyComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) - if 
int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsModifyServiceSettings(settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcsModifyServiceSettings(_p0, result) -} - -func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { - if hr = procHcsModifyServiceSettings.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { - if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { - if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsSaveComputeSystem(computeSystem, _p0, result) -} - -func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, 
result **uint16) (hr error) { - if hr = procHcsSaveComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(processParameters) - if hr != nil { - return - } - return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) -} - -func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { - if hr = procHcsCreateProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) { - if hr = procHcsOpenProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsCloseProcess(process HcsProcess) (hr error) { - if hr = procHcsCloseProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) - 
if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) { - if hr = procHcsTerminateProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsSignalProcess(process, _p0, result) -} - -func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr error) { - if hr = procHcsSignalProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) { - if hr = procHcsGetProcessInfo.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) { - if hr = procHcsGetProcessProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 
0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcsModifyProcess(process, _p0, result) -} - -func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (hr error) { - if hr = procHcsModifyProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(propertyQuery) - if hr != nil { - return - } - return _hcsGetServiceProperties(_p0, properties, result) -} - -func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcsGetServiceProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { - if hr = procHcsRegisterProcessCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func 
hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) { - if hr = procHcsUnregisterProcessCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go deleted file mode 100644 index ff81ac2..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go +++ /dev/null @@ -1,27 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// ActivateLayer will find the layer with the given id and mount it's filesystem. -// For a read/write layer, the mounted filesystem will appear as a volume on the -// host, while a read-only layer is generally expected to be a no-op. -// An activated layer must later be deactivated via DeactivateLayer. 
-func ActivateLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::ActivateLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = activateLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title+" - failed", "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go deleted file mode 100644 index 3ec708d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go +++ /dev/null @@ -1,182 +0,0 @@ -package wclayer - -import ( - "context" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/safefile" - "github.com/Microsoft/hcsshim/internal/winapi" - "go.opencensus.io/trace" -) - -type baseLayerWriter struct { - ctx context.Context - s *trace.Span - - root *os.File - f *os.File - bw *winio.BackupFileWriter - err error - hasUtilityVM bool - dirInfo []dirInfo -} - -type dirInfo struct { - path string - fileInfo winio.FileBasicInfo -} - -// reapplyDirectoryTimes reapplies directory modification, creation, etc. times -// after processing of the directory tree has completed. The times are expected -// to be ordered such that parent directories come before child directories. 
-func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { - for i := range dis { - di := &dis[len(dis)-i-1] // reverse order: process child directories first - f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_OPEN, winapi.FILE_DIRECTORY_FILE|syscall.FILE_FLAG_OPEN_REPARSE_POINT) - if err != nil { - return err - } - - err = winio.SetFileBasicInfo(f, &di.fileInfo) - f.Close() - if err != nil { - return err - } - - } - return nil -} - -func (w *baseLayerWriter) closeCurrentFile() error { - if w.f != nil { - err := w.bw.Close() - err2 := w.f.Close() - w.f = nil - w.bw = nil - if err != nil { - return err - } - if err2 != nil { - return err2 - } - } - return nil -} - -func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) { - defer func() { - if err != nil { - w.err = err - } - }() - - err = w.closeCurrentFile() - if err != nil { - return err - } - - if filepath.ToSlash(name) == `UtilityVM/Files` { - w.hasUtilityVM = true - } - - var f *os.File - defer func() { - if f != nil { - f.Close() - } - }() - - extraFlags := uint32(0) - if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - extraFlags |= winapi.FILE_DIRECTORY_FILE - w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) - } - - mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) - f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, extraFlags) - if err != nil { - return hcserror.New(err, "Failed to safefile.OpenRelative", name) - } - - err = winio.SetFileBasicInfo(f, fileInfo) - if err != nil { - return hcserror.New(err, "Failed to SetFileBasicInfo", name) - } - - w.f = f - w.bw = winio.NewBackupFileWriter(f, true) - f = nil - return nil -} - -func (w *baseLayerWriter) AddLink(name string, target string) (err error) { - defer func() { - if err != nil { - w.err = 
err - } - }() - - err = w.closeCurrentFile() - if err != nil { - return err - } - - return safefile.LinkRelative(target, w.root, name, w.root) -} - -func (w *baseLayerWriter) Remove(name string) error { - return errors.New("base layer cannot have tombstones") -} - -func (w *baseLayerWriter) Write(b []byte) (int, error) { - n, err := w.bw.Write(b) - if err != nil { - w.err = err - } - return n, err -} - -func (w *baseLayerWriter) Close() (err error) { - defer w.s.End() - defer func() { oc.SetSpanStatus(w.s, err) }() - defer func() { - w.root.Close() - w.root = nil - }() - - err = w.closeCurrentFile() - if err != nil { - return err - } - if w.err == nil { - // Restore the file times of all the directories, since they may have - // been modified by creating child directories. - err = reapplyDirectoryTimes(w.root, w.dirInfo) - if err != nil { - return err - } - - err = ProcessBaseLayer(w.ctx, w.root.Name()) - if err != nil { - return err - } - - if w.hasUtilityVM { - err := safefile.EnsureNotReparsePointRelative("UtilityVM", w.root) - if err != nil { - return err - } - err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM")) - if err != nil { - return err - } - } - } - return w.err -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go deleted file mode 100644 index ffee31a..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go +++ /dev/null @@ -1,27 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// CreateLayer creates a new, empty, read-only layer on the filesystem based on -// the parent layer provided. 
-func CreateLayer(ctx context.Context, path, parent string) (err error) { - title := "hcsshim::CreateLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parent", parent)) - - err = createLayer(&stdDriverInfo, path, parent) - if err != nil { - return hcserror.New(err, title+" - failed", "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go deleted file mode 100644 index 5a3809a..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go +++ /dev/null @@ -1,34 +0,0 @@ -package wclayer - -import ( - "context" - "strings" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// CreateScratchLayer creates and populates new read-write layer for use by a container. 
-// This requires the full list of paths to all parent layers up to the base -func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { - title := "hcsshim::CreateScratchLayer" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - err = createSandboxLayer(&stdDriverInfo, path, 0, layers) - if err != nil { - return hcserror.New(err, title+" - failed", "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go deleted file mode 100644 index d5bf2f5..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go +++ /dev/null @@ -1,24 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// DeactivateLayer will dismount a layer that was mounted via ActivateLayer. 
-func DeactivateLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::DeactivateLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = deactivateLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title+"- failed", "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go deleted file mode 100644 index 787054e..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go +++ /dev/null @@ -1,25 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// DestroyLayer will remove the on-disk files representing the layer with the given -// path, including that layer's containing folder, if any. 
-func DestroyLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::DestroyLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = destroyLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title+" - failed", "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go deleted file mode 100644 index 22f7605..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go +++ /dev/null @@ -1,140 +0,0 @@ -package wclayer - -import ( - "context" - "os" - "path/filepath" - "syscall" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/osversion" - "go.opencensus.io/trace" -) - -// ExpandScratchSize expands the size of a layer to at least size bytes. -func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) { - title := "hcsshim::ExpandScratchSize" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.Int64Attribute("size", int64(size))) - - err = expandSandboxSize(&stdDriverInfo, path, size) - if err != nil { - return hcserror.New(err, title+" - failed", "") - } - - // Manually expand the volume now in order to work around bugs in 19H1 and - // prerelease versions of Vb. Remove once this is fixed in Windows. 
- if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 { - err = expandSandboxVolume(ctx, path) - if err != nil { - return err - } - } - return nil -} - -type virtualStorageType struct { - DeviceID uint32 - VendorID [16]byte -} - -type openVersion2 struct { - GetInfoOnly int32 // bool but 4-byte aligned - ReadOnly int32 // bool but 4-byte aligned - ResiliencyGUID [16]byte // GUID -} - -type openVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 - Version2 openVersion2 -} - -func attachVhd(path string) (syscall.Handle, error) { - var ( - defaultType virtualStorageType - handle syscall.Handle - ) - parameters := openVirtualDiskParameters{Version: 2} - err := openVirtualDisk( - &defaultType, - path, - 0, - 0, - ¶meters, - &handle) - if err != nil { - return 0, &os.PathError{Op: "OpenVirtualDisk", Path: path, Err: err} - } - err = attachVirtualDisk(handle, 0, 0, 0, 0, 0) - if err != nil { - syscall.Close(handle) - return 0, &os.PathError{Op: "AttachVirtualDisk", Path: path, Err: err} - } - return handle, nil -} - -func expandSandboxVolume(ctx context.Context, path string) error { - // Mount the sandbox VHD temporarily. - vhdPath := filepath.Join(path, "sandbox.vhdx") - vhd, err := attachVhd(vhdPath) - if err != nil { - return &os.PathError{Op: "OpenVirtualDisk", Path: vhdPath, Err: err} - } - defer syscall.Close(vhd) - - // Open the volume. - volumePath, err := GetLayerMountPath(ctx, path) - if err != nil { - return err - } - if volumePath[len(volumePath)-1] == '\\' { - volumePath = volumePath[:len(volumePath)-1] - } - volume, err := os.OpenFile(volumePath, os.O_RDWR, 0) - if err != nil { - return err - } - defer volume.Close() - - // Get the volume's underlying partition size in NTFS clusters. 
- var ( - partitionSize int64 - bytes uint32 - ) - const _IOCTL_DISK_GET_LENGTH_INFO = 0x0007405C - err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _IOCTL_DISK_GET_LENGTH_INFO, nil, 0, (*byte)(unsafe.Pointer(&partitionSize)), 8, &bytes, nil) - if err != nil { - return &os.PathError{Op: "IOCTL_DISK_GET_LENGTH_INFO", Path: volume.Name(), Err: err} - } - const ( - clusterSize = 4096 - sectorSize = 512 - ) - targetClusters := partitionSize / clusterSize - - // Get the volume's current size in NTFS clusters. - var volumeSize int64 - err = getDiskFreeSpaceEx(volume.Name()+"\\", nil, &volumeSize, nil) - if err != nil { - return &os.PathError{Op: "GetDiskFreeSpaceEx", Path: volume.Name(), Err: err} - } - volumeClusters := volumeSize / clusterSize - - // Only resize the volume if there is space to grow, otherwise this will - // fail with invalid parameter. NTFS reserves one cluster. - if volumeClusters+1 < targetClusters { - targetSectors := targetClusters * (clusterSize / sectorSize) - const _FSCTL_EXTEND_VOLUME = 0x000900F0 - err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _FSCTL_EXTEND_VOLUME, (*byte)(unsafe.Pointer(&targetSectors)), 8, nil, 0, &bytes, nil) - if err != nil { - return &os.PathError{Op: "FSCTL_EXTEND_VOLUME", Path: volume.Name(), Err: err} - } - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go deleted file mode 100644 index 09f0de1..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go +++ /dev/null @@ -1,94 +0,0 @@ -package wclayer - -import ( - "context" - "io/ioutil" - "os" - "strings" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// ExportLayer will create a folder at exportFolderPath and fill that folder with -// the transport format version of the layer identified by 
layerId. This transport -// format includes any metadata required for later importing the layer (using -// ImportLayer), and requires the full list of parent layer paths in order to -// perform the export. -func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) { - title := "hcsshim::ExportLayer" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("exportFolderPath", exportFolderPath), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers) - if err != nil { - return hcserror.New(err, title+" - failed", "") - } - return nil -} - -type LayerReader interface { - Next() (string, int64, *winio.FileBasicInfo, error) - Read(b []byte) (int, error) - Close() error -} - -// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. -// The caller must have taken the SeBackupPrivilege privilege -// to call this and any methods on the resulting LayerReader. 
-func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) { - ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerReader") - defer func() { - if err != nil { - oc.SetSpanStatus(span, err) - span.End() - } - }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - exportPath, err := ioutil.TempDir("", "hcs") - if err != nil { - return nil, err - } - err = ExportLayer(ctx, path, exportPath, parentLayerPaths) - if err != nil { - os.RemoveAll(exportPath) - return nil, err - } - return &legacyLayerReaderWrapper{ - ctx: ctx, - s: span, - legacyLayerReader: newLegacyLayerReader(exportPath), - }, nil -} - -type legacyLayerReaderWrapper struct { - ctx context.Context - s *trace.Span - - *legacyLayerReader -} - -func (r *legacyLayerReaderWrapper) Close() (err error) { - defer r.s.End() - defer func() { oc.SetSpanStatus(r.s, err) }() - - err = r.legacyLayerReader.Close() - os.RemoveAll(r.root) - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go deleted file mode 100644 index 4d22d0e..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go +++ /dev/null @@ -1,50 +0,0 @@ -package wclayer - -import ( - "context" - "syscall" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// GetLayerMountPath will look for a mounted layer with the given path and return -// the path at which that layer can be accessed. This path may be a volume path -// if the layer is a mounted read-write layer, otherwise it is expected to be the -// folder path at which the layer is stored. 
-func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { - title := "hcsshim::GetLayerMountPath" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - var mountPathLength uintptr = 0 - - // Call the procedure itself. - log.G(ctx).Debug("Calling proc (1)") - err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) - if err != nil { - return "", hcserror.New(err, title+" - failed", "(first call)") - } - - // Allocate a mount path of the returned length. - if mountPathLength == 0 { - return "", nil - } - mountPathp := make([]uint16, mountPathLength) - mountPathp[0] = 0 - - // Call the procedure again - log.G(ctx).Debug("Calling proc (2)") - err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) - if err != nil { - return "", hcserror.New(err, title+" - failed", "(second call)") - } - - mountPath := syscall.UTF16ToString(mountPathp[0:]) - span.AddAttributes(trace.StringAttribute("mountPath", mountPath)) - return mountPath, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go deleted file mode 100644 index bcc8fbd..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go +++ /dev/null @@ -1,29 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// GetSharedBaseImages will enumerate the images stored in the common central -// image store and return descriptive info about those images for the purpose -// of registering them with the graphdriver, graph, and tagstore. 
-func GetSharedBaseImages(ctx context.Context) (_ string, err error) { - title := "hcsshim::GetSharedBaseImages" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - var buffer *uint16 - err = getBaseImages(&buffer) - if err != nil { - return "", hcserror.New(err, title+" - failed", "") - } - imageData := interop.ConvertAndFreeCoTaskMemString(buffer) - span.AddAttributes(trace.StringAttribute("imageData", imageData)) - return imageData, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go deleted file mode 100644 index 3eaca27..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go +++ /dev/null @@ -1,26 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// GrantVmAccess adds access to a file for a given VM -func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) { - title := "hcsshim::GrantVmAccess" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("vm-id", vmid), - trace.StringAttribute("path", filepath)) - - err = grantVmAccess(vmid, filepath) - if err != nil { - return hcserror.New(err, title+" - failed", "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go deleted file mode 100644 index b3c150d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go +++ /dev/null @@ -1,166 +0,0 @@ -package wclayer - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "strings" - - 
"github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/safefile" - "go.opencensus.io/trace" -) - -// ImportLayer will take the contents of the folder at importFolderPath and import -// that into a layer with the id layerId. Note that in order to correctly populate -// the layer and interperet the transport format, all parent layers must already -// be present on the system at the paths provided in parentLayerPaths. -func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) { - title := "hcsshim::ImportLayer" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("importFolderPath", importFolderPath), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - err = importLayer(&stdDriverInfo, path, importFolderPath, layers) - if err != nil { - return hcserror.New(err, title+" - failed", "") - } - return nil -} - -// LayerWriter is an interface that supports writing a new container image layer. -type LayerWriter interface { - // Add adds a file to the layer with given metadata. - Add(name string, fileInfo *winio.FileBasicInfo) error - // AddLink adds a hard link to the layer. The target must already have been added. - AddLink(name string, target string) error - // Remove removes a file that was present in a parent layer from the layer. - Remove(name string) error - // Write writes data to the current file. The data must be in the format of a Win32 - // backup stream. - Write(b []byte) (int, error) - // Close finishes the layer writing process and releases any resources. 
- Close() error -} - -type legacyLayerWriterWrapper struct { - ctx context.Context - s *trace.Span - - *legacyLayerWriter - path string - parentLayerPaths []string -} - -func (r *legacyLayerWriterWrapper) Close() (err error) { - defer r.s.End() - defer func() { oc.SetSpanStatus(r.s, err) }() - defer os.RemoveAll(r.root.Name()) - defer r.legacyLayerWriter.CloseRoots() - - err = r.legacyLayerWriter.Close() - if err != nil { - return err - } - - if err = ImportLayer(r.ctx, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { - return err - } - for _, name := range r.Tombstones { - if err = safefile.RemoveRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) { - return err - } - } - // Add any hard links that were collected. - for _, lnk := range r.PendingLinks { - if err = safefile.RemoveRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) { - return err - } - if err = safefile.LinkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil { - return err - } - } - - // The reapplyDirectoryTimes must be called AFTER we are done with Tombstone - // deletion and hard link creation. This is because Tombstone deletion and hard link - // creation updates the directory last write timestamps so that will change the - // timestamps added by the `Add` call. Some container applications depend on the - // correctness of these timestamps and so we should change the timestamps back to - // the original value (i.e the value provided in the Add call) after this - // processing is done. - err = reapplyDirectoryTimes(r.destRoot, r.changedDi) - if err != nil { - return err - } - - // Prepare the utility VM for use if one is present in the layer. 
- if r.HasUtilityVM { - err := safefile.EnsureNotReparsePointRelative("UtilityVM", r.destRoot) - if err != nil { - return err - } - err = ProcessUtilityVMImage(r.ctx, filepath.Join(r.destRoot.Name(), "UtilityVM")) - if err != nil { - return err - } - } - return nil -} - -// NewLayerWriter returns a new layer writer for creating a layer on disk. -// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges -// to call this and any methods on the resulting LayerWriter. -func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) { - ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerWriter") - defer func() { - if err != nil { - oc.SetSpanStatus(span, err) - span.End() - } - }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - if len(parentLayerPaths) == 0 { - // This is a base layer. It gets imported differently. - f, err := safefile.OpenRoot(path) - if err != nil { - return nil, err - } - return &baseLayerWriter{ - ctx: ctx, - s: span, - root: f, - }, nil - } - - importPath, err := ioutil.TempDir("", "hcs") - if err != nil { - return nil, err - } - w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path) - if err != nil { - return nil, err - } - return &legacyLayerWriterWrapper{ - ctx: ctx, - s: span, - legacyLayerWriter: w, - path: importPath, - parentLayerPaths: parentLayerPaths, - }, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go deleted file mode 100644 index c699997..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go +++ /dev/null @@ -1,28 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// LayerExists will return 
true if a layer with the given id exists and is known -// to the system. -func LayerExists(ctx context.Context, path string) (_ bool, err error) { - title := "hcsshim::LayerExists" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - // Call the procedure itself. - var exists uint32 - err = layerExists(&stdDriverInfo, path, &exists) - if err != nil { - return false, hcserror.New(err, title+" - failed", "") - } - span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0)) - return exists != 0, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go deleted file mode 100644 index 0ce34a3..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go +++ /dev/null @@ -1,22 +0,0 @@ -package wclayer - -import ( - "context" - "path/filepath" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// LayerID returns the layer ID of a layer on disk. -func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) { - title := "hcsshim::LayerID" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - _, file := filepath.Split(path) - return NameToGuid(ctx, file) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go deleted file mode 100644 index 1ec893c..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go +++ /dev/null @@ -1,97 +0,0 @@ -package wclayer - -// This file contains utility functions to support storage (graph) related -// functionality. 
- -import ( - "context" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/sirupsen/logrus" -) - -/* To pass into syscall, we need a struct matching the following: -enum GraphDriverType -{ - DiffDriver, - FilterDriver -}; - -struct DriverInfo { - GraphDriverType Flavour; - LPCWSTR HomeDir; -}; -*/ - -type driverInfo struct { - Flavour int - HomeDirp *uint16 -} - -var ( - utf16EmptyString uint16 - stdDriverInfo = driverInfo{1, &utf16EmptyString} -) - -/* To pass into syscall, we need a struct matching the following: -typedef struct _WC_LAYER_DESCRIPTOR { - - // - // The ID of the layer - // - - GUID LayerId; - - // - // Additional flags - // - - union { - struct { - ULONG Reserved : 31; - ULONG Dirty : 1; // Created from sandbox as a result of snapshot - }; - ULONG Value; - } Flags; - - // - // Path to the layer root directory, null-terminated - // - - PCWSTR Path; - -} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR; -*/ -type WC_LAYER_DESCRIPTOR struct { - LayerId guid.GUID - Flags uint32 - Pathp *uint16 -} - -func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { - // Array of descriptors that gets constructed. 
- var layers []WC_LAYER_DESCRIPTOR - - for i := 0; i < len(parentLayerPaths); i++ { - g, err := LayerID(ctx, parentLayerPaths[i]) - if err != nil { - logrus.WithError(err).Debug("Failed to convert name to guid") - return nil, err - } - - p, err := syscall.UTF16PtrFromString(parentLayerPaths[i]) - if err != nil { - logrus.WithError(err).Debug("Failed conversion of parentLayerPath to pointer") - return nil, err - } - - layers = append(layers, WC_LAYER_DESCRIPTOR{ - LayerId: g, - Flags: 0, - Pathp: p, - }) - } - - return layers, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go deleted file mode 100644 index 83ba72c..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ /dev/null @@ -1,811 +0,0 @@ -package wclayer - -import ( - "bufio" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/longpath" - "github.com/Microsoft/hcsshim/internal/safefile" - "github.com/Microsoft/hcsshim/internal/winapi" -) - -var errorIterationCanceled = errors.New("") - -var mutatedUtilityVMFiles = map[string]bool{ - `EFI\Microsoft\Boot\BCD`: true, - `EFI\Microsoft\Boot\BCD.LOG`: true, - `EFI\Microsoft\Boot\BCD.LOG1`: true, - `EFI\Microsoft\Boot\BCD.LOG2`: true, -} - -const ( - filesPath = `Files` - hivesPath = `Hives` - utilityVMPath = `UtilityVM` - utilityVMFilesPath = `UtilityVM\Files` -) - -func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) { - return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition) -} - -func hasPathPrefix(p, prefix string) bool { - return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\' -} - -type fileEntry struct { - path string - fi os.FileInfo - err error -} - -type legacyLayerReader struct { - root string - 
result chan *fileEntry - proceed chan bool - currentFile *os.File - backupReader *winio.BackupFileReader -} - -// newLegacyLayerReader returns a new LayerReader that can read the Windows -// container layer transport format from disk. -func newLegacyLayerReader(root string) *legacyLayerReader { - r := &legacyLayerReader{ - root: root, - result: make(chan *fileEntry), - proceed: make(chan bool), - } - go r.walk() - return r -} - -func readTombstones(path string) (map[string]([]string), error) { - tf, err := os.Open(filepath.Join(path, "tombstones.txt")) - if err != nil { - return nil, err - } - defer tf.Close() - s := bufio.NewScanner(tf) - if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { - return nil, errors.New("Invalid tombstones file") - } - - ts := make(map[string]([]string)) - for s.Scan() { - t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\` - dir := filepath.Dir(t) - ts[dir] = append(ts[dir], t) - } - if err = s.Err(); err != nil { - return nil, err - } - - return ts, nil -} - -func (r *legacyLayerReader) walkUntilCancelled() error { - root, err := longpath.LongAbs(r.root) - if err != nil { - return err - } - - r.root = root - ts, err := readTombstones(r.root) - if err != nil { - return err - } - - err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048. - // Handle failure from what may be a golang bug in the conversion of - // UTF16 to UTF8 in files which are left in the recycle bin. Os.Lstat - // which is called by filepath.Walk will fail when a filename contains - // unicode characters. Skip the recycle bin regardless which is goodness. 
- if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() { - return filepath.SkipDir - } - - if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") { - return nil - } - - r.result <- &fileEntry{path, info, nil} - if !<-r.proceed { - return errorIterationCanceled - } - - // List all the tombstones. - if info.IsDir() { - relPath, err := filepath.Rel(r.root, path) - if err != nil { - return err - } - if dts, ok := ts[relPath]; ok { - for _, t := range dts { - r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil} - if !<-r.proceed { - return errorIterationCanceled - } - } - } - } - return nil - }) - if err == errorIterationCanceled { - return nil - } - if err == nil { - return io.EOF - } - return err -} - -func (r *legacyLayerReader) walk() { - defer close(r.result) - if !<-r.proceed { - return - } - - err := r.walkUntilCancelled() - if err != nil { - for { - r.result <- &fileEntry{err: err} - if !<-r.proceed { - return - } - } - } -} - -func (r *legacyLayerReader) reset() { - if r.backupReader != nil { - r.backupReader.Close() - r.backupReader = nil - } - if r.currentFile != nil { - r.currentFile.Close() - r.currentFile = nil - } -} - -func findBackupStreamSize(r io.Reader) (int64, error) { - br := winio.NewBackupStreamReader(r) - for { - hdr, err := br.Next() - if err != nil { - if err == io.EOF { - err = nil - } - return 0, err - } - if hdr.Id == winio.BackupData { - return hdr.Size, nil - } - } -} - -func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) { - r.reset() - r.proceed <- true - fe := <-r.result - if fe == nil { - err = errors.New("LegacyLayerReader closed") - return - } - if fe.err != nil { - err = fe.err - return - } - - path, err = filepath.Rel(r.root, fe.path) - if err != nil { - return - } - - if fe.fi == nil { - // This is a tombstone. Return a nil fileInfo. 
- return - } - - if fe.fi.IsDir() && hasPathPrefix(path, filesPath) { - fe.path += ".$wcidirs$" - } - - f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING) - if err != nil { - return - } - defer func() { - if f != nil { - f.Close() - } - }() - - fileInfo, err = winio.GetFileBasicInfo(f) - if err != nil { - return - } - - if !hasPathPrefix(path, filesPath) { - size = fe.fi.Size() - r.backupReader = winio.NewBackupFileReader(f, false) - if path == hivesPath || path == filesPath { - // The Hives directory has a non-deterministic file time because of the - // nature of the import process. Use the times from System_Delta. - var g *os.File - g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`)) - if err != nil { - return - } - attr := fileInfo.FileAttributes - fileInfo, err = winio.GetFileBasicInfo(g) - g.Close() - if err != nil { - return - } - fileInfo.FileAttributes = attr - } - - // The creation time and access time get reset for files outside of the Files path. - fileInfo.CreationTime = fileInfo.LastWriteTime - fileInfo.LastAccessTime = fileInfo.LastWriteTime - - } else { - // The file attributes are written before the backup stream. - var attr uint32 - err = binary.Read(f, binary.LittleEndian, &attr) - if err != nil { - return - } - fileInfo.FileAttributes = attr - beginning := int64(4) - - // Find the accurate file size. - if !fe.fi.IsDir() { - size, err = findBackupStreamSize(f) - if err != nil { - err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err} - return - } - } - - // Return back to the beginning of the backup stream. 
- _, err = f.Seek(beginning, 0) - if err != nil { - return - } - } - - r.currentFile = f - f = nil - return -} - -func (r *legacyLayerReader) Read(b []byte) (int, error) { - if r.backupReader == nil { - if r.currentFile == nil { - return 0, io.EOF - } - return r.currentFile.Read(b) - } - return r.backupReader.Read(b) -} - -func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) { - if r.backupReader == nil { - if r.currentFile == nil { - return 0, errors.New("no current file") - } - return r.currentFile.Seek(offset, whence) - } - return 0, errors.New("seek not supported on this stream") -} - -func (r *legacyLayerReader) Close() error { - r.proceed <- false - <-r.result - r.reset() - return nil -} - -type pendingLink struct { - Path, Target string - TargetRoot *os.File -} - -type pendingDir struct { - Path string - Root *os.File -} - -type legacyLayerWriter struct { - root *os.File - destRoot *os.File - parentRoots []*os.File - currentFile *os.File - bufWriter *bufio.Writer - currentFileName string - currentFileRoot *os.File - backupWriter *winio.BackupFileWriter - Tombstones []string - HasUtilityVM bool - changedDi []dirInfo - addedFiles map[string]bool - PendingLinks []pendingLink - pendingDirs []pendingDir - currentIsDir bool -} - -// newLegacyLayerWriter returns a LayerWriter that can write the contaler layer -// transport format to disk. 
-func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) { - w = &legacyLayerWriter{ - addedFiles: make(map[string]bool), - } - defer func() { - if err != nil { - w.CloseRoots() - w = nil - } - }() - w.root, err = safefile.OpenRoot(root) - if err != nil { - return - } - w.destRoot, err = safefile.OpenRoot(destRoot) - if err != nil { - return - } - for _, r := range parentRoots { - f, err := safefile.OpenRoot(r) - if err != nil { - return w, err - } - w.parentRoots = append(w.parentRoots, f) - } - w.bufWriter = bufio.NewWriterSize(ioutil.Discard, 65536) - return -} - -func (w *legacyLayerWriter) CloseRoots() { - if w.root != nil { - w.root.Close() - w.root = nil - } - if w.destRoot != nil { - w.destRoot.Close() - w.destRoot = nil - } - for i := range w.parentRoots { - _ = w.parentRoots[i].Close() - } - w.parentRoots = nil -} - -func (w *legacyLayerWriter) initUtilityVM() error { - if !w.HasUtilityVM { - err := safefile.MkdirRelative(utilityVMPath, w.destRoot) - if err != nil { - return err - } - // Server 2016 does not support multiple layers for the utility VM, so - // clone the utility VM from the parent layer into this layer. Use hard - // links to avoid unnecessary copying, since most of the files are - // immutable. 
- err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles) - if err != nil { - return fmt.Errorf("cloning the parent utility VM image failed: %s", err) - } - w.HasUtilityVM = true - } - return nil -} - -func (w *legacyLayerWriter) reset() error { - err := w.bufWriter.Flush() - if err != nil { - return err - } - w.bufWriter.Reset(ioutil.Discard) - if w.currentIsDir { - r := w.currentFile - br := winio.NewBackupStreamReader(r) - // Seek to the beginning of the backup stream, skipping the fileattrs - if _, err := r.Seek(4, io.SeekStart); err != nil { - return err - } - - for { - bhdr, err := br.Next() - if err == io.EOF { - // end of backupstream data - break - } - if err != nil { - return err - } - switch bhdr.Id { - case winio.BackupReparseData: - // The current file is a `.$wcidirs$` metadata file that - // describes a directory reparse point. Delete the placeholder - // directory to prevent future files being added into the - // destination of the reparse point during the ImportLayer call - if err := safefile.RemoveRelative(w.currentFileName, w.currentFileRoot); err != nil { - return err - } - w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot}) - default: - // ignore all other stream types, as we only care about directory reparse points - } - } - w.currentIsDir = false - } - if w.backupWriter != nil { - w.backupWriter.Close() - w.backupWriter = nil - } - if w.currentFile != nil { - w.currentFile.Close() - w.currentFile = nil - w.currentFileName = "" - w.currentFileRoot = nil - } - return nil -} - -// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata -func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) { - src, err := safefile.OpenRelative( - subPath, - srcRoot, - syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, - syscall.FILE_SHARE_READ, - winapi.FILE_OPEN, - 
winapi.FILE_OPEN_REPARSE_POINT) - if err != nil { - return nil, err - } - defer src.Close() - srcr := winio.NewBackupFileReader(src, true) - defer srcr.Close() - - fileInfo, err = winio.GetFileBasicInfo(src) - if err != nil { - return nil, err - } - - extraFlags := uint32(0) - if isDir { - extraFlags |= winapi.FILE_DIRECTORY_FILE - } - dest, err := safefile.OpenRelative( - subPath, - destRoot, - syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, - syscall.FILE_SHARE_READ, - winapi.FILE_CREATE, - extraFlags) - if err != nil { - return nil, err - } - defer dest.Close() - - err = winio.SetFileBasicInfo(dest, fileInfo) - if err != nil { - return nil, err - } - - destw := winio.NewBackupFileWriter(dest, true) - defer func() { - cerr := destw.Close() - if err == nil { - err = cerr - } - }() - - _, err = io.Copy(destw, srcr) - if err != nil { - return nil, err - } - - return fileInfo, nil -} - -// cloneTree clones a directory tree using hard links. It skips hard links for -// the file names in the provided map and just copies those files. -func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error { - var di []dirInfo - err := safefile.EnsureNotReparsePointRelative(subPath, srcRoot) - if err != nil { - return err - } - err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath) - if err != nil { - return err - } - - fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes - // Directories, reparse points, and files that will be mutated during - // utility VM import must be copied. All other files can be hard linked. - isReparsePoint := fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 - // In go1.9, FileInfo.IsDir() returns false if the directory is also a symlink. 
- // See: https://github.com/golang/go/commit/1989921aef60c83e6f9127a8448fb5ede10e9acc - // Fixes the problem by checking syscall.FILE_ATTRIBUTE_DIRECTORY directly - isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 - - if isDir || isReparsePoint || mutatedFiles[relPath] { - fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir) - if err != nil { - return err - } - if isDir { - di = append(di, dirInfo{path: relPath, fileInfo: *fi}) - } - } else { - err = safefile.LinkRelative(relPath, srcRoot, relPath, destRoot) - if err != nil { - return err - } - } - - return nil - }) - if err != nil { - return err - } - - return reapplyDirectoryTimes(destRoot, di) -} - -func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { - if err := w.reset(); err != nil { - return err - } - - if name == utilityVMPath { - return w.initUtilityVM() - } - - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - w.changedDi = append(w.changedDi, dirInfo{path: name, fileInfo: *fileInfo}) - } - - name = filepath.Clean(name) - if hasPathPrefix(name, utilityVMPath) { - if !w.HasUtilityVM { - return errors.New("missing UtilityVM directory") - } - if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { - return errors.New("invalid UtilityVM layer") - } - createDisposition := uint32(winapi.FILE_OPEN) - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - st, err := safefile.LstatRelative(name, w.destRoot) - if err != nil && !os.IsNotExist(err) { - return err - } - if st != nil { - // Delete the existing file/directory if it is not the same type as this directory. 
- existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes - if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { - if err = safefile.RemoveAllRelative(name, w.destRoot); err != nil { - return err - } - st = nil - } - } - if st == nil { - if err = safefile.MkdirRelative(name, w.destRoot); err != nil { - return err - } - } - } else { - // Overwrite any existing hard link. - err := safefile.RemoveRelative(name, w.destRoot) - if err != nil && !os.IsNotExist(err) { - return err - } - createDisposition = winapi.FILE_CREATE - } - - f, err := safefile.OpenRelative( - name, - w.destRoot, - syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, - syscall.FILE_SHARE_READ, - createDisposition, - winapi.FILE_OPEN_REPARSE_POINT, - ) - if err != nil { - return err - } - defer func() { - if f != nil { - f.Close() - _ = safefile.RemoveRelative(name, w.destRoot) - } - }() - - err = winio.SetFileBasicInfo(f, fileInfo) - if err != nil { - return err - } - - w.backupWriter = winio.NewBackupFileWriter(f, true) - w.bufWriter.Reset(w.backupWriter) - w.currentFile = f - w.currentFileName = name - w.currentFileRoot = w.destRoot - w.addedFiles[name] = true - f = nil - return nil - } - - fname := name - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - err := safefile.MkdirRelative(name, w.root) - if err != nil { - return err - } - fname += ".$wcidirs$" - w.currentIsDir = true - } - - f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, 0) - if err != nil { - return err - } - defer func() { - if f != nil { - f.Close() - _ = safefile.RemoveRelative(fname, w.root) - } - }() - - strippedFi := *fileInfo - strippedFi.FileAttributes = 0 - err = winio.SetFileBasicInfo(f, &strippedFi) - if err != nil { - return err - } - - if hasPathPrefix(name, 
hivesPath) { - w.backupWriter = winio.NewBackupFileWriter(f, false) - w.bufWriter.Reset(w.backupWriter) - } else { - w.bufWriter.Reset(f) - // The file attributes are written before the stream. - err = binary.Write(w.bufWriter, binary.LittleEndian, uint32(fileInfo.FileAttributes)) - if err != nil { - w.bufWriter.Reset(ioutil.Discard) - return err - } - } - - w.currentFile = f - w.currentFileName = name - w.currentFileRoot = w.root - w.addedFiles[name] = true - f = nil - return nil -} - -func (w *legacyLayerWriter) AddLink(name string, target string) error { - if err := w.reset(); err != nil { - return err - } - - target = filepath.Clean(target) - var roots []*os.File - if hasPathPrefix(target, filesPath) { - // Look for cross-layer hard link targets in the parent layers, since - // nothing is in the destination path yet. - roots = w.parentRoots - } else if hasPathPrefix(target, utilityVMFilesPath) { - // Since the utility VM is fully cloned into the destination path - // already, look for cross-layer hard link targets directly in the - // destination path. - roots = []*os.File{w.destRoot} - } - - if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) { - return errors.New("invalid hard link in layer") - } - - // Find to try the target of the link in a previously added file. If that - // fails, search in parent layers. - var selectedRoot *os.File - if _, ok := w.addedFiles[target]; ok { - selectedRoot = w.destRoot - } else { - for _, r := range roots { - if _, err := safefile.LstatRelative(target, r); err != nil { - if !os.IsNotExist(err) { - return err - } - } else { - selectedRoot = r - break - } - } - if selectedRoot == nil { - return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target) - } - } - - // The link can't be written until after the ImportLayer call. 
- w.PendingLinks = append(w.PendingLinks, pendingLink{ - Path: name, - Target: target, - TargetRoot: selectedRoot, - }) - w.addedFiles[name] = true - return nil -} - -func (w *legacyLayerWriter) Remove(name string) error { - name = filepath.Clean(name) - if hasPathPrefix(name, filesPath) { - w.Tombstones = append(w.Tombstones, name) - } else if hasPathPrefix(name, utilityVMFilesPath) { - err := w.initUtilityVM() - if err != nil { - return err - } - // Make sure the path exists; os.RemoveAll will not fail if the file is - // already gone, and this needs to be a fatal error for diagnostics - // purposes. - if _, err := safefile.LstatRelative(name, w.destRoot); err != nil { - return err - } - err = safefile.RemoveAllRelative(name, w.destRoot) - if err != nil { - return err - } - } else { - return fmt.Errorf("invalid tombstone %s", name) - } - - return nil -} - -func (w *legacyLayerWriter) Write(b []byte) (int, error) { - if w.backupWriter == nil && w.currentFile == nil { - return 0, errors.New("closed") - } - return w.bufWriter.Write(b) -} - -func (w *legacyLayerWriter) Close() error { - if err := w.reset(); err != nil { - return err - } - if err := safefile.RemoveRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) { - return err - } - for _, pd := range w.pendingDirs { - err := safefile.MkdirRelative(pd.Path, pd.Root) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go deleted file mode 100644 index bcf39c6..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go +++ /dev/null @@ -1,29 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// NameToGuid converts the given string into a GUID using the algorithm 
in the -// Host Compute Service, ensuring GUIDs generated with the same string are common -// across all clients. -func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) { - title := "hcsshim::NameToGuid" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("name", name)) - - var id guid.GUID - err = nameToGuid(name, &id) - if err != nil { - return guid.GUID{}, hcserror.New(err, title+" - failed", "") - } - span.AddAttributes(trace.StringAttribute("guid", id.String())) - return id, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go deleted file mode 100644 index 55f7730..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go +++ /dev/null @@ -1,44 +0,0 @@ -package wclayer - -import ( - "context" - "strings" - "sync" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -var prepareLayerLock sync.Mutex - -// PrepareLayer finds a mounted read-write layer matching path and enables the -// the filesystem filter for use on that layer. This requires the paths to all -// parent layers, and is necessary in order to view or interact with the layer -// as an actual filesystem (reading and writing files, creating directories, etc). -// Disabling the filter must be done via UnprepareLayer. 
-func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { - title := "hcsshim::PrepareLayer" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - // This lock is a temporary workaround for a Windows bug. Only allowing one - // call to prepareLayer at a time vastly reduces the chance of a timeout. - prepareLayerLock.Lock() - defer prepareLayerLock.Unlock() - err = prepareLayer(&stdDriverInfo, path, layers) - if err != nil { - return hcserror.New(err, title+" - failed", "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go deleted file mode 100644 index 30bcdff..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go +++ /dev/null @@ -1,41 +0,0 @@ -package wclayer - -import ( - "context" - "os" - - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// ProcessBaseLayer post-processes a base layer that has had its files extracted. -// The files should have been extracted to \Files. -func ProcessBaseLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::ProcessBaseLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = processBaseImage(path) - if err != nil { - return &os.PathError{Op: title, Path: path, Err: err} - } - return nil -} - -// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted. 
-// The files should have been extracted to \Files. -func ProcessUtilityVMImage(ctx context.Context, path string) (err error) { - title := "hcsshim::ProcessUtilityVMImage" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = processUtilityImage(path) - if err != nil { - return &os.PathError{Op: title, Path: path, Err: err} - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go deleted file mode 100644 index 79fb986..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go +++ /dev/null @@ -1,25 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// UnprepareLayer disables the filesystem filter for the read-write layer with -// the given id. -func UnprepareLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::UnprepareLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = unprepareLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title+" - failed", "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go deleted file mode 100644 index 9b1e06d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package wclayer provides bindings to HCS's legacy layer management API and -// provides a higher level interface around these calls for container layer -// management. 
-package wclayer - -import "github.com/Microsoft/go-winio/pkg/guid" - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go wclayer.go - -//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer? -//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer? -//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer? -//sys createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer? -//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize? -//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer? -//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer? -//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer? -//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath? -//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages? -//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer? -//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists? -//sys nameToGuid(name string, guid *_guid) (hr error) = vmcompute.NameToGuid? -//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer? -//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer? -//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage? -//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage? 
- -//sys grantVmAccess(vmid string, filepath string) (hr error) = vmcompute.GrantVmAccess? - -//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk -//sys attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk - -//sys getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) = GetDiskFreeSpaceExW - -type _guid = guid.GUID diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go deleted file mode 100644 index 67f917f..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go +++ /dev/null @@ -1,569 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package wclayer - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") - modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procActivateLayer = modvmcompute.NewProc("ActivateLayer") - procCopyLayer = modvmcompute.NewProc("CopyLayer") - procCreateLayer = modvmcompute.NewProc("CreateLayer") - procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer") - procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize") - procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer") - procDestroyLayer = modvmcompute.NewProc("DestroyLayer") - procExportLayer = modvmcompute.NewProc("ExportLayer") - procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath") - procGetBaseImages = modvmcompute.NewProc("GetBaseImages") - procImportLayer = modvmcompute.NewProc("ImportLayer") - procLayerExists = modvmcompute.NewProc("LayerExists") - procNameToGuid = modvmcompute.NewProc("NameToGuid") - procPrepareLayer = modvmcompute.NewProc("PrepareLayer") - procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer") - procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage") - procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage") - procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess") - procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") - procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") - procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") -) - -func activateLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _activateLayer(info, _p0) -} - -func _activateLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procActivateLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - 
r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(srcId) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(dstId) - if hr != nil { - return - } - return _copyLayer(info, _p0, _p1, descriptors) -} - -func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p2 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p2 = &descriptors[0] - } - if hr = procCopyLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func createLayer(info *driverInfo, id string, parent string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(parent) - if hr != nil { - return - } - return _createLayer(info, _p0, _p1) -} - -func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { - if hr = procCreateLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _createSandboxLayer(info, _p0, parent, descriptors) -} - -func 
_createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p1 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p1 = &descriptors[0] - } - if hr = procCreateSandboxLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _expandSandboxSize(info, _p0, size) -} - -func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { - if hr = procExpandSandboxSize.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func deactivateLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _deactivateLayer(info, _p0) -} - -func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procDeactivateLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func destroyLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _destroyLayer(info, _p0) -} - -func _destroyLayer(info 
*driverInfo, id *uint16) (hr error) { - if hr = procDestroyLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _exportLayer(info, _p0, _p1, descriptors) -} - -func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p2 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p2 = &descriptors[0] - } - if hr = procExportLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _getLayerMountPath(info, _p0, length, buffer) -} - -func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) { - if hr = procGetLayerMountPath.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - 
-func getBaseImages(buffer **uint16) (hr error) { - if hr = procGetBaseImages.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _importLayer(info, _p0, _p1, descriptors) -} - -func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p2 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p2 = &descriptors[0] - } - if hr = procImportLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func layerExists(info *driverInfo, id string, exists *uint32) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _layerExists(info, _p0, exists) -} - -func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { - if hr = procLayerExists.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func nameToGuid(name string, guid *_guid) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(name) - 
if hr != nil { - return - } - return _nameToGuid(_p0, guid) -} - -func _nameToGuid(name *uint16, guid *_guid) (hr error) { - if hr = procNameToGuid.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _prepareLayer(info, _p0, descriptors) -} - -func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p1 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p1 = &descriptors[0] - } - if hr = procPrepareLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func unprepareLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _unprepareLayer(info, _p0) -} - -func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procUnprepareLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func processBaseImage(path string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _processBaseImage(_p0) -} - -func _processBaseImage(path *uint16) (hr error) { - 
if hr = procProcessBaseImage.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func processUtilityImage(path string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _processUtilityImage(_p0) -} - -func _processUtilityImage(path *uint16) (hr error) { - if hr = procProcessUtilityImage.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func grantVmAccess(vmid string, filepath string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(vmid) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(filepath) - if hr != nil { - return - } - return _grantVmAccess(_p0, _p1) -} - -func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { - if hr = procGrantVmAccess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(path) - if err != nil { - return - } - return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle) -} - -func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters 
*openVirtualDiskParameters, handle *syscall.Handle) (err error) { - r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) - if r1 != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) - if r1 != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(directoryName) - if err != nil { - return - } - return _getDiskFreeSpaceEx(_p0, freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes) -} - -func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go deleted file mode 100644 index df28ea2..0000000 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go +++ /dev/null @@ -1,13 +0,0 @@ -package winapi - -import "github.com/Microsoft/go-winio/pkg/guid" - -//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA -//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA -//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW -//sys CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) = cfgmgr32.CM_Get_DevNode_PropertyW - -type DevPropKey struct { - Fmtid guid.GUID - Pid uint32 -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go deleted file mode 100644 index 4e80ef6..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go +++ /dev/null @@ -1,15 +0,0 @@ -package winapi - -import "syscall" - -//sys RtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosError - -const ( - STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B - ERROR_NO_MORE_ITEMS = 0x103 - ERROR_MORE_DATA syscall.Errno = 234 -) - -func NTSuccess(status uint32) bool { - return status == 0 -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go deleted file mode 100644 index 7ce52af..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go +++ /dev/null @@ -1,110 +0,0 @@ -package winapi - -//sys NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = 
ntdll.NtCreateFile -//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile - -//sys NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) = ntdll.NtOpenDirectoryObject -//sys NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32)(status uint32) = ntdll.NtQueryDirectoryObject - -const ( - FileLinkInformationClass = 11 - FileDispositionInformationExClass = 64 - - FILE_READ_ATTRIBUTES = 0x0080 - FILE_WRITE_ATTRIBUTES = 0x0100 - DELETE = 0x10000 - - FILE_OPEN = 1 - FILE_CREATE = 2 - - FILE_LIST_DIRECTORY = 0x00000001 - FILE_DIRECTORY_FILE = 0x00000001 - FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 - FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 - FILE_OPEN_REPARSE_POINT = 0x00200000 - - FILE_DISPOSITION_DELETE = 0x00000001 - - OBJ_DONT_REPARSE = 0x1000 - - STATUS_MORE_ENTRIES = 0x105 - STATUS_NO_MORE_ENTRIES = 0x8000001a -) - -// Select entries from FILE_INFO_BY_HANDLE_CLASS. 
-// -// C declaration: -// typedef enum _FILE_INFO_BY_HANDLE_CLASS { -// FileBasicInfo, -// FileStandardInfo, -// FileNameInfo, -// FileRenameInfo, -// FileDispositionInfo, -// FileAllocationInfo, -// FileEndOfFileInfo, -// FileStreamInfo, -// FileCompressionInfo, -// FileAttributeTagInfo, -// FileIdBothDirectoryInfo, -// FileIdBothDirectoryRestartInfo, -// FileIoPriorityHintInfo, -// FileRemoteProtocolInfo, -// FileFullDirectoryInfo, -// FileFullDirectoryRestartInfo, -// FileStorageInfo, -// FileAlignmentInfo, -// FileIdInfo, -// FileIdExtdDirectoryInfo, -// FileIdExtdDirectoryRestartInfo, -// FileDispositionInfoEx, -// FileRenameInfoEx, -// FileCaseSensitiveInfo, -// FileNormalizedNameInfo, -// MaximumFileInfoByHandleClass -// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS; -// -// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ne-minwinbase-file_info_by_handle_class -const ( - FileIdInfo = 18 -) - -type FileDispositionInformationEx struct { - Flags uintptr -} - -type IOStatusBlock struct { - Status, Information uintptr -} - -type ObjectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName *UnicodeString - Attributes uintptr - SecurityDescriptor uintptr - SecurityQoS uintptr -} - -type ObjectDirectoryInformation struct { - Name UnicodeString - TypeName UnicodeString -} - -type FileLinkInformation struct { - ReplaceIfExists bool - RootDirectory uintptr - FileNameLength uint32 - FileName [1]uint16 -} - -// C declaration: -// typedef struct _FILE_ID_INFO { -// ULONGLONG VolumeSerialNumber; -// FILE_ID_128 FileId; -// } FILE_ID_INFO, *PFILE_ID_INFO; -// -// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_info -type FILE_ID_INFO struct { - VolumeSerialNumber uint64 - FileID [16]byte -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go deleted file mode 100644 index 4e609cb..0000000 
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go +++ /dev/null @@ -1,3 +0,0 @@ -package winapi - -//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go deleted file mode 100644 index ba12b1a..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go +++ /dev/null @@ -1,215 +0,0 @@ -package winapi - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -// Messages that can be received from an assigned io completion port. -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port -const ( - JOB_OBJECT_MSG_END_OF_JOB_TIME uint32 = 1 - JOB_OBJECT_MSG_END_OF_PROCESS_TIME uint32 = 2 - JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT uint32 = 3 - JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO uint32 = 4 - JOB_OBJECT_MSG_NEW_PROCESS uint32 = 6 - JOB_OBJECT_MSG_EXIT_PROCESS uint32 = 7 - JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS uint32 = 8 - JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT uint32 = 9 - JOB_OBJECT_MSG_JOB_MEMORY_LIMIT uint32 = 10 - JOB_OBJECT_MSG_NOTIFICATION_LIMIT uint32 = 11 -) - -// Access rights for creating or opening job objects. 
-// -// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights -const JOB_OBJECT_ALL_ACCESS = 0x1F001F - -// IO limit flags -// -// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information -const JOB_OBJECT_IO_RATE_CONTROL_ENABLE = 0x1 - -const JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE uint32 = 0x1 - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information -const ( - JOB_OBJECT_CPU_RATE_CONTROL_ENABLE uint32 = 1 << iota - JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED - JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP - JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY - JOB_OBJECT_CPU_RATE_CONTROL_MIN_MAX_RATE -) - -// JobObjectInformationClass values. Used for a call to QueryInformationJobObject -// -// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-queryinformationjobobject -const ( - JobObjectBasicAccountingInformation uint32 = 1 - JobObjectBasicProcessIdList uint32 = 3 - JobObjectBasicAndIoAccountingInformation uint32 = 8 - JobObjectLimitViolationInformation uint32 = 13 - JobObjectMemoryUsageInformation uint32 = 28 - JobObjectNotificationLimitInformation2 uint32 = 33 - JobObjectIoAttribution uint32 = 42 -) - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_limit_information -type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { - PerProcessUserTimeLimit int64 - PerJobUserTimeLimit int64 - LimitFlags uint32 - MinimumWorkingSetSize uintptr - MaximumWorkingSetSize uintptr - ActiveProcessLimit uint32 - Affinity uintptr - PriorityClass uint32 - SchedulingClass uint32 -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information -type JOBOBJECT_CPU_RATE_CONTROL_INFORMATION struct { - ControlFlags uint32 - Value uint32 -} - -// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information -type 
JOBOBJECT_IO_RATE_CONTROL_INFORMATION struct { - MaxIops int64 - MaxBandwidth int64 - ReservationIops int64 - BaseIOSize uint32 - VolumeName string - ControlFlags uint32 -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_process_id_list -type JOBOBJECT_BASIC_PROCESS_ID_LIST struct { - NumberOfAssignedProcesses uint32 - NumberOfProcessIdsInList uint32 - ProcessIdList [1]uintptr -} - -// AllPids returns all the process Ids in the job object. -func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr { - return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList] -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information -type JOBOBJECT_BASIC_ACCOUNTING_INFORMATION struct { - TotalUserTime int64 - TotalKernelTime int64 - ThisPeriodTotalUserTime int64 - ThisPeriodTotalKernelTime int64 - TotalPageFaultCount uint32 - TotalProcesses uint32 - ActiveProcesses uint32 - TotalTerminateProcesses uint32 -} - -//https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information -type JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION struct { - BasicInfo JOBOBJECT_BASIC_ACCOUNTING_INFORMATION - IoInfo windows.IO_COUNTERS -} - -// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION { -// ULONG64 JobMemory; -// ULONG64 PeakJobMemoryUsed; -// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION; -// -type JOBOBJECT_MEMORY_USAGE_INFORMATION struct { - JobMemory uint64 - PeakJobMemoryUsed uint64 -} - -// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS { -// ULONG_PTR IoCount; -// ULONGLONG TotalNonOverlappedQueueTime; -// ULONGLONG TotalNonOverlappedServiceTime; -// ULONGLONG TotalSize; -// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS; -// -type JOBOBJECT_IO_ATTRIBUTION_STATS struct { - IoCount uintptr - TotalNonOverlappedQueueTime uint64 - 
TotalNonOverlappedServiceTime uint64 - TotalSize uint64 -} - -// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION { -// ULONG ControlFlags; -// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats; -// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats; -// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION; -// -type JOBOBJECT_IO_ATTRIBUTION_INFORMATION struct { - ControlFlags uint32 - ReadStats JOBOBJECT_IO_ATTRIBUTION_STATS - WriteStats JOBOBJECT_IO_ATTRIBUTION_STATS -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port -type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { - CompletionKey windows.Handle - CompletionPort windows.Handle -} - -// BOOL IsProcessInJob( -// HANDLE ProcessHandle, -// HANDLE JobHandle, -// PBOOL Result -// ); -// -//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob - -// BOOL QueryInformationJobObject( -// HANDLE hJob, -// JOBOBJECTINFOCLASS JobObjectInformationClass, -// LPVOID lpJobObjectInformation, -// DWORD cbJobObjectInformationLength, -// LPDWORD lpReturnLength -// ); -// -//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject - -// HANDLE OpenJobObjectW( -// DWORD dwDesiredAccess, -// BOOL bInheritHandle, -// LPCWSTR lpName -// ); -// -//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW - -// DWORD SetIoRateControlInformationJobObject( -// HANDLE hJob, -// JOBOBJECT_IO_RATE_CONTROL_INFORMATION *IoRateControlInfo -// ); -// -//sys SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) = kernel32.SetIoRateControlInformationJobObject - -// DWORD 
QueryIoRateControlInformationJobObject( -// HANDLE hJob, -// PCWSTR VolumeName, -// JOBOBJECT_IO_RATE_CONTROL_INFORMATION **InfoBlocks, -// ULONG *InfoBlockCount -// ); -//sys QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) = kernel32.QueryIoRateControlInformationJobObject - -// NTSTATUS -// NtOpenJobObject ( -// _Out_ PHANDLE JobHandle, -// _In_ ACCESS_MASK DesiredAccess, -// _In_ POBJECT_ATTRIBUTES ObjectAttributes -// ); -//sys NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtOpenJobObject - -// NTSTATUS -// NTAPI -// NtCreateJobObject ( -// _Out_ PHANDLE JobHandle, -// _In_ ACCESS_MASK DesiredAccess, -// _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes -// ); -//sys NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtCreateJobObject diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go deleted file mode 100644 index b6e7cfd..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go +++ /dev/null @@ -1,30 +0,0 @@ -package winapi - -// BOOL LogonUserA( -// LPCWSTR lpszUsername, -// LPCWSTR lpszDomain, -// LPCWSTR lpszPassword, -// DWORD dwLogonType, -// DWORD dwLogonProvider, -// PHANDLE phToken -// ); -// -//sys LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) = advapi32.LogonUserW - -// Logon types -const ( - LOGON32_LOGON_INTERACTIVE uint32 = 2 - LOGON32_LOGON_NETWORK uint32 = 3 - LOGON32_LOGON_BATCH uint32 = 4 - LOGON32_LOGON_SERVICE uint32 = 5 - LOGON32_LOGON_UNLOCK uint32 = 7 - LOGON32_LOGON_NETWORK_CLEARTEXT uint32 = 8 - LOGON32_LOGON_NEW_CREDENTIALS uint32 = 9 -) - -// Logon providers 
-const ( - LOGON32_PROVIDER_DEFAULT uint32 = 0 - LOGON32_PROVIDER_WINNT40 uint32 = 2 - LOGON32_PROVIDER_WINNT50 uint32 = 3 -) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go deleted file mode 100644 index 83f7040..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go +++ /dev/null @@ -1,27 +0,0 @@ -package winapi - -// VOID RtlMoveMemory( -// _Out_ VOID UNALIGNED *Destination, -// _In_ const VOID UNALIGNED *Source, -// _In_ SIZE_T Length -// ); -//sys RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) = kernel32.RtlMoveMemory - -//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc -//sys LocalFree(ptr uintptr) = kernel32.LocalFree - -// BOOL QueryWorkingSet( -// HANDLE hProcess, -// PVOID pv, -// DWORD cb -// ); -//sys QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSet - -type PSAPI_WORKING_SET_INFORMATION struct { - NumberOfEntries uintptr - WorkingSetInfo [1]PSAPI_WORKING_SET_BLOCK -} - -type PSAPI_WORKING_SET_BLOCK struct { - Flags uintptr -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go deleted file mode 100644 index f379100..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go +++ /dev/null @@ -1,3 +0,0 @@ -package winapi - -//sys SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) = iphlpapi.SetJobCompartmentId diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go deleted file mode 100644 index 908920e..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go +++ /dev/null @@ -1,11 +0,0 @@ -package winapi - -// DWORD SearchPathW( -// LPCWSTR lpPath, -// LPCWSTR lpFileName, -// LPCWSTR lpExtension, -// DWORD nBufferLength, 
-// LPWSTR lpBuffer, -// LPWSTR *lpFilePart -// ); -//sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) = kernel32.SearchPathW diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go deleted file mode 100644 index b870683..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go +++ /dev/null @@ -1,10 +0,0 @@ -package winapi - -const PROCESS_ALL_ACCESS uint32 = 2097151 - -// DWORD GetProcessImageFileNameW( -// HANDLE hProcess, -// LPWSTR lpImageFileName, -// DWORD nSize -// ); -//sys GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) = kernel32.GetProcessImageFileNameW diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go deleted file mode 100644 index ce79ac2..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go +++ /dev/null @@ -1,7 +0,0 @@ -package winapi - -// Get count from all processor groups. 
-// https://docs.microsoft.com/en-us/windows/win32/procthread/processor-groups -const ALL_PROCESSOR_GROUPS = 0xFFFF - -//sys GetActiveProcessorCount(groupNumber uint16) (amount uint32) = kernel32.GetActiveProcessorCount diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go deleted file mode 100644 index 327f57d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go +++ /dev/null @@ -1,52 +0,0 @@ -package winapi - -import "golang.org/x/sys/windows" - -const SystemProcessInformation = 5 - -const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 - -// __kernel_entry NTSTATUS NtQuerySystemInformation( -// SYSTEM_INFORMATION_CLASS SystemInformationClass, -// PVOID SystemInformation, -// ULONG SystemInformationLength, -// PULONG ReturnLength -// ); -//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation - -type SYSTEM_PROCESS_INFORMATION struct { - NextEntryOffset uint32 // ULONG - NumberOfThreads uint32 // ULONG - WorkingSetPrivateSize int64 // LARGE_INTEGER - HardFaultCount uint32 // ULONG - NumberOfThreadsHighWatermark uint32 // ULONG - CycleTime uint64 // ULONGLONG - CreateTime int64 // LARGE_INTEGER - UserTime int64 // LARGE_INTEGER - KernelTime int64 // LARGE_INTEGER - ImageName UnicodeString // UNICODE_STRING - BasePriority int32 // KPRIORITY - UniqueProcessID windows.Handle // HANDLE - InheritedFromUniqueProcessID windows.Handle // HANDLE - HandleCount uint32 // ULONG - SessionID uint32 // ULONG - UniqueProcessKey *uint32 // ULONG_PTR - PeakVirtualSize uintptr // SIZE_T - VirtualSize uintptr // SIZE_T - PageFaultCount uint32 // ULONG - PeakWorkingSetSize uintptr // SIZE_T - WorkingSetSize uintptr // SIZE_T - QuotaPeakPagedPoolUsage uintptr // SIZE_T - QuotaPagedPoolUsage uintptr // SIZE_T - QuotaPeakNonPagedPoolUsage uintptr // SIZE_T - 
QuotaNonPagedPoolUsage uintptr // SIZE_T - PagefileUsage uintptr // SIZE_T - PeakPagefileUsage uintptr // SIZE_T - PrivatePageCount uintptr // SIZE_T - ReadOperationCount int64 // LARGE_INTEGER - WriteOperationCount int64 // LARGE_INTEGER - OtherOperationCount int64 // LARGE_INTEGER - ReadTransferCount int64 // LARGE_INTEGER - WriteTransferCount int64 // LARGE_INTEGER - OtherTransferCount int64 // LARGE_INTEGER -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go deleted file mode 100644 index 4724713..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go +++ /dev/null @@ -1,12 +0,0 @@ -package winapi - -// HANDLE CreateRemoteThread( -// HANDLE hProcess, -// LPSECURITY_ATTRIBUTES lpThreadAttributes, -// SIZE_T dwStackSize, -// LPTHREAD_START_ROUTINE lpStartAddress, -// LPVOID lpParameter, -// DWORD dwCreationFlags, -// LPDWORD lpThreadId -// ); -//sys CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) = kernel32.CreateRemoteThread diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go deleted file mode 100644 index db59567..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go +++ /dev/null @@ -1,75 +0,0 @@ -package winapi - -import ( - "errors" - "reflect" - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -// Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice -// for easier interop with Go APIs -func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result)) - hdr.Data = uintptr(unsafe.Pointer(buffer)) - hdr.Cap = bufferLength - hdr.Len = bufferLength - - return -} - -type UnicodeString struct { - Length 
uint16 - MaximumLength uint16 - Buffer *uint16 -} - -//String converts a UnicodeString to a golang string -func (uni UnicodeString) String() string { - // UnicodeString is not guaranteed to be null terminated, therefore - // use the UnicodeString's Length field - return syscall.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) -} - -// NewUnicodeString allocates a new UnicodeString and copies `s` into -// the buffer of the new UnicodeString. -func NewUnicodeString(s string) (*UnicodeString, error) { - // Get length of original `s` to use in the UnicodeString since the `buf` - // created later will have an additional trailing null character - length := len(s) - if length > 32767 { - return nil, syscall.ENAMETOOLONG - } - - buf, err := windows.UTF16FromString(s) - if err != nil { - return nil, err - } - uni := &UnicodeString{ - Length: uint16(length * 2), - MaximumLength: uint16(length * 2), - Buffer: &buf[0], - } - return uni, nil -} - -// ConvertStringSetToSlice is a helper function used to convert the contents of -// `buf` into a string slice. `buf` contains a set of null terminated strings -// with an additional null at the end to indicate the end of the set. -func ConvertStringSetToSlice(buf []byte) ([]string, error) { - var results []string - prev := 0 - for i := range buf { - if buf[i] == 0 { - if prev == i { - // found two null characters in a row, return result - return results, nil - } - results = append(results, string(buf[prev:i])) - prev = i + 1 - } - } - return nil, errors.New("string set malformed: missing null terminator at end of buffer") -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go deleted file mode 100644 index ec88c0d..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package winapi contains various low-level bindings to Windows APIs. 
It can -// be thought of as an extension to golang.org/x/sys/windows. -package winapi - -//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go deleted file mode 100644 index 2941b0f..0000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ /dev/null @@ -1,371 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package winapi - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modpsapi = windows.NewLazySystemDLL("psapi.dll") - modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") - - procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") - procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") - procSearchPathW = modkernel32.NewProc("SearchPathW") - procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") - procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") - procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") - procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject") - procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject") - procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") - procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") - procLogonUserW = modadvapi32.NewProc("LogonUserW") - procRtlMoveMemory = modkernel32.NewProc("RtlMoveMemory") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") - procQueryWorkingSet = modpsapi.NewProc("QueryWorkingSet") - procGetProcessImageFileNameW = modkernel32.NewProc("GetProcessImageFileNameW") - procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") - procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") - procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") - procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") - procCM_Get_DevNode_PropertyW = 
modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW") - procNtCreateFile = modntdll.NewProc("NtCreateFile") - procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") - procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject") - procNtQueryDirectoryObject = modntdll.NewProc("NtQueryDirectoryObject") - procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError") -) - -func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) - status = uint32(r0) - return -} - -func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { - r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) - if r0 != 0 { - win32Err = syscall.Errno(r0) - } - return -} - -func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) - size = uint32(r0) - if size == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), 
uintptr(unsafe.Pointer(threadID)), 0, 0) - handle = windows.Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) { - r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName))) - handle = windows.Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func 
SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) - status = uint32(r0) - return -} - -func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) - status = uint32(r0) - return -} - -func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { - r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), 
uintptr(unsafe.Pointer(token))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procRtlMoveMemory.Addr(), 3, uintptr(unsafe.Pointer(destination)), uintptr(unsafe.Pointer(source)), uintptr(length)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func LocalAlloc(flags uint32, size int) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) - ptr = uintptr(r0) - return -} - -func LocalFree(ptr uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) - return -} - -func QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procQueryWorkingSet.Addr(), 3, uintptr(handle), uintptr(pv), uintptr(cb)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(hProcess), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize)) - size = uint32(r0) - if size == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) - amount = uint32(r0) - return -} - -func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = 
syscall.Errno(r0) - } - return -} - -func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(pDeviceID) - if hr != nil { - return - } - return _CMLocateDevNode(pdnDevInst, _p0, uFlags) -} - -func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), 
uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) - status = uint32(r0) - return -} - -func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) - status = uint32(r0) - return -} - -func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) - status = uint32(r0) - return -} - -func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) { - var _p0 uint32 - if singleEntry { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if restartScan { - _p1 = 1 - } else { - _p1 = 0 - } - r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) - status = uint32(r0) - return -} - -func RtlNtStatusToDosError(status uint32) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/layer.go b/vendor/github.com/Microsoft/hcsshim/layer.go deleted file mode 100644 index 8916163..0000000 --- a/vendor/github.com/Microsoft/hcsshim/layer.go +++ /dev/null @@ -1,107 +0,0 @@ -package hcsshim - -import ( - "context" 
- "crypto/sha1" - "path/filepath" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/wclayer" -) - -func layerPath(info *DriverInfo, id string) string { - return filepath.Join(info.HomeDir, id) -} - -func ActivateLayer(info DriverInfo, id string) error { - return wclayer.ActivateLayer(context.Background(), layerPath(&info, id)) -} -func CreateLayer(info DriverInfo, id, parent string) error { - return wclayer.CreateLayer(context.Background(), layerPath(&info, id), parent) -} - -// New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility. -func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { - return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) -} -func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { - return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) -} -func DeactivateLayer(info DriverInfo, id string) error { - return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id)) -} -func DestroyLayer(info DriverInfo, id string) error { - return wclayer.DestroyLayer(context.Background(), layerPath(&info, id)) -} - -// New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility. 
-func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { - return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) -} -func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error { - return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) -} -func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { - return wclayer.ExportLayer(context.Background(), layerPath(&info, layerId), exportFolderPath, parentLayerPaths) -} -func GetLayerMountPath(info DriverInfo, id string) (string, error) { - return wclayer.GetLayerMountPath(context.Background(), layerPath(&info, id)) -} -func GetSharedBaseImages() (imageData string, err error) { - return wclayer.GetSharedBaseImages(context.Background()) -} -func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error { - return wclayer.ImportLayer(context.Background(), layerPath(&info, layerID), importFolderPath, parentLayerPaths) -} -func LayerExists(info DriverInfo, id string) (bool, error) { - return wclayer.LayerExists(context.Background(), layerPath(&info, id)) -} -func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error { - return wclayer.PrepareLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) -} -func ProcessBaseLayer(path string) error { - return wclayer.ProcessBaseLayer(context.Background(), path) -} -func ProcessUtilityVMImage(path string) error { - return wclayer.ProcessUtilityVMImage(context.Background(), path) -} -func UnprepareLayer(info DriverInfo, layerId string) error { - return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId)) -} - -type DriverInfo struct { - Flavour int - HomeDir string -} - -type GUID [16]byte - -func NameToGuid(name string) (id GUID, err error) { - g, err := wclayer.NameToGuid(context.Background(), name) - return g.ToWindowsArray(), 
err -} - -func NewGUID(source string) *GUID { - h := sha1.Sum([]byte(source)) - var g GUID - copy(g[0:], h[0:16]) - return &g -} - -func (g *GUID) ToString() string { - return guid.FromWindowsArray(*g).String() -} - -type LayerReader = wclayer.LayerReader - -func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) { - return wclayer.NewLayerReader(context.Background(), layerPath(&info, layerID), parentLayerPaths) -} - -type LayerWriter = wclayer.LayerWriter - -func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { - return wclayer.NewLayerWriter(context.Background(), layerPath(&info, layerID), parentLayerPaths) -} - -type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go deleted file mode 100644 index 3ab3bcd..0000000 --- a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go +++ /dev/null @@ -1,50 +0,0 @@ -package osversion - -import ( - "fmt" - "sync" - - "golang.org/x/sys/windows" -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -var ( - osv OSVersion - once sync.Once -) - -// Get gets the operating system version on Windows. -// The calling application must be manifested to get the correct version information. -func Get() OSVersion { - once.Do(func() { - var err error - osv = OSVersion{} - osv.Version, err = windows.GetVersion() - if err != nil { - // GetVersion never fails. 
- panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - }) - return osv -} - -// Build gets the build-number on Windows -// The calling application must be manifested to get the correct version information. -func Build() uint16 { - return Get().Build -} - -func (osv OSVersion) ToString() string { - return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) -} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go deleted file mode 100644 index e9267b9..0000000 --- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go +++ /dev/null @@ -1,38 +0,0 @@ -package osversion - -const ( - // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server - // 2016 (ltsc2016) and Windows 10 (Anniversary Update). - RS1 = 14393 - - // RS2 (version 1703, codename "Redstone 2") was a client-only update, and - // corresponds to Windows 10 (Creators Update). - RS2 = 15063 - - // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server - // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). - RS3 = 16299 - - // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server - // 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). - RS4 = 17134 - - // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server - // 2019 (ltsc2019), and Windows 10 (October 2018 Update). - RS5 = 17763 - - // V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual - // channel). - V19H1 = 18362 - - // V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual - // channel). - V19H2 = 18363 - - // V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual - // channel). - V20H1 = 19041 - - // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). 
- V20H2 = 19042 -) diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/LICENSE b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE deleted file mode 100644 index 5f9d59f..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE +++ /dev/null @@ -1,22 +0,0 @@ -go-runhcs is a fork of go-runc - -The following is runc's legal notice. - ---- - -runc - -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (http://www.docker.com). - -The following is courtesy of our legal counsel: - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. 
- -For more information, please see http://www.bis.doc.gov - -See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go deleted file mode 100644 index 64491a7..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go +++ /dev/null @@ -1,173 +0,0 @@ -package runhcs - -import ( - "bytes" - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - "sync" - "sync/atomic" - - irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" - "github.com/containerd/go-runc" -) - -// Format is the type of log formatting options available. -type Format string - -const ( - none Format = "" - // Text is the default text log ouput. - Text Format = "text" - // JSON is the JSON formatted log output. - JSON Format = "json" -) - -var runhcsPath atomic.Value - -func getCommandPath() string { - const command = "runhcs.exe" - - pathi := runhcsPath.Load() - if pathi == nil { - path, err := exec.LookPath(command) - if err != nil { - // LookPath only finds current directory matches based on the - // callers current directory but the caller is not likely in the - // same directory as the containerd executables. Instead match the - // calling binaries path (a containerd shim usually) and see if they - // are side by side. If so execute the runhcs.exe found there. - if self, serr := os.Executable(); serr == nil { - testPath := filepath.Join(filepath.Dir(self), command) - if _, serr := os.Stat(testPath); serr == nil { - path = testPath - } - } - if path == "" { - // Failed to look up command just use it directly and let the - // Windows loader find it. - path = command - } - runhcsPath.Store(path) - return path - } - apath, err := filepath.Abs(path) - if err != nil { - // We couldnt make `path` an `AbsPath`. Just use `path` directly and - // let the Windows loader find it. 
- apath = path - } - runhcsPath.Store(apath) - return apath - } - return pathi.(string) -} - -var bytesBufferPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(nil) - }, -} - -func getBuf() *bytes.Buffer { - return bytesBufferPool.Get().(*bytes.Buffer) -} - -func putBuf(b *bytes.Buffer) { - b.Reset() - bytesBufferPool.Put(b) -} - -// Runhcs is the client to the runhcs cli -type Runhcs struct { - // Debug enables debug output for logging. - Debug bool - // Log sets the log file path or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-log) where internal debug information is written. - Log string - // LogFormat sets the format used by logs. - LogFormat Format - // Owner sets the compute system owner property. - Owner string - // Root is the registry key root for storage of runhcs container state. - Root string -} - -func (r *Runhcs) args() []string { - var out []string - if r.Debug { - out = append(out, "--debug") - } - if r.Log != "" { - if strings.HasPrefix(r.Log, irunhcs.SafePipePrefix) { - out = append(out, "--log", r.Log) - } else { - abs, err := filepath.Abs(r.Log) - if err == nil { - out = append(out, "--log", abs) - } - } - } - if r.LogFormat != none { - out = append(out, "--log-format", string(r.LogFormat)) - } - if r.Owner != "" { - out = append(out, "--owner", r.Owner) - } - if r.Root != "" { - out = append(out, "--root", r.Root) - } - return out -} - -func (r *Runhcs) command(context context.Context, args ...string) *exec.Cmd { - cmd := exec.CommandContext(context, getCommandPath(), append(r.args(), args...)...) - cmd.Env = os.Environ() - return cmd -} - -// runOrError will run the provided command. 
If an error is -// encountered and neither Stdout or Stderr was set the error and the -// stderr of the command will be returned in the format of : -// -func (r *Runhcs) runOrError(cmd *exec.Cmd) error { - if cmd.Stdout != nil || cmd.Stderr != nil { - ec, err := runc.Monitor.Start(cmd) - if err != nil { - return err - } - status, err := runc.Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) - } - return err - } - data, err := cmdOutput(cmd, true) - if err != nil { - return fmt.Errorf("%s: %s", err, data) - } - return nil -} - -func cmdOutput(cmd *exec.Cmd, combined bool) ([]byte, error) { - b := getBuf() - defer putBuf(b) - - cmd.Stdout = b - if combined { - cmd.Stderr = b - } - ec, err := runc.Monitor.Start(cmd) - if err != nil { - return nil, err - } - - status, err := runc.Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) - } - - return b.Bytes(), err -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go deleted file mode 100644 index 720386c..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go +++ /dev/null @@ -1,54 +0,0 @@ -package runhcs - -import ( - "context" - "errors" - "path/filepath" - "strconv" -) - -// CreateScratch creates a scratch vhdx at 'destpath' that is ext4 formatted. -func (r *Runhcs) CreateScratch(context context.Context, destpath string) error { - return r.CreateScratchWithOpts(context, destpath, nil) -} - -// CreateScratchOpts is the set of options that can be used with the -// `CreateScratchWithOpts` command. -type CreateScratchOpts struct { - // SizeGB is the size in GB of the scratch file to create. - SizeGB int - // CacheFile is the path to an existing `scratch.vhx` to copy. If - // `CacheFile` does not exit the scratch will be created. 
- CacheFile string -} - -func (opt *CreateScratchOpts) args() ([]string, error) { - var out []string - if opt.SizeGB < 0 { - return nil, errors.New("sizeGB must be >= 0") - } else if opt.SizeGB > 0 { - out = append(out, "--sizeGB", strconv.Itoa(opt.SizeGB)) - } - if opt.CacheFile != "" { - abs, err := filepath.Abs(opt.CacheFile) - if err != nil { - return nil, err - } - out = append(out, "--cache-path", abs) - } - return out, nil -} - -// CreateScratchWithOpts creates a scratch vhdx at 'destpath' that is ext4 -// formatted based on `opts`. -func (r *Runhcs) CreateScratchWithOpts(context context.Context, destpath string, opts *CreateScratchOpts) error { - args := []string{"create-scratch", "--destpath", destpath} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) - } - return r.runOrError(r.command(context, args...)) -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go deleted file mode 100644 index 20d5d40..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go +++ /dev/null @@ -1,101 +0,0 @@ -package runhcs - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" - runc "github.com/containerd/go-runc" -) - -// CreateOpts is set of options that can be used with the Create command. -type CreateOpts struct { - runc.IO - // PidFile is the path to the file to write the process id to. - PidFile string - // ShimLog is the path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs--shim-log) for the launched shim process. - ShimLog string - // VMLog is the path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs--vm-log) for the launched VM shim process. - VMLog string - // VMConsole is the path to the pipe for the VM's console (e.g. 
\\.\pipe\debugpipe) - VMConsole string -} - -func (opt *CreateOpts) args() ([]string, error) { - var out []string - if opt.PidFile != "" { - abs, err := filepath.Abs(opt.PidFile) - if err != nil { - return nil, err - } - out = append(out, "--pid-file", abs) - } - if opt.ShimLog != "" { - if strings.HasPrefix(opt.ShimLog, irunhcs.SafePipePrefix) { - out = append(out, "--shim-log", opt.ShimLog) - } else { - abs, err := filepath.Abs(opt.ShimLog) - if err != nil { - return nil, err - } - out = append(out, "--shim-log", abs) - } - } - if opt.VMLog != "" { - if strings.HasPrefix(opt.VMLog, irunhcs.SafePipePrefix) { - out = append(out, "--vm-log", opt.VMLog) - } else { - abs, err := filepath.Abs(opt.VMLog) - if err != nil { - return nil, err - } - out = append(out, "--vm-log", abs) - } - } - if opt.VMConsole != "" { - out = append(out, "--vm-console", opt.VMConsole) - } - return out, nil -} - -// Create creates a new container and returns its pid if it was created -// successfully. -func (r *Runhcs) Create(context context.Context, id, bundle string, opts *CreateOpts) error { - args := []string{"create", "--bundle", bundle} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) - } - cmd := r.command(context, append(args, id)...) 
- if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - if cmd.Stdout == nil && cmd.Stderr == nil { - data, err := cmdOutput(cmd, true) - if err != nil { - return fmt.Errorf("%s: %s", err, data) - } - return nil - } - ec, err := runc.Monitor.Start(cmd) - if err != nil { - return err - } - if opts != nil && opts.IO != nil { - if c, ok := opts.IO.(runc.StartCloser); ok { - if err := c.CloseAfterStart(); err != nil { - return err - } - } - } - status, err := runc.Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) - } - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go deleted file mode 100644 index 08b82bb..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go +++ /dev/null @@ -1,33 +0,0 @@ -package runhcs - -import ( - "context" -) - -// DeleteOpts is set of options that can be used with the Delete command. -type DeleteOpts struct { - // Force forcibly deletes the container if it is still running (uses SIGKILL). - Force bool -} - -func (opt *DeleteOpts) args() ([]string, error) { - var out []string - if opt.Force { - out = append(out, "--force") - } - return out, nil -} - -// Delete any resources held by the container often used with detached -// containers. -func (r *Runhcs) Delete(context context.Context, id string, opts *DeleteOpts) error { - args := []string{"delete"} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) 
- } - return r.runOrError(r.command(context, append(args, id)...)) -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go deleted file mode 100644 index 090a0a3..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go +++ /dev/null @@ -1,88 +0,0 @@ -package runhcs - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" - "github.com/containerd/go-runc" -) - -// ExecOpts is set of options that can be used with the Exec command. -type ExecOpts struct { - runc.IO - // Detach from the container's process. - Detach bool - // PidFile is the path to the file to write the process id to. - PidFile string - // ShimLog is the path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs---log) for the launched shim process. - ShimLog string -} - -func (opt *ExecOpts) args() ([]string, error) { - var out []string - if opt.Detach { - out = append(out, "--detach") - } - if opt.PidFile != "" { - abs, err := filepath.Abs(opt.PidFile) - if err != nil { - return nil, err - } - out = append(out, "--pid-file", abs) - } - if opt.ShimLog != "" { - if strings.HasPrefix(opt.ShimLog, irunhcs.SafePipePrefix) { - out = append(out, "--shim-log", opt.ShimLog) - } else { - abs, err := filepath.Abs(opt.ShimLog) - if err != nil { - return nil, err - } - out = append(out, "--shim-log", abs) - } - } - return out, nil -} - -// Exec executes an additional process inside the container based on the -// oci.Process spec found at processFile. -func (r *Runhcs) Exec(context context.Context, id, processFile string, opts *ExecOpts) error { - args := []string{"exec", "--process", processFile} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) - } - cmd := r.command(context, append(args, id)...) 
- if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - if cmd.Stdout == nil && cmd.Stderr == nil { - data, err := cmdOutput(cmd, true) - if err != nil { - return fmt.Errorf("%s: %s", err, data) - } - return nil - } - ec, err := runc.Monitor.Start(cmd) - if err != nil { - return err - } - if opts != nil && opts.IO != nil { - if c, ok := opts.IO.(runc.StartCloser); ok { - if err := c.CloseAfterStart(); err != nil { - return err - } - } - } - status, err := runc.Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) - } - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go deleted file mode 100644 index 021e5b1..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go +++ /dev/null @@ -1,11 +0,0 @@ -package runhcs - -import ( - "context" -) - -// Kill sends the specified signal (default: SIGTERM) to the container's init -// process. -func (r *Runhcs) Kill(context context.Context, id, signal string) error { - return r.runOrError(r.command(context, "kill", id, signal)) -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go deleted file mode 100644 index 3b92080..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go +++ /dev/null @@ -1,28 +0,0 @@ -package runhcs - -import ( - "context" - "encoding/json" - - irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" -) - -// ContainerState is the representation of the containers state at the moment of -// query. -type ContainerState = irunhcs.ContainerState - -// List containers started by runhcs. -// -// Note: This is specific to the Runhcs.Root namespace provided in the global -// settings. 
-func (r *Runhcs) List(context context.Context) ([]*ContainerState, error) { - data, err := cmdOutput(r.command(context, "list", "--format=json"), false) - if err != nil { - return nil, err - } - var out []*ContainerState - if err := json.Unmarshal(data, &out); err != nil { - return nil, err - } - return out, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go deleted file mode 100644 index 56392fa..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go +++ /dev/null @@ -1,10 +0,0 @@ -package runhcs - -import ( - "context" -) - -// Pause suspends all processes inside the container. -func (r *Runhcs) Pause(context context.Context, id string) error { - return r.runOrError(r.command(context, "pause", id)) -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go deleted file mode 100644 index 4dc9f14..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go +++ /dev/null @@ -1,20 +0,0 @@ -package runhcs - -import ( - "context" - "encoding/json" - "fmt" -) - -// Ps displays the processes running inside a container. 
-func (r *Runhcs) Ps(context context.Context, id string) ([]int, error) { - data, err := cmdOutput(r.command(context, "ps", "--format=json", id), true) - if err != nil { - return nil, fmt.Errorf("%s: %s", err, data) - } - var out []int - if err := json.Unmarshal(data, &out); err != nil { - return nil, err - } - return out, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go deleted file mode 100644 index b9f9049..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go +++ /dev/null @@ -1,33 +0,0 @@ -package runhcs - -import ( - "context" - "strconv" -) - -// ResizeTTYOpts is set of options that can be used with the ResizeTTY command. -type ResizeTTYOpts struct { - // Pid is the process pid (defaults to init pid). - Pid *int -} - -func (opt *ResizeTTYOpts) args() ([]string, error) { - var out []string - if opt.Pid != nil { - out = append(out, "--pid", strconv.Itoa(*opt.Pid)) - } - return out, nil -} - -// ResizeTTY updates the terminal size for a container process. -func (r *Runhcs) ResizeTTY(context context.Context, id string, width, height uint16, opts *ResizeTTYOpts) error { - args := []string{"resize-tty"} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) - } - return r.runOrError(r.command(context, append(args, id, strconv.FormatUint(uint64(width), 10), strconv.FormatUint(uint64(height), 10))...)) -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go deleted file mode 100644 index 1fdeb87..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go +++ /dev/null @@ -1,10 +0,0 @@ -package runhcs - -import ( - "context" -) - -// Resume resumes all processes that have been previously paused. 
-func (r *Runhcs) Resume(context context.Context, id string) error { - return r.runOrError(r.command(context, "resume", id)) -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go deleted file mode 100644 index ad3df74..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go +++ /dev/null @@ -1,10 +0,0 @@ -package runhcs - -import ( - "context" -) - -// Start will start an already created container. -func (r *Runhcs) Start(context context.Context, id string) error { - return r.runOrError(r.command(context, "start", id)) -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go deleted file mode 100644 index b22bb07..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go +++ /dev/null @@ -1,20 +0,0 @@ -package runhcs - -import ( - "context" - "encoding/json" - "fmt" -) - -// State outputs the state of a container. -func (r *Runhcs) State(context context.Context, id string) (*ContainerState, error) { - data, err := cmdOutput(r.command(context, "state", id), true) - if err != nil { - return nil, fmt.Errorf("%s: %s", err, data) - } - var out ContainerState - if err := json.Unmarshal(data, &out); err != nil { - return nil, err - } - return &out, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go b/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go deleted file mode 100644 index e3f1be3..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go +++ /dev/null @@ -1,88 +0,0 @@ -// Package ociwclayer provides functions for importing and exporting Windows -// container layers from and to their OCI tar representation. 
-package ociwclayer - -import ( - "archive/tar" - "context" - "io" - "path/filepath" - - "github.com/Microsoft/go-winio/backuptar" - "github.com/Microsoft/hcsshim" -) - -var driverInfo = hcsshim.DriverInfo{} - -// ExportLayerToTar writes an OCI layer tar stream from the provided on-disk layer. -// The caller must specify the parent layers, if any, ordered from lowest to -// highest layer. -// -// The layer will be mounted for this process, so the caller should ensure that -// it is not currently mounted. -func ExportLayerToTar(ctx context.Context, w io.Writer, path string, parentLayerPaths []string) error { - err := hcsshim.ActivateLayer(driverInfo, path) - if err != nil { - return err - } - defer func() { - _ = hcsshim.DeactivateLayer(driverInfo, path) - }() - - // Prepare and unprepare the layer to ensure that it has been initialized. - err = hcsshim.PrepareLayer(driverInfo, path, parentLayerPaths) - if err != nil { - return err - } - err = hcsshim.UnprepareLayer(driverInfo, path) - if err != nil { - return err - } - - r, err := hcsshim.NewLayerReader(driverInfo, path, parentLayerPaths) - if err != nil { - return err - } - - err = writeTarFromLayer(ctx, r, w) - cerr := r.Close() - if err != nil { - return err - } - return cerr -} - -func writeTarFromLayer(ctx context.Context, r hcsshim.LayerReader, w io.Writer) error { - t := tar.NewWriter(w) - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - name, size, fileInfo, err := r.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - if fileInfo == nil { - // Write a whiteout file. 
- hdr := &tar.Header{ - Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), whiteoutPrefix+filepath.Base(name))), - } - err := t.WriteHeader(hdr) - if err != nil { - return err - } - } else { - err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) - if err != nil { - return err - } - } - } - return t.Close() -} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go b/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go deleted file mode 100644 index e74a6b5..0000000 --- a/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go +++ /dev/null @@ -1,148 +0,0 @@ -package ociwclayer - -import ( - "archive/tar" - "bufio" - "context" - "io" - "os" - "path" - "path/filepath" - "strings" - - winio "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/backuptar" - "github.com/Microsoft/hcsshim" -) - -const whiteoutPrefix = ".wh." - -var ( - // mutatedFiles is a list of files that are mutated by the import process - // and must be backed up and restored. - mutatedFiles = map[string]string{ - "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", - } -) - -// ImportLayerFromTar reads a layer from an OCI layer tar stream and extracts it to the -// specified path. The caller must specify the parent layers, if any, ordered -// from lowest to highest layer. -// -// The caller must ensure that the thread or process has acquired backup and -// restore privileges. -// -// This function returns the total size of the layer's files, in bytes. 
-func ImportLayerFromTar(ctx context.Context, r io.Reader, path string, parentLayerPaths []string) (int64, error) { - err := os.MkdirAll(path, 0) - if err != nil { - return 0, err - } - w, err := hcsshim.NewLayerWriter(hcsshim.DriverInfo{}, path, parentLayerPaths) - if err != nil { - return 0, err - } - n, err := writeLayerFromTar(ctx, r, w, path) - cerr := w.Close() - if err != nil { - return 0, err - } - if cerr != nil { - return 0, cerr - } - return n, nil -} - -func writeLayerFromTar(ctx context.Context, r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { - t := tar.NewReader(r) - hdr, err := t.Next() - totalSize := int64(0) - buf := bufio.NewWriter(nil) - for err == nil { - select { - case <-ctx.Done(): - return 0, ctx.Err() - default: - } - - base := path.Base(hdr.Name) - if strings.HasPrefix(base, whiteoutPrefix) { - name := path.Join(path.Dir(hdr.Name), base[len(whiteoutPrefix):]) - err = w.Remove(filepath.FromSlash(name)) - if err != nil { - return 0, err - } - hdr, err = t.Next() - } else if hdr.Typeflag == tar.TypeLink { - err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) - if err != nil { - return 0, err - } - hdr, err = t.Next() - } else { - var ( - name string - size int64 - fileInfo *winio.FileBasicInfo - ) - name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) - if err != nil { - return 0, err - } - err = w.Add(filepath.FromSlash(name), fileInfo) - if err != nil { - return 0, err - } - hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) - totalSize += size - } - } - if err != io.EOF { - return 0, err - } - return totalSize, nil -} - -// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and -// writes it to a backup stream, and also saves any files that will be mutated -// by the import layer process to a backup location. 
-func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { - var bcdBackup *os.File - var bcdBackupWriter *winio.BackupFileWriter - if backupPath, ok := mutatedFiles[hdr.Name]; ok { - bcdBackup, err = os.Create(filepath.Join(root, backupPath)) - if err != nil { - return nil, err - } - defer func() { - cerr := bcdBackup.Close() - if err == nil { - err = cerr - } - }() - - bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) - defer func() { - cerr := bcdBackupWriter.Close() - if err == nil { - err = cerr - } - }() - - buf.Reset(io.MultiWriter(w, bcdBackupWriter)) - } else { - buf.Reset(w) - } - - defer func() { - ferr := buf.Flush() - if err == nil { - err = ferr - } - }() - - return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) -} diff --git a/vendor/github.com/Microsoft/hcsshim/process.go b/vendor/github.com/Microsoft/hcsshim/process.go deleted file mode 100644 index 3362c68..0000000 --- a/vendor/github.com/Microsoft/hcsshim/process.go +++ /dev/null @@ -1,98 +0,0 @@ -package hcsshim - -import ( - "context" - "io" - "sync" - "time" - - "github.com/Microsoft/hcsshim/internal/hcs" -) - -// ContainerError is an error encountered in HCS -type process struct { - p *hcs.Process - waitOnce sync.Once - waitCh chan struct{} - waitErr error -} - -// Pid returns the process ID of the process within the container. -func (process *process) Pid() int { - return process.p.Pid() -} - -// Kill signals the process to terminate but does not wait for it to finish terminating. -func (process *process) Kill() error { - found, err := process.p.Kill(context.Background()) - if err != nil { - return convertProcessError(err, process) - } - if !found { - return &ProcessError{Process: process, Err: ErrElementNotFound, Operation: "hcsshim::Process::Kill"} - } - return nil -} - -// Wait waits for the process to exit. 
-func (process *process) Wait() error { - return convertProcessError(process.p.Wait(), process) -} - -// WaitTimeout waits for the process to exit or the duration to elapse. It returns -// false if timeout occurs. -func (process *process) WaitTimeout(timeout time.Duration) error { - process.waitOnce.Do(func() { - process.waitCh = make(chan struct{}) - go func() { - process.waitErr = process.Wait() - close(process.waitCh) - }() - }) - t := time.NewTimer(timeout) - defer t.Stop() - select { - case <-t.C: - return &ProcessError{Process: process, Err: ErrTimeout, Operation: "hcsshim::Process::Wait"} - case <-process.waitCh: - return process.waitErr - } -} - -// ExitCode returns the exit code of the process. The process must have -// already terminated. -func (process *process) ExitCode() (int, error) { - code, err := process.p.ExitCode() - if err != nil { - err = convertProcessError(err, process) - } - return code, err -} - -// ResizeConsole resizes the console of the process. -func (process *process) ResizeConsole(width, height uint16) error { - return convertProcessError(process.p.ResizeConsole(context.Background(), width, height), process) -} - -// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing -// these pipes does not close the underlying pipes; it should be possible to -// call this multiple times to get multiple interfaces. -func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) { - stdin, stdout, stderr, err := process.p.StdioLegacy() - if err != nil { - err = convertProcessError(err, process) - } - return stdin, stdout, stderr, err -} - -// CloseStdin closes the write side of the stdin pipe so that the process is -// notified on the read side that there is no more data in stdin. -func (process *process) CloseStdin() error { - return convertProcessError(process.p.CloseStdin(context.Background()), process) -} - -// Close cleans up any state associated with the process but does not kill -// or wait on it. 
-func (process *process) Close() error { - return convertProcessError(process.p.Close(), process) -} diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go deleted file mode 100644 index 8bed848..0000000 --- a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package hcsshim - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") - - procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") -) - -func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { - r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177b..0000000 --- a/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287..0000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 
-31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 
-2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 
-5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index d7d14f8..0000000 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,316 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. 
-func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. 
- targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. 
This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? 
- } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/bits-and-blooms/bitset/.gitignore b/vendor/github.com/bits-and-blooms/bitset/.gitignore deleted file mode 100644 index 5c204d2..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -target diff --git a/vendor/github.com/bits-and-blooms/bitset/.travis.yml b/vendor/github.com/bits-and-blooms/bitset/.travis.yml deleted file mode 100644 index 094aa5c..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -language: go - -sudo: false - -branches: - except: - - release - -branches: - only: - - master - - travis - -go: - - "1.11.x" - - tip - -matrix: - 
allow_failures: - - go: tip - -before_install: - - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi; - - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi; - - go get github.com/mattn/goveralls - -before_script: - - make deps - -script: - - make qa - -after_failure: - - cat ./target/test/report.xml - -after_success: - - if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi; diff --git a/vendor/github.com/bits-and-blooms/bitset/LICENSE b/vendor/github.com/bits-and-blooms/bitset/LICENSE deleted file mode 100644 index 59cab8a..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014 Will Fitzgerald. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/bits-and-blooms/bitset/README.md b/vendor/github.com/bits-and-blooms/bitset/README.md deleted file mode 100644 index 97e8307..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# bitset - -*Go language library to map between non-negative integers and boolean values* - -[![Test](https://github.com/bits-and-blooms/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest) -[![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/bits-and-blooms/bitset?tab=doc)](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc) - - -## Description - -Package bitset implements bitsets, a mapping between non-negative integers and boolean values. -It should be more efficient than map[uint] bool. - -It provides methods for setting, clearing, flipping, and testing individual integers. - -But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits. - -BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. 
On creation, a hint can be given for the number of bits that will be used. - -Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining. - -### Example use: - -```go -package main - -import ( - "fmt" - "math/rand" - - "github.com/bits-and-blooms/bitset" -) - -func main() { - fmt.Printf("Hello from BitSet!\n") - var b bitset.BitSet - // play some Go Fish - for i := 0; i < 100; i++ { - card1 := uint(rand.Intn(52)) - card2 := uint(rand.Intn(52)) - b.Set(card1) - if b.Test(card2) { - fmt.Println("Go Fish!") - } - b.Clear(card1) - } - - // Chaining - b.Set(10).Set(11) - - for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) { - fmt.Println("The following bit is set:", i) - } - if b.Intersection(bitset.New(100).Set(10)).Count() == 1 { - fmt.Println("Intersection works.") - } else { - fmt.Println("Intersection doesn't work???") - } -} -``` - -As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets. - -Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc - -## Memory Usage - -The memory usage of a bitset using N bits is at least N/8 bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring). - -## Implementation Note - -Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed. - -It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `unit64`). If so, the version will be bumped. 
- -## Installation - -```bash -go get github.com/bits-and-blooms/bitset -``` - -## Contributing - -If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)") - -## Running all tests - -Before committing the code, please check if it passes tests, has adequate coverage, etc. -```bash -go test -go test -cover -``` diff --git a/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml deleted file mode 100644 index f9b2959..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml +++ /dev/null @@ -1,39 +0,0 @@ -# Go -# Build your Go project. -# Add steps that test, save build artifacts, deploy, and more: -# https://docs.microsoft.com/azure/devops/pipelines/languages/go - -trigger: -- master - -pool: - vmImage: 'Ubuntu-16.04' - -variables: - GOBIN: '$(GOPATH)/bin' # Go binaries path - GOROOT: '/usr/local/go1.11' # Go installation path - GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path - modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code - -steps: -- script: | - mkdir -p '$(GOBIN)' - mkdir -p '$(GOPATH)/pkg' - mkdir -p '$(modulePath)' - shopt -s extglob - shopt -s dotglob - mv !(gopath) '$(modulePath)' - echo '##vso[task.prependpath]$(GOBIN)' - echo '##vso[task.prependpath]$(GOROOT)/bin' - displayName: 'Set up the Go workspace' - -- script: | - go version - go get -v -t -d ./... - if [ -f Gopkg.toml ]; then - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - dep ensure - fi - go build -v . 
- workingDirectory: '$(modulePath)' - displayName: 'Get dependencies, then build' diff --git a/vendor/github.com/bits-and-blooms/bitset/bitset.go b/vendor/github.com/bits-and-blooms/bitset/bitset.go deleted file mode 100644 index d688806..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/bitset.go +++ /dev/null @@ -1,952 +0,0 @@ -/* -Package bitset implements bitsets, a mapping -between non-negative integers and boolean values. It should be more -efficient than map[uint] bool. - -It provides methods for setting, clearing, flipping, and testing -individual integers. - -But it also provides set intersection, union, difference, -complement, and symmetric operations, as well as tests to -check whether any, all, or no bits are set, and querying a -bitset's current length and number of positive bits. - -BitSets are expanded to the size of the largest set bit; the -memory allocation is approximately Max bits, where Max is -the largest set bit. BitSets are never shrunk. On creation, -a hint can be given for the number of bits that will be used. - -Many of the methods, including Set,Clear, and Flip, return -a BitSet pointer, which allows for chaining. - -Example use: - - import "bitset" - var b BitSet - b.Set(10).Set(11) - if b.Test(1000) { - b.Clear(1000) - } - if B.Intersection(bitset.New(100).Set(10)).Count() > 1 { - fmt.Println("Intersection works.") - } - -As an alternative to BitSets, one should check out the 'big' package, -which provides a (less set-theoretical) view of bitsets. 
- -*/ -package bitset - -import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// the wordSize of a bit set -const wordSize = uint(64) - -// log2WordSize is lg(wordSize) -const log2WordSize = uint(6) - -// allBits has every bit set -const allBits uint64 = 0xffffffffffffffff - -// default binary BigEndian -var binaryOrder binary.ByteOrder = binary.BigEndian - -// default json encoding base64.URLEncoding -var base64Encoding = base64.URLEncoding - -// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding) -func Base64StdEncoding() { base64Encoding = base64.StdEncoding } - -// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian) -func LittleEndian() { binaryOrder = binary.LittleEndian } - -// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0. -type BitSet struct { - length uint - set []uint64 -} - -// Error is used to distinguish errors (panics) generated in this package. 
-type Error string - -// safeSet will fixup b.set to be non-nil and return the field value -func (b *BitSet) safeSet() []uint64 { - if b.set == nil { - b.set = make([]uint64, wordsNeeded(0)) - } - return b.set -} - -// From is a constructor used to create a BitSet from an array of integers -func From(buf []uint64) *BitSet { - return &BitSet{uint(len(buf)) * 64, buf} -} - -// Bytes returns the bitset as array of integers -func (b *BitSet) Bytes() []uint64 { - return b.set -} - -// wordsNeeded calculates the number of words needed for i bits -func wordsNeeded(i uint) int { - if i > (Cap() - wordSize + 1) { - return int(Cap() >> log2WordSize) - } - return int((i + (wordSize - 1)) >> log2WordSize) -} - -// New creates a new BitSet with a hint that length bits will be required -func New(length uint) (bset *BitSet) { - defer func() { - if r := recover(); r != nil { - bset = &BitSet{ - 0, - make([]uint64, 0), - } - } - }() - - bset = &BitSet{ - length, - make([]uint64, wordsNeeded(length)), - } - - return bset -} - -// Cap returns the total possible capacity, or number of bits -func Cap() uint { - return ^uint(0) -} - -// Len returns the number of bits in the BitSet. -// Note the difference to method Count, see example. -func (b *BitSet) Len() uint { - return b.length -} - -// extendSetMaybe adds additional words to incorporate new bits if needed -func (b *BitSet) extendSetMaybe(i uint) { - if i >= b.length { // if we need more bits, make 'em - if i >= Cap() { - panic("You are exceeding the capacity") - } - nsize := wordsNeeded(i + 1) - if b.set == nil { - b.set = make([]uint64, nsize) - } else if cap(b.set) >= nsize { - b.set = b.set[:nsize] // fast resize - } else if len(b.set) < nsize { - newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x - copy(newset, b.set) - b.set = newset - } - b.length = i + 1 - } -} - -// Test whether bit i is set. 
-func (b *BitSet) Test(i uint) bool { - if i >= b.length { - return false - } - return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0 -} - -// Set bit i to 1, the capacity of the bitset is automatically -// increased accordingly. -// If i>= Cap(), this function will panic. -// Warning: using a very large value for 'i' -// may lead to a memory shortage and a panic: the caller is responsible -// for providing sensible parameters in line with their memory capacity. -func (b *BitSet) Set(i uint) *BitSet { - b.extendSetMaybe(i) - b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1)) - return b -} - -// Clear bit i to 0 -func (b *BitSet) Clear(i uint) *BitSet { - if i >= b.length { - return b - } - b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1)) - return b -} - -// SetTo sets bit i to value. -// If i>= Cap(), this function will panic. -// Warning: using a very large value for 'i' -// may lead to a memory shortage and a panic: the caller is responsible -// for providing sensible parameters in line with their memory capacity. -func (b *BitSet) SetTo(i uint, value bool) *BitSet { - if value { - return b.Set(i) - } - return b.Clear(i) -} - -// Flip bit at i. -// If i>= Cap(), this function will panic. -// Warning: using a very large value for 'i' -// may lead to a memory shortage and a panic: the caller is responsible -// for providing sensible parameters in line with their memory capacity. -func (b *BitSet) Flip(i uint) *BitSet { - if i >= b.length { - return b.Set(i) - } - b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1)) - return b -} - -// FlipRange bit in [start, end). -// If end>= Cap(), this function will panic. -// Warning: using a very large value for 'end' -// may lead to a memory shortage and a panic: the caller is responsible -// for providing sensible parameters in line with their memory capacity. 
-func (b *BitSet) FlipRange(start, end uint) *BitSet { - if start >= end { - return b - } - - b.extendSetMaybe(end - 1) - var startWord uint = start >> log2WordSize - var endWord uint = end >> log2WordSize - b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1))) - for i := startWord; i < endWord; i++ { - b.set[i] = ^b.set[i] - } - b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1)) - return b -} - -// Shrink shrinks BitSet so that the provided value is the last possible -// set value. It clears all bits > the provided index and reduces the size -// and length of the set. -// -// Note that the parameter value is not the new length in bits: it is the -// maximal value that can be stored in the bitset after the function call. -// The new length in bits is the parameter value + 1. Thus it is not possible -// to use this function to set the length to 0, the minimal value of the length -// after this function call is 1. -// -// A new slice is allocated to store the new bits, so you may see an increase in -// memory usage until the GC runs. Normally this should not be a problem, but if you -// have an extremely large BitSet its important to understand that the old BitSet will -// remain in memory until the GC frees it. -func (b *BitSet) Shrink(lastbitindex uint) *BitSet { - length := lastbitindex + 1 - idx := wordsNeeded(length) - if idx > len(b.set) { - return b - } - shrunk := make([]uint64, idx) - copy(shrunk, b.set[:idx]) - b.set = shrunk - b.length = length - b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1)))) - return b -} - -// Compact shrinks BitSet to so that we preserve all set bits, while minimizing -// memory usage. Compact calls Shrink. 
-func (b *BitSet) Compact() *BitSet { - idx := len(b.set) - 1 - for ; idx >= 0 && b.set[idx] == 0; idx-- { - } - newlength := uint((idx + 1) << log2WordSize) - if newlength >= b.length { - return b // nothing to do - } - if newlength > 0 { - return b.Shrink(newlength - 1) - } - // We preserve one word - return b.Shrink(63) -} - -// InsertAt takes an index which indicates where a bit should be -// inserted. Then it shifts all the bits in the set to the left by 1, starting -// from the given index position, and sets the index position to 0. -// -// Depending on the size of your BitSet, and where you are inserting the new entry, -// this method could be extremely slow and in some cases might cause the entire BitSet -// to be recopied. -func (b *BitSet) InsertAt(idx uint) *BitSet { - insertAtElement := (idx >> log2WordSize) - - // if length of set is a multiple of wordSize we need to allocate more space first - if b.isLenExactMultiple() { - b.set = append(b.set, uint64(0)) - } - - var i uint - for i = uint(len(b.set) - 1); i > insertAtElement; i-- { - // all elements above the position where we want to insert can simply by shifted - b.set[i] <<= 1 - - // we take the most significant bit of the previous element and set it as - // the least significant bit of the current element - b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63 - } - - // generate a mask to extract the data that we need to shift left - // within the element where we insert a bit - dataMask := ^(uint64(1)< 0x40000 { - buffer.WriteString("...") - break - } - buffer.WriteString(strconv.FormatInt(int64(i), 10)) - i, e = b.NextSet(i + 1) - if e { - buffer.WriteString(",") - } - } - buffer.WriteString("}") - return buffer.String() -} - -// DeleteAt deletes the bit at the given index position from -// within the bitset -// All the bits residing on the left of the deleted bit get -// shifted right by 1 -// The running time of this operation may potentially be -// relatively slow, O(length) -func (b *BitSet) 
DeleteAt(i uint) *BitSet { - // the index of the slice element where we'll delete a bit - deleteAtElement := i >> log2WordSize - - // generate a mask for the data that needs to be shifted right - // within that slice element that gets modified - dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1) - - // extract the data that we'll shift right from the slice element - data := b.set[deleteAtElement] & dataMask - - // set the masked area to 0 while leaving the rest as it is - b.set[deleteAtElement] &= ^dataMask - - // shift the previously extracted data to the right and then - // set it in the previously masked area - b.set[deleteAtElement] |= (data >> 1) & dataMask - - // loop over all the consecutive slice elements to copy each - // lowest bit into the highest position of the previous element, - // then shift the entire content to the right by 1 - for i := int(deleteAtElement) + 1; i < len(b.set); i++ { - b.set[i-1] |= (b.set[i] & 1) << 63 - b.set[i] >>= 1 - } - - b.length = b.length - 1 - - return b -} - -// NextSet returns the next bit set from the specified index, -// including possibly the current index -// along with an error code (true = valid, false = no set bit found) -// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...} -// -// Users concerned with performance may want to use NextSetMany to -// retrieve several values at once. -func (b *BitSet) NextSet(i uint) (uint, bool) { - x := int(i >> log2WordSize) - if x >= len(b.set) { - return 0, false - } - w := b.set[x] - w = w >> (i & (wordSize - 1)) - if w != 0 { - return i + trailingZeroes64(w), true - } - x = x + 1 - for x < len(b.set) { - if b.set[x] != 0 { - return uint(x)*wordSize + trailingZeroes64(b.set[x]), true - } - x = x + 1 - - } - return 0, false -} - -// NextSetMany returns many next bit sets from the specified index, -// including possibly the current index and up to cap(buffer). 
-// If the returned slice has len zero, then no more set bits were found -// -// buffer := make([]uint, 256) // this should be reused -// j := uint(0) -// j, buffer = bitmap.NextSetMany(j, buffer) -// for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j,buffer) { -// for k := range buffer { -// do something with buffer[k] -// } -// j += 1 -// } -// -// -// It is possible to retrieve all set bits as follow: -// -// indices := make([]uint, bitmap.Count()) -// bitmap.NextSetMany(0, indices) -// -// However if bitmap.Count() is large, it might be preferable to -// use several calls to NextSetMany, for performance reasons. -func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) { - myanswer := buffer - capacity := cap(buffer) - x := int(i >> log2WordSize) - if x >= len(b.set) || capacity == 0 { - return 0, myanswer[:0] - } - skip := i & (wordSize - 1) - word := b.set[x] >> skip - myanswer = myanswer[:capacity] - size := int(0) - for word != 0 { - r := trailingZeroes64(word) - t := word & ((^word) + 1) - myanswer[size] = r + i - size++ - if size == capacity { - goto End - } - word = word ^ t - } - x++ - for idx, word := range b.set[x:] { - for word != 0 { - r := trailingZeroes64(word) - t := word & ((^word) + 1) - myanswer[size] = r + (uint(x+idx) << 6) - size++ - if size == capacity { - goto End - } - word = word ^ t - } - } -End: - if size > 0 { - return myanswer[size-1], myanswer[:size] - } - return 0, myanswer[:0] -} - -// NextClear returns the next clear bit from the specified index, -// including possibly the current index -// along with an error code (true = valid, false = no bit found i.e. 
all bits are set) -func (b *BitSet) NextClear(i uint) (uint, bool) { - x := int(i >> log2WordSize) - if x >= len(b.set) { - return 0, false - } - w := b.set[x] - w = w >> (i & (wordSize - 1)) - wA := allBits >> (i & (wordSize - 1)) - index := i + trailingZeroes64(^w) - if w != wA && index < b.length { - return index, true - } - x++ - for x < len(b.set) { - index = uint(x)*wordSize + trailingZeroes64(^b.set[x]) - if b.set[x] != allBits && index < b.length { - return index, true - } - x++ - } - return 0, false -} - -// ClearAll clears the entire BitSet -func (b *BitSet) ClearAll() *BitSet { - if b != nil && b.set != nil { - for i := range b.set { - b.set[i] = 0 - } - } - return b -} - -// wordCount returns the number of words used in a bit set -func (b *BitSet) wordCount() int { - return len(b.set) -} - -// Clone this BitSet -func (b *BitSet) Clone() *BitSet { - c := New(b.length) - if b.set != nil { // Clone should not modify current object - copy(c.set, b.set) - } - return c -} - -// Copy into a destination BitSet -// Returning the size of the destination BitSet -// like array copy -func (b *BitSet) Copy(c *BitSet) (count uint) { - if c == nil { - return - } - if b.set != nil { // Copy should not modify current object - copy(c.set, b.set) - } - count = c.length - if b.length < c.length { - count = b.length - } - return -} - -// Count (number of set bits). -// Also known as "popcount" or "population count". -func (b *BitSet) Count() uint { - if b != nil && b.set != nil { - return uint(popcntSlice(b.set)) - } - return 0 -} - -// Equal tests the equivalence of two BitSets. 
-// False if they are of different sizes, otherwise true -// only if all the same bits are set -func (b *BitSet) Equal(c *BitSet) bool { - if c == nil || b == nil { - return c == b - } - if b.length != c.length { - return false - } - if b.length == 0 { // if they have both length == 0, then could have nil set - return true - } - // testing for equality shoud not transform the bitset (no call to safeSet) - - for p, v := range b.set { - if c.set[p] != v { - return false - } - } - return true -} - -func panicIfNull(b *BitSet) { - if b == nil { - panic(Error("BitSet must not be null")) - } -} - -// Difference of base set and other set -// This is the BitSet equivalent of &^ (and not) -func (b *BitSet) Difference(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - result = b.Clone() // clone b (in case b is bigger than compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - result.set[i] = b.set[i] &^ compare.set[i] - } - return -} - -// DifferenceCardinality computes the cardinality of the differnce -func (b *BitSet) DifferenceCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - cnt := uint64(0) - cnt += popcntMaskSlice(b.set[:l], compare.set[:l]) - cnt += popcntSlice(b.set[l:]) - return uint(cnt) -} - -// InPlaceDifference computes the difference of base set and other set -// This is the BitSet equivalent of &^ (and not) -func (b *BitSet) InPlaceDifference(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - b.set[i] &^= compare.set[i] - } -} - -// Convenience function: return two bitsets ordered by -// increasing length. 
Note: neither can be nil -func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) { - if a.length <= b.length { - ap, bp = a, b - } else { - ap, bp = b, a - } - return -} - -// Intersection of base set and other set -// This is the BitSet equivalent of & (and) -func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - result = New(b.length) - for i, word := range b.set { - result.set[i] = word & compare.set[i] - } - return -} - -// IntersectionCardinality computes the cardinality of the union -func (b *BitSet) IntersectionCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntAndSlice(b.set, compare.set) - return uint(cnt) -} - -// InPlaceIntersection destructively computes the intersection of -// base set and the compare set. -// This is the BitSet equivalent of & (and) -func (b *BitSet) InPlaceIntersection(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - b.set[i] &= compare.set[i] - } - for i := l; i < len(b.set); i++ { - b.set[i] = 0 - } - if compare.length > 0 { - b.extendSetMaybe(compare.length - 1) - } -} - -// Union of base set and other set -// This is the BitSet equivalent of | (or) -func (b *BitSet) Union(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - result = compare.Clone() - for i, word := range b.set { - result.set[i] = word | compare.set[i] - } - return -} - -// UnionCardinality computes the cardinality of the uniton of the base set -// and the compare set. 
-func (b *BitSet) UnionCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntOrSlice(b.set, compare.set) - if len(compare.set) > len(b.set) { - cnt += popcntSlice(compare.set[len(b.set):]) - } - return uint(cnt) -} - -// InPlaceUnion creates the destructive union of base set and compare set. -// This is the BitSet equivalent of | (or). -func (b *BitSet) InPlaceUnion(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - if compare.length > 0 { - b.extendSetMaybe(compare.length - 1) - } - for i := 0; i < l; i++ { - b.set[i] |= compare.set[i] - } - if len(compare.set) > l { - for i := l; i < len(compare.set); i++ { - b.set[i] = compare.set[i] - } - } -} - -// SymmetricDifference of base set and other set -// This is the BitSet equivalent of ^ (xor) -func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - // compare is bigger, so clone it - result = compare.Clone() - for i, word := range b.set { - result.set[i] = word ^ compare.set[i] - } - return -} - -// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference -func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntXorSlice(b.set, compare.set) - if len(compare.set) > len(b.set) { - cnt += popcntSlice(compare.set[len(b.set):]) - } - return uint(cnt) -} - -// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set -// This is the BitSet equivalent of ^ (xor) -func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - if compare.length > 0 
{ - b.extendSetMaybe(compare.length - 1) - } - for i := 0; i < l; i++ { - b.set[i] ^= compare.set[i] - } - if len(compare.set) > l { - for i := l; i < len(compare.set); i++ { - b.set[i] = compare.set[i] - } - } -} - -// Is the length an exact multiple of word sizes? -func (b *BitSet) isLenExactMultiple() bool { - return b.length%wordSize == 0 -} - -// Clean last word by setting unused bits to 0 -func (b *BitSet) cleanLastWord() { - if !b.isLenExactMultiple() { - b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize) - } -} - -// Complement computes the (local) complement of a biset (up to length bits) -func (b *BitSet) Complement() (result *BitSet) { - panicIfNull(b) - result = New(b.length) - for i, word := range b.set { - result.set[i] = ^word - } - result.cleanLastWord() - return -} - -// All returns true if all bits are set, false otherwise. Returns true for -// empty sets. -func (b *BitSet) All() bool { - panicIfNull(b) - return b.Count() == b.length -} - -// None returns true if no bit is set, false otherwise. Returns true for -// empty sets. -func (b *BitSet) None() bool { - panicIfNull(b) - if b != nil && b.set != nil { - for _, word := range b.set { - if word > 0 { - return false - } - } - return true - } - return true -} - -// Any returns true if any bit is set, false otherwise -func (b *BitSet) Any() bool { - panicIfNull(b) - return !b.None() -} - -// IsSuperSet returns true if this is a superset of the other set -func (b *BitSet) IsSuperSet(other *BitSet) bool { - for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) { - if !b.Test(i) { - return false - } - } - return true -} - -// IsStrictSuperSet returns true if this is a strict superset of the other set -func (b *BitSet) IsStrictSuperSet(other *BitSet) bool { - return b.Count() > other.Count() && b.IsSuperSet(other) -} - -// DumpAsBits dumps a bit set as a string of bits -func (b *BitSet) DumpAsBits() string { - if b.set == nil { - return "." 
- } - buffer := bytes.NewBufferString("") - i := len(b.set) - 1 - for ; i >= 0; i-- { - fmt.Fprintf(buffer, "%064b.", b.set[i]) - } - return buffer.String() -} - -// BinaryStorageSize returns the binary storage requirements -func (b *BitSet) BinaryStorageSize() int { - return binary.Size(uint64(0)) + binary.Size(b.set) -} - -// WriteTo writes a BitSet to a stream -func (b *BitSet) WriteTo(stream io.Writer) (int64, error) { - length := uint64(b.length) - - // Write length - err := binary.Write(stream, binaryOrder, length) - if err != nil { - return 0, err - } - - // Write set - err = binary.Write(stream, binaryOrder, b.set) - return int64(b.BinaryStorageSize()), err -} - -// ReadFrom reads a BitSet from a stream written using WriteTo -func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) { - var length uint64 - - // Read length first - err := binary.Read(stream, binaryOrder, &length) - if err != nil { - return 0, err - } - newset := New(uint(length)) - - if uint64(newset.length) != length { - return 0, errors.New("unmarshalling error: type mismatch") - } - - // Read remaining bytes as set - err = binary.Read(stream, binaryOrder, newset.set) - if err != nil { - return 0, err - } - - *b = *newset - return int64(b.BinaryStorageSize()), nil -} - -// MarshalBinary encodes a BitSet into a binary form and returns the result. -func (b *BitSet) MarshalBinary() ([]byte, error) { - var buf bytes.Buffer - writer := bufio.NewWriter(&buf) - - _, err := b.WriteTo(writer) - if err != nil { - return []byte{}, err - } - - err = writer.Flush() - - return buf.Bytes(), err -} - -// UnmarshalBinary decodes the binary form generated by MarshalBinary. 
-func (b *BitSet) UnmarshalBinary(data []byte) error { - buf := bytes.NewReader(data) - reader := bufio.NewReader(buf) - - _, err := b.ReadFrom(reader) - - return err -} - -// MarshalJSON marshals a BitSet as a JSON structure -func (b *BitSet) MarshalJSON() ([]byte, error) { - buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize())) - _, err := b.WriteTo(buffer) - if err != nil { - return nil, err - } - - // URLEncode all bytes - return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes())) -} - -// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON -func (b *BitSet) UnmarshalJSON(data []byte) error { - // Unmarshal as string - var s string - err := json.Unmarshal(data, &s) - if err != nil { - return err - } - - // URLDecode string - buf, err := base64Encoding.DecodeString(s) - if err != nil { - return err - } - - _, err = b.ReadFrom(bytes.NewReader(buf)) - return err -} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt.go b/vendor/github.com/bits-and-blooms/bitset/popcnt.go deleted file mode 100644 index 76577a8..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/popcnt.go +++ /dev/null @@ -1,53 +0,0 @@ -package bitset - -// bit population count, take from -// https://code.google.com/p/go/issues/detail?id=4988#c11 -// credit: https://code.google.com/u/arnehormann/ -func popcount(x uint64) (n uint64) { - x -= (x >> 1) & 0x5555555555555555 - x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 - x += x >> 4 - x &= 0x0f0f0f0f0f0f0f0f - x *= 0x0101010101010101 - return x >> 56 -} - -func popcntSliceGo(s []uint64) uint64 { - cnt := uint64(0) - for _, x := range s { - cnt += popcount(x) - } - return cnt -} - -func popcntMaskSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] &^ m[i]) - } - return cnt -} - -func popcntAndSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] & m[i]) - } - return cnt -} - -func popcntOrSliceGo(s, m []uint64) 
uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] | m[i]) - } - return cnt -} - -func popcntXorSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] ^ m[i]) - } - return cnt -} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go deleted file mode 100644 index fc8ff4f..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build go1.9 - -package bitset - -import "math/bits" - -func popcntSlice(s []uint64) uint64 { - var cnt int - for _, x := range s { - cnt += bits.OnesCount64(x) - } - return uint64(cnt) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] &^ m[i]) - } - return uint64(cnt) -} - -func popcntAndSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] & m[i]) - } - return uint64(cnt) -} - -func popcntOrSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] | m[i]) - } - return uint64(cnt) -} - -func popcntXorSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] ^ m[i]) - } - return uint64(cnt) -} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go deleted file mode 100644 index 4cf64f2..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go +++ /dev/null @@ -1,68 +0,0 @@ -// +build !go1.9 -// +build amd64,!appengine - -package bitset - -// *** the following functions are defined in popcnt_amd64.s - -//go:noescape - -func hasAsm() bool - -// useAsm is a flag used to select the GO or ASM implementation of the popcnt function -var useAsm = hasAsm() - -//go:noescape - -func popcntSliceAsm(s []uint64) uint64 - -//go:noescape - -func popcntMaskSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func 
popcntAndSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntOrSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntXorSliceAsm(s, m []uint64) uint64 - -func popcntSlice(s []uint64) uint64 { - if useAsm { - return popcntSliceAsm(s) - } - return popcntSliceGo(s) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - if useAsm { - return popcntMaskSliceAsm(s, m) - } - return popcntMaskSliceGo(s, m) -} - -func popcntAndSlice(s, m []uint64) uint64 { - if useAsm { - return popcntAndSliceAsm(s, m) - } - return popcntAndSliceGo(s, m) -} - -func popcntOrSlice(s, m []uint64) uint64 { - if useAsm { - return popcntOrSliceAsm(s, m) - } - return popcntOrSliceGo(s, m) -} - -func popcntXorSlice(s, m []uint64) uint64 { - if useAsm { - return popcntXorSliceAsm(s, m) - } - return popcntXorSliceGo(s, m) -} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s deleted file mode 100644 index 666c0dc..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s +++ /dev/null @@ -1,104 +0,0 @@ -// +build !go1.9 -// +build amd64,!appengine - -TEXT ·hasAsm(SB),4,$0-1 -MOVQ $1, AX -CPUID -SHRQ $23, CX -ANDQ $1, CX -MOVB CX, ret+0(FP) -RET - -#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2 - -TEXT ·popcntSliceAsm(SB),4,$0-32 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntSliceEnd -popcntSliceLoop: -BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX -ADDQ DX, AX -ADDQ $8, SI -LOOP popcntSliceLoop -popcntSliceEnd: -MOVQ AX, ret+24(FP) -RET - -TEXT ·popcntMaskSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntMaskSliceEnd -MOVQ m+24(FP), DI -popcntMaskSliceLoop: -MOVQ (DI), DX -NOTQ DX -ANDQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntMaskSliceLoop -popcntMaskSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT 
·popcntAndSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntAndSliceEnd -MOVQ m+24(FP), DI -popcntAndSliceLoop: -MOVQ (DI), DX -ANDQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntAndSliceLoop -popcntAndSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntOrSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntOrSliceEnd -MOVQ m+24(FP), DI -popcntOrSliceLoop: -MOVQ (DI), DX -ORQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntOrSliceLoop -popcntOrSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntXorSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntXorSliceEnd -MOVQ m+24(FP), DI -popcntXorSliceLoop: -MOVQ (DI), DX -XORQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntXorSliceLoop -popcntXorSliceEnd: -MOVQ AX, ret+48(FP) -RET diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go deleted file mode 100644 index 21e0ff7..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !go1.9 -// +build !amd64 appengine - -package bitset - -func popcntSlice(s []uint64) uint64 { - return popcntSliceGo(s) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - return popcntMaskSliceGo(s, m) -} - -func popcntAndSlice(s, m []uint64) uint64 { - return popcntAndSliceGo(s, m) -} - -func popcntOrSlice(s, m []uint64) uint64 { - return popcntOrSliceGo(s, m) -} - -func popcntXorSlice(s, m []uint64) uint64 { - return popcntXorSliceGo(s, m) -} diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go deleted file mode 100644 index c52b61b..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.9 - 
-package bitset - -var deBruijn = [...]byte{ - 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, - 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, - 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, - 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, -} - -func trailingZeroes64(v uint64) uint { - return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58]) -} diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go deleted file mode 100644 index 36a988e..0000000 --- a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.9 - -package bitset - -import "math/bits" - -func trailingZeroes64(v uint64) uint { - return uint(bits.TrailingZeros64(v)) -} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml deleted file mode 100644 index c516ea8..0000000 --- a/vendor/github.com/cespare/xxhash/v2/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - "1.x" - - master -env: - - TAGS="" - - TAGS="-tags purego" -script: go test $TAGS -v ./... 
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt deleted file mode 100644 index 24b5306..0000000 --- a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md deleted file mode 100644 index 2fd8693..0000000 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# xxhash - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. 
This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. 
- -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | - -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: - -``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go deleted file mode 100644 index db0b35f..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ /dev/null @@ -1,236 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -// Digest implements hash.Hash64. 
-type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - copy(d.mem[d.n:], b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. 
-func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(d.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - b = b[len(d.mem):] - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go deleted file mode 100644 index ad14b80..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s deleted file mode 100644 index d580e32..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ /dev/null @@ -1,215 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. 
-blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), CX - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is CX minus the old base pointer. - SUBQ b_base+8(FP), CX - MOVQ CX, ret+32(FP) - - RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go deleted file mode 100644 index 4a5a821..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. 
- - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go deleted file mode 100644 index fc9bea7..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. 
-func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go deleted file mode 100644 index 53bf76e..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Notes: -// -// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ -// for some discussion about these unsafe conversions. -// -// In the future it's possible that compiler optimizations will make these -// unsafe operations unnecessary: https://golang.org/issue/2205. -// -// Both of these wrapper functions still incur function call overhead since they -// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write -// for strings to squeeze out a bit more speed. Mid-stack inlining should -// eventually fix this. - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -func Sum64String(s string) uint64 { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} - -// WriteString adds more data to d. It always returns len(s), nil. -// It may be faster than Write([]byte(s)) by avoiding a copy. 
-func (d *Digest) WriteString(s string) (n int, err error) { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return d.Write(b) -} diff --git a/vendor/github.com/cilium/ebpf/.clang-format b/vendor/github.com/cilium/ebpf/.clang-format deleted file mode 100644 index 4eb94b1..0000000 --- a/vendor/github.com/cilium/ebpf/.clang-format +++ /dev/null @@ -1,17 +0,0 @@ ---- -Language: Cpp -BasedOnStyle: LLVM -AlignAfterOpenBracket: DontAlign -AlignConsecutiveAssignments: true -AlignEscapedNewlines: DontAlign -AlwaysBreakBeforeMultilineStrings: true -AlwaysBreakTemplateDeclarations: false -AllowAllParametersOfDeclarationOnNextLine: false -AllowShortFunctionsOnASingleLine: false -BreakBeforeBraces: Attach -IndentWidth: 4 -KeepEmptyLinesAtTheStartOfBlocks: false -TabWidth: 4 -UseTab: ForContinuationAndIndentation -ColumnLimit: 1000 -... diff --git a/vendor/github.com/cilium/ebpf/.gitignore b/vendor/github.com/cilium/ebpf/.gitignore deleted file mode 100644 index 38b1565..0000000 --- a/vendor/github.com/cilium/ebpf/.gitignore +++ /dev/null @@ -1,13 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib -*.o - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml deleted file mode 100644 index a883741..0000000 --- a/vendor/github.com/cilium/ebpf/.golangci.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -issues: - exclude-rules: - # syscall param structs will have unused fields in Go code. 
- - path: syscall.*.go - linters: - - structcheck - -linters: - disable-all: true - enable: - - deadcode - - errcheck - - goimports - - gosimple - - govet - - ineffassign - - misspell - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - # Could be enabled later: - # - gocyclo - # - prealloc - # - maligned - # - gosec diff --git a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md deleted file mode 100644 index aee9c0a..0000000 --- a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md +++ /dev/null @@ -1,80 +0,0 @@ -Architecture of the library -=== - - ELF -> Specifications -> Objects -> Links - -ELF ---- - -BPF is usually produced by using Clang to compile a subset of C. Clang outputs -an ELF file which contains program byte code (aka BPF), but also metadata for -maps used by the program. The metadata follows the conventions set by libbpf -shipped with the kernel. Certain ELF sections have special meaning -and contain structures defined by libbpf. Newer versions of clang emit -additional metadata in BPF Type Format (aka BTF). - -The library aims to be compatible with libbpf so that moving from a C toolchain -to a Go one creates little friction. To that end, the [ELF reader](elf_reader.go) -is tested against the Linux selftests and avoids introducing custom behaviour -if possible. - -The output of the ELF reader is a `CollectionSpec` which encodes -all of the information contained in the ELF in a form that is easy to work with -in Go. - -### BTF - -The BPF Type Format describes more than just the types used by a BPF program. It -includes debug aids like which source line corresponds to which instructions and -what global variables are used. - -[BTF parsing](internal/btf/) lives in a separate internal package since exposing -it would mean an additional maintenance burden, and because the API still -has sharp corners. 
The most important concept is the `btf.Type` interface, which -also describes things that aren't really types like `.rodata` or `.bss` sections. -`btf.Type`s can form cyclical graphs, which can easily lead to infinite loops if -one is not careful. Hopefully a safe pattern to work with `btf.Type` emerges as -we write more code that deals with it. - -Specifications ---- - -`CollectionSpec`, `ProgramSpec` and `MapSpec` are blueprints for in-kernel -objects and contain everything necessary to execute the relevant `bpf(2)` -syscalls. Since the ELF reader outputs a `CollectionSpec` it's possible to -modify clang-compiled BPF code, for example to rewrite constants. At the same -time the [asm](asm/) package provides an assembler that can be used to generate -`ProgramSpec` on the fly. - -Creating a spec should never require any privileges or be restricted in any way, -for example by only allowing programs in native endianness. This ensures that -the library stays flexible. - -Objects ---- - -`Program` and `Map` are the result of loading specs into the kernel. Sometimes -loading a spec will fail because the kernel is too old, or a feature is not -enabled. There are multiple ways the library deals with that: - -* Fallback: older kernels don't allowing naming programs and maps. The library - automatically detects support for names, and omits them during load if - necessary. This works since name is primarily a debug aid. - -* Sentinel error: sometimes it's possible to detect that a feature isn't available. - In that case the library will return an error wrapping `ErrNotSupported`. - This is also useful to skip tests that can't run on the current kernel. - -Once program and map objects are loaded they expose the kernel's low-level API, -e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer -wrappers on top of the low-level API, like `MapIterator`. The low-level API is -useful as an out when our higher-level API doesn't support a particular use case. 
- -Links ---- - -BPF can be attached to many different points in the kernel and newer BPF hooks -tend to use bpf_link to do so. Older hooks unfortunately use a combination of -syscalls, netlink messages, etc. Adding support for a new link type should not -pull in large dependencies like netlink, so XDP programs or tracepoints are -out of scope. diff --git a/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md deleted file mode 100644 index 8e42838..0000000 --- a/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md deleted file mode 100644 index 72ceb43..0000000 --- a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md +++ /dev/null @@ -1,40 +0,0 @@ -# How to contribute - -Development is on [GitHub](https://github.com/cilium/ebpf) and contributions in -the form of pull requests and issues reporting bugs or suggesting new features -are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get -a better understanding for the high-level goals. - -New features must be accompanied by tests. Before starting work on any large -feature, please [join](https://cilium.herokuapp.com/) the -[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack to -discuss the design first. 
- -When submitting pull requests, consider writing details about what problem you -are solving and why the proposed approach solves that problem in commit messages -and/or pull request description to help future library users and maintainers to -reason about the proposed changes. - -## Running the tests - -Many of the tests require privileges to set resource limits and load eBPF code. -The easiest way to obtain these is to run the tests with `sudo`. - -To test the current package with your local kernel you can simply run: -``` -go test -exec sudo ./... -``` - -To test the current package with a different kernel version you can use the [run-tests.sh](run-tests.sh) script. -It requires [virtme](https://github.com/amluto/virtme) and qemu to be installed. - -Examples: - -```bash -# Run all tests on a 5.4 kernel -./run-tests.sh 5.4 - -# Run a subset of tests: -./run-tests.sh 5.4 go test ./link -``` - diff --git a/vendor/github.com/cilium/ebpf/LICENSE b/vendor/github.com/cilium/ebpf/LICENSE deleted file mode 100644 index c637ae9..0000000 --- a/vendor/github.com/cilium/ebpf/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -MIT License - -Copyright (c) 2017 Nathan Sweet -Copyright (c) 2018, 2019 Cloudflare -Copyright (c) 2019 Authors of Cilium - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile deleted file mode 100644 index 5dd342c..0000000 --- a/vendor/github.com/cilium/ebpf/Makefile +++ /dev/null @@ -1,70 +0,0 @@ -# The development version of clang is distributed as the 'clang' binary, -# while stable/released versions have a version number attached. -# Pin the default clang to a stable version. -CLANG ?= clang-12 -CFLAGS := -target bpf -O2 -g -Wall -Werror $(CFLAGS) - -# Obtain an absolute path to the directory of the Makefile. -# Assume the Makefile is in the root of the repository. -REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) -UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) - -IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) -VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) - -# clang <8 doesn't tag relocs properly (STT_NOTYPE) -# clang 9 is the first version emitting BTF -TARGETS := \ - testdata/loader-clang-7 \ - testdata/loader-clang-9 \ - testdata/loader-$(CLANG) \ - testdata/invalid_map \ - testdata/raw_tracepoint \ - testdata/invalid_map_static \ - testdata/initialized_btf_map \ - testdata/strings \ - internal/btf/testdata/relocs - -.PHONY: all clean docker-all docker-shell - -.DEFAULT_TARGET = docker-all - -# Build all ELF binaries using a Dockerized LLVM toolchain. -docker-all: - docker run --rm --user "${UIDGID}" \ - -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \ - --env CFLAGS="-fdebug-prefix-map=/ebpf=." 
\ - "${IMAGE}:${VERSION}" \ - make all - -# (debug) Drop the user into a shell inside the Docker container as root. -docker-shell: - docker run --rm -ti \ - -v "${REPODIR}":/ebpf -w /ebpf \ - "${IMAGE}:${VERSION}" - -clean: - -$(RM) testdata/*.elf - -$(RM) internal/btf/testdata/*.elf - -all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) - ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf - ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf - -testdata/loader-%-el.elf: testdata/loader.c - $* $(CFLAGS) -mlittle-endian -c $< -o $@ - -testdata/loader-%-eb.elf: testdata/loader.c - $* $(CFLAGS) -mbig-endian -c $< -o $@ - -%-el.elf: %.c - $(CLANG) $(CFLAGS) -mlittle-endian -c $< -o $@ - -%-eb.elf : %.c - $(CLANG) $(CFLAGS) -mbig-endian -c $< -o $@ - -# Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf -.PHONY: vmlinux-btf -vmlinux-btf: internal/btf/testdata/vmlinux-btf.gz -internal/btf/testdata/vmlinux-btf.gz: $(VMLINUX) - objcopy --dump-section .BTF=/dev/stdout "$<" /dev/null | gzip > "$@" diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md deleted file mode 100644 index 76c3c30..0000000 --- a/vendor/github.com/cilium/ebpf/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# eBPF - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf) - -eBPF is a pure Go library that provides utilities for loading, compiling, and -debugging eBPF programs. It has minimal external dependencies and is intended to -be used in long running processes. 
- -* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic - assembler -* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF - to various hooks -* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a - `PERF_EVENT_ARRAY` -* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows - compiling and embedding eBPF programs in Go code - -The library is maintained by [Cloudflare](https://www.cloudflare.com) and -[Cilium](https://www.cilium.io). Feel free to -[join](https://cilium.herokuapp.com/) the -[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack. - -## Current status - -The package is production ready, but **the API is explicitly unstable right -now**. Expect to update your code if you want to follow along. - -## Getting Started - -A small collection of Go and eBPF programs that serve as examples for building -your own tools can be found under [examples/](examples/). - -Contributions are highly encouraged, as they highlight certain use cases of -eBPF and the library, and help shape the future of the project. - -## Requirements - -* A version of Go that is [supported by - upstream](https://golang.org/doc/devel/release.html#policy) -* Linux 4.9, 4.19 or 5.4 (versions in-between should work, but are not tested) - -## Useful resources - -* [eBPF.io](https://ebpf.io) (recommended) -* [Cilium eBPF documentation](https://docs.cilium.io/en/latest/bpf/#bpf-guide) - (recommended) -* [Linux documentation on - BPF](https://www.kernel.org/doc/html/latest/networking/filter.html) -* [eBPF features by Linux - version](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md) - -## Regenerating Testdata - -Run `make` in the root of this repository to rebuild testdata in all -subpackages. This requires Docker, as it relies on a standardized build -environment to keep the build output stable. 
- -The toolchain image build files are kept in [testdata/docker/](testdata/docker/). - -## License - -MIT diff --git a/vendor/github.com/cilium/ebpf/asm/alu.go b/vendor/github.com/cilium/ebpf/asm/alu.go deleted file mode 100644 index 70ccc4d..0000000 --- a/vendor/github.com/cilium/ebpf/asm/alu.go +++ /dev/null @@ -1,149 +0,0 @@ -package asm - -//go:generate stringer -output alu_string.go -type=Source,Endianness,ALUOp - -// Source of ALU / ALU64 / Branch operations -// -// msb lsb -// +----+-+---+ -// |op |S|cls| -// +----+-+---+ -type Source uint8 - -const sourceMask OpCode = 0x08 - -// Source bitmask -const ( - // InvalidSource is returned by getters when invoked - // on non ALU / branch OpCodes. - InvalidSource Source = 0xff - // ImmSource src is from constant - ImmSource Source = 0x00 - // RegSource src is from register - RegSource Source = 0x08 -) - -// The Endianness of a byte swap instruction. -type Endianness uint8 - -const endianMask = sourceMask - -// Endian flags -const ( - InvalidEndian Endianness = 0xff - // Convert to little endian - LE Endianness = 0x00 - // Convert to big endian - BE Endianness = 0x08 -) - -// ALUOp are ALU / ALU64 operations -// -// msb lsb -// +----+-+---+ -// |OP |s|cls| -// +----+-+---+ -type ALUOp uint8 - -const aluMask OpCode = 0xf0 - -const ( - // InvalidALUOp is returned by getters when invoked - // on non ALU OpCodes - InvalidALUOp ALUOp = 0xff - // Add - addition - Add ALUOp = 0x00 - // Sub - subtraction - Sub ALUOp = 0x10 - // Mul - multiplication - Mul ALUOp = 0x20 - // Div - division - Div ALUOp = 0x30 - // Or - bitwise or - Or ALUOp = 0x40 - // And - bitwise and - And ALUOp = 0x50 - // LSh - bitwise shift left - LSh ALUOp = 0x60 - // RSh - bitwise shift right - RSh ALUOp = 0x70 - // Neg - sign/unsign signing bit - Neg ALUOp = 0x80 - // Mod - modulo - Mod ALUOp = 0x90 - // Xor - bitwise xor - Xor ALUOp = 0xa0 - // Mov - move value from one place to another - Mov ALUOp = 0xb0 - // ArSh - arithmatic shift - ArSh ALUOp = 
0xc0 - // Swap - endian conversions - Swap ALUOp = 0xd0 -) - -// HostTo converts from host to another endianness. -func HostTo(endian Endianness, dst Register, size Size) Instruction { - var imm int64 - switch size { - case Half: - imm = 16 - case Word: - imm = 32 - case DWord: - imm = 64 - default: - return Instruction{OpCode: InvalidOpCode} - } - - return Instruction{ - OpCode: OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)), - Dst: dst, - Constant: imm, - } -} - -// Op returns the OpCode for an ALU operation with a given source. -func (op ALUOp) Op(source Source) OpCode { - return OpCode(ALU64Class).SetALUOp(op).SetSource(source) -} - -// Reg emits `dst (op) src`. -func (op ALUOp) Reg(dst, src Register) Instruction { - return Instruction{ - OpCode: op.Op(RegSource), - Dst: dst, - Src: src, - } -} - -// Imm emits `dst (op) value`. -func (op ALUOp) Imm(dst Register, value int32) Instruction { - return Instruction{ - OpCode: op.Op(ImmSource), - Dst: dst, - Constant: int64(value), - } -} - -// Op32 returns the OpCode for a 32-bit ALU operation with a given source. -func (op ALUOp) Op32(source Source) OpCode { - return OpCode(ALUClass).SetALUOp(op).SetSource(source) -} - -// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst. -func (op ALUOp) Reg32(dst, src Register) Instruction { - return Instruction{ - OpCode: op.Op32(RegSource), - Dst: dst, - Src: src, - } -} - -// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst. -func (op ALUOp) Imm32(dst Register, value int32) Instruction { - return Instruction{ - OpCode: op.Op32(ImmSource), - Dst: dst, - Constant: int64(value), - } -} diff --git a/vendor/github.com/cilium/ebpf/asm/alu_string.go b/vendor/github.com/cilium/ebpf/asm/alu_string.go deleted file mode 100644 index 72d3fe6..0000000 --- a/vendor/github.com/cilium/ebpf/asm/alu_string.go +++ /dev/null @@ -1,107 +0,0 @@ -// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT. 
- -package asm - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[InvalidSource-255] - _ = x[ImmSource-0] - _ = x[RegSource-8] -} - -const ( - _Source_name_0 = "ImmSource" - _Source_name_1 = "RegSource" - _Source_name_2 = "InvalidSource" -) - -func (i Source) String() string { - switch { - case i == 0: - return _Source_name_0 - case i == 8: - return _Source_name_1 - case i == 255: - return _Source_name_2 - default: - return "Source(" + strconv.FormatInt(int64(i), 10) + ")" - } -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[InvalidEndian-255] - _ = x[LE-0] - _ = x[BE-8] -} - -const ( - _Endianness_name_0 = "LE" - _Endianness_name_1 = "BE" - _Endianness_name_2 = "InvalidEndian" -) - -func (i Endianness) String() string { - switch { - case i == 0: - return _Endianness_name_0 - case i == 8: - return _Endianness_name_1 - case i == 255: - return _Endianness_name_2 - default: - return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")" - } -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[InvalidALUOp-255] - _ = x[Add-0] - _ = x[Sub-16] - _ = x[Mul-32] - _ = x[Div-48] - _ = x[Or-64] - _ = x[And-80] - _ = x[LSh-96] - _ = x[RSh-112] - _ = x[Neg-128] - _ = x[Mod-144] - _ = x[Xor-160] - _ = x[Mov-176] - _ = x[ArSh-192] - _ = x[Swap-208] -} - -const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapInvalidALUOp" - -var _ALUOp_map = map[ALUOp]string{ - 0: _ALUOp_name[0:3], - 16: _ALUOp_name[3:6], - 32: _ALUOp_name[6:9], - 48: _ALUOp_name[9:12], - 64: _ALUOp_name[12:14], - 80: _ALUOp_name[14:17], - 96: _ALUOp_name[17:20], - 112: _ALUOp_name[20:23], - 128: _ALUOp_name[23:26], - 144: _ALUOp_name[26:29], - 160: _ALUOp_name[29:32], - 176: _ALUOp_name[32:35], - 192: _ALUOp_name[35:39], - 208: _ALUOp_name[39:43], - 255: _ALUOp_name[43:55], -} - -func (i ALUOp) String() string { - if str, ok := _ALUOp_map[i]; ok { - return str - } - return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")" -} diff --git a/vendor/github.com/cilium/ebpf/asm/doc.go b/vendor/github.com/cilium/ebpf/asm/doc.go deleted file mode 100644 index 7031bdc..0000000 --- a/vendor/github.com/cilium/ebpf/asm/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package asm is an assembler for eBPF bytecode. -package asm diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go deleted file mode 100644 index aee2c7a..0000000 --- a/vendor/github.com/cilium/ebpf/asm/func.go +++ /dev/null @@ -1,195 +0,0 @@ -package asm - -//go:generate stringer -output func_string.go -type=BuiltinFunc - -// BuiltinFunc is a built-in eBPF function. -type BuiltinFunc int32 - -// eBPF built-in functions -// -// You can regenerate this list using the following gawk script: -// -// /FN\(.+\),/ { -// match($1, /\((.+)\)/, r) -// split(r[1], p, "_") -// printf "Fn" -// for (i in p) { -// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2) -// } -// print "" -// } -// -// The script expects include/uapi/linux/bpf.h as it's input. 
-const ( - FnUnspec BuiltinFunc = iota - FnMapLookupElem - FnMapUpdateElem - FnMapDeleteElem - FnProbeRead - FnKtimeGetNs - FnTracePrintk - FnGetPrandomU32 - FnGetSmpProcessorId - FnSkbStoreBytes - FnL3CsumReplace - FnL4CsumReplace - FnTailCall - FnCloneRedirect - FnGetCurrentPidTgid - FnGetCurrentUidGid - FnGetCurrentComm - FnGetCgroupClassid - FnSkbVlanPush - FnSkbVlanPop - FnSkbGetTunnelKey - FnSkbSetTunnelKey - FnPerfEventRead - FnRedirect - FnGetRouteRealm - FnPerfEventOutput - FnSkbLoadBytes - FnGetStackid - FnCsumDiff - FnSkbGetTunnelOpt - FnSkbSetTunnelOpt - FnSkbChangeProto - FnSkbChangeType - FnSkbUnderCgroup - FnGetHashRecalc - FnGetCurrentTask - FnProbeWriteUser - FnCurrentTaskUnderCgroup - FnSkbChangeTail - FnSkbPullData - FnCsumUpdate - FnSetHashInvalid - FnGetNumaNodeId - FnSkbChangeHead - FnXdpAdjustHead - FnProbeReadStr - FnGetSocketCookie - FnGetSocketUid - FnSetHash - FnSetsockopt - FnSkbAdjustRoom - FnRedirectMap - FnSkRedirectMap - FnSockMapUpdate - FnXdpAdjustMeta - FnPerfEventReadValue - FnPerfProgReadValue - FnGetsockopt - FnOverrideReturn - FnSockOpsCbFlagsSet - FnMsgRedirectMap - FnMsgApplyBytes - FnMsgCorkBytes - FnMsgPullData - FnBind - FnXdpAdjustTail - FnSkbGetXfrmState - FnGetStack - FnSkbLoadBytesRelative - FnFibLookup - FnSockHashUpdate - FnMsgRedirectHash - FnSkRedirectHash - FnLwtPushEncap - FnLwtSeg6StoreBytes - FnLwtSeg6AdjustSrh - FnLwtSeg6Action - FnRcRepeat - FnRcKeydown - FnSkbCgroupId - FnGetCurrentCgroupId - FnGetLocalStorage - FnSkSelectReuseport - FnSkbAncestorCgroupId - FnSkLookupTcp - FnSkLookupUdp - FnSkRelease - FnMapPushElem - FnMapPopElem - FnMapPeekElem - FnMsgPushData - FnMsgPopData - FnRcPointerRel - FnSpinLock - FnSpinUnlock - FnSkFullsock - FnTcpSock - FnSkbEcnSetCe - FnGetListenerSock - FnSkcLookupTcp - FnTcpCheckSyncookie - FnSysctlGetName - FnSysctlGetCurrentValue - FnSysctlGetNewValue - FnSysctlSetNewValue - FnStrtol - FnStrtoul - FnSkStorageGet - FnSkStorageDelete - FnSendSignal - FnTcpGenSyncookie - 
FnSkbOutput - FnProbeReadUser - FnProbeReadKernel - FnProbeReadUserStr - FnProbeReadKernelStr - FnTcpSendAck - FnSendSignalThread - FnJiffies64 - FnReadBranchRecords - FnGetNsCurrentPidTgid - FnXdpOutput - FnGetNetnsCookie - FnGetCurrentAncestorCgroupId - FnSkAssign - FnKtimeGetBootNs - FnSeqPrintf - FnSeqWrite - FnSkCgroupId - FnSkAncestorCgroupId - FnRingbufOutput - FnRingbufReserve - FnRingbufSubmit - FnRingbufDiscard - FnRingbufQuery - FnCsumLevel - FnSkcToTcp6Sock - FnSkcToTcpSock - FnSkcToTcpTimewaitSock - FnSkcToTcpRequestSock - FnSkcToUdp6Sock - FnGetTaskStack - FnLoadHdrOpt - FnStoreHdrOpt - FnReserveHdrOpt - FnInodeStorageGet - FnInodeStorageDelete - FnDPath - FnCopyFromUser - FnSnprintfBtf - FnSeqPrintfBtf - FnSkbCgroupClassid - FnRedirectNeigh - FnPerCpuPtr - FnThisCpuPtr - FnRedirectPeer - FnTaskStorageGet - FnTaskStorageDelete - FnGetCurrentTaskBtf - FnBprmOptsSet - FnKtimeGetCoarseNs - FnImaInodeHash - FnSockFromFile -) - -// Call emits a function call. -func (fn BuiltinFunc) Call() Instruction { - return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(Call), - Constant: int64(fn), - } -} diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go deleted file mode 100644 index a712c5d..0000000 --- a/vendor/github.com/cilium/ebpf/asm/func_string.go +++ /dev/null @@ -1,185 +0,0 @@ -// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT. - -package asm - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[FnUnspec-0] - _ = x[FnMapLookupElem-1] - _ = x[FnMapUpdateElem-2] - _ = x[FnMapDeleteElem-3] - _ = x[FnProbeRead-4] - _ = x[FnKtimeGetNs-5] - _ = x[FnTracePrintk-6] - _ = x[FnGetPrandomU32-7] - _ = x[FnGetSmpProcessorId-8] - _ = x[FnSkbStoreBytes-9] - _ = x[FnL3CsumReplace-10] - _ = x[FnL4CsumReplace-11] - _ = x[FnTailCall-12] - _ = x[FnCloneRedirect-13] - _ = x[FnGetCurrentPidTgid-14] - _ = x[FnGetCurrentUidGid-15] - _ = x[FnGetCurrentComm-16] - _ = x[FnGetCgroupClassid-17] - _ = x[FnSkbVlanPush-18] - _ = x[FnSkbVlanPop-19] - _ = x[FnSkbGetTunnelKey-20] - _ = x[FnSkbSetTunnelKey-21] - _ = x[FnPerfEventRead-22] - _ = x[FnRedirect-23] - _ = x[FnGetRouteRealm-24] - _ = x[FnPerfEventOutput-25] - _ = x[FnSkbLoadBytes-26] - _ = x[FnGetStackid-27] - _ = x[FnCsumDiff-28] - _ = x[FnSkbGetTunnelOpt-29] - _ = x[FnSkbSetTunnelOpt-30] - _ = x[FnSkbChangeProto-31] - _ = x[FnSkbChangeType-32] - _ = x[FnSkbUnderCgroup-33] - _ = x[FnGetHashRecalc-34] - _ = x[FnGetCurrentTask-35] - _ = x[FnProbeWriteUser-36] - _ = x[FnCurrentTaskUnderCgroup-37] - _ = x[FnSkbChangeTail-38] - _ = x[FnSkbPullData-39] - _ = x[FnCsumUpdate-40] - _ = x[FnSetHashInvalid-41] - _ = x[FnGetNumaNodeId-42] - _ = x[FnSkbChangeHead-43] - _ = x[FnXdpAdjustHead-44] - _ = x[FnProbeReadStr-45] - _ = x[FnGetSocketCookie-46] - _ = x[FnGetSocketUid-47] - _ = x[FnSetHash-48] - _ = x[FnSetsockopt-49] - _ = x[FnSkbAdjustRoom-50] - _ = x[FnRedirectMap-51] - _ = x[FnSkRedirectMap-52] - _ = x[FnSockMapUpdate-53] - _ = x[FnXdpAdjustMeta-54] - _ = x[FnPerfEventReadValue-55] - _ = x[FnPerfProgReadValue-56] - _ = x[FnGetsockopt-57] - _ = x[FnOverrideReturn-58] - _ = x[FnSockOpsCbFlagsSet-59] - _ = x[FnMsgRedirectMap-60] - _ = x[FnMsgApplyBytes-61] - _ = x[FnMsgCorkBytes-62] - _ = x[FnMsgPullData-63] - _ = x[FnBind-64] - _ = x[FnXdpAdjustTail-65] - _ = x[FnSkbGetXfrmState-66] - _ = x[FnGetStack-67] - _ = x[FnSkbLoadBytesRelative-68] - _ = x[FnFibLookup-69] - _ = x[FnSockHashUpdate-70] - _ = 
x[FnMsgRedirectHash-71] - _ = x[FnSkRedirectHash-72] - _ = x[FnLwtPushEncap-73] - _ = x[FnLwtSeg6StoreBytes-74] - _ = x[FnLwtSeg6AdjustSrh-75] - _ = x[FnLwtSeg6Action-76] - _ = x[FnRcRepeat-77] - _ = x[FnRcKeydown-78] - _ = x[FnSkbCgroupId-79] - _ = x[FnGetCurrentCgroupId-80] - _ = x[FnGetLocalStorage-81] - _ = x[FnSkSelectReuseport-82] - _ = x[FnSkbAncestorCgroupId-83] - _ = x[FnSkLookupTcp-84] - _ = x[FnSkLookupUdp-85] - _ = x[FnSkRelease-86] - _ = x[FnMapPushElem-87] - _ = x[FnMapPopElem-88] - _ = x[FnMapPeekElem-89] - _ = x[FnMsgPushData-90] - _ = x[FnMsgPopData-91] - _ = x[FnRcPointerRel-92] - _ = x[FnSpinLock-93] - _ = x[FnSpinUnlock-94] - _ = x[FnSkFullsock-95] - _ = x[FnTcpSock-96] - _ = x[FnSkbEcnSetCe-97] - _ = x[FnGetListenerSock-98] - _ = x[FnSkcLookupTcp-99] - _ = x[FnTcpCheckSyncookie-100] - _ = x[FnSysctlGetName-101] - _ = x[FnSysctlGetCurrentValue-102] - _ = x[FnSysctlGetNewValue-103] - _ = x[FnSysctlSetNewValue-104] - _ = x[FnStrtol-105] - _ = x[FnStrtoul-106] - _ = x[FnSkStorageGet-107] - _ = x[FnSkStorageDelete-108] - _ = x[FnSendSignal-109] - _ = x[FnTcpGenSyncookie-110] - _ = x[FnSkbOutput-111] - _ = x[FnProbeReadUser-112] - _ = x[FnProbeReadKernel-113] - _ = x[FnProbeReadUserStr-114] - _ = x[FnProbeReadKernelStr-115] - _ = x[FnTcpSendAck-116] - _ = x[FnSendSignalThread-117] - _ = x[FnJiffies64-118] - _ = x[FnReadBranchRecords-119] - _ = x[FnGetNsCurrentPidTgid-120] - _ = x[FnXdpOutput-121] - _ = x[FnGetNetnsCookie-122] - _ = x[FnGetCurrentAncestorCgroupId-123] - _ = x[FnSkAssign-124] - _ = x[FnKtimeGetBootNs-125] - _ = x[FnSeqPrintf-126] - _ = x[FnSeqWrite-127] - _ = x[FnSkCgroupId-128] - _ = x[FnSkAncestorCgroupId-129] - _ = x[FnRingbufOutput-130] - _ = x[FnRingbufReserve-131] - _ = x[FnRingbufSubmit-132] - _ = x[FnRingbufDiscard-133] - _ = x[FnRingbufQuery-134] - _ = x[FnCsumLevel-135] - _ = x[FnSkcToTcp6Sock-136] - _ = x[FnSkcToTcpSock-137] - _ = x[FnSkcToTcpTimewaitSock-138] - _ = x[FnSkcToTcpRequestSock-139] - _ = x[FnSkcToUdp6Sock-140] - 
_ = x[FnGetTaskStack-141] - _ = x[FnLoadHdrOpt-142] - _ = x[FnStoreHdrOpt-143] - _ = x[FnReserveHdrOpt-144] - _ = x[FnInodeStorageGet-145] - _ = x[FnInodeStorageDelete-146] - _ = x[FnDPath-147] - _ = x[FnCopyFromUser-148] - _ = x[FnSnprintfBtf-149] - _ = x[FnSeqPrintfBtf-150] - _ = x[FnSkbCgroupClassid-151] - _ = x[FnRedirectNeigh-152] - _ = x[FnPerCpuPtr-153] - _ = x[FnThisCpuPtr-154] - _ = x[FnRedirectPeer-155] - _ = x[FnTaskStorageGet-156] - _ = x[FnTaskStorageDelete-157] - _ = x[FnGetCurrentTaskBtf-158] - _ = x[FnBprmOptsSet-159] - _ = x[FnKtimeGetCoarseNs-160] - _ = x[FnImaInodeHash-161] - _ = x[FnSockFromFile-162] -} - -const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsg
PushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFile" - -var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 
2346, 2365, 2378, 2396, 2410, 2424} - -func (i BuiltinFunc) String() string { - if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { - return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]] -} diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go deleted file mode 100644 index e7ac010..0000000 --- a/vendor/github.com/cilium/ebpf/asm/instruction.go +++ /dev/null @@ -1,506 +0,0 @@ -package asm - -import ( - "crypto/sha1" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "io" - "math" - "strings" - - "github.com/cilium/ebpf/internal/unix" -) - -// InstructionSize is the size of a BPF instruction in bytes -const InstructionSize = 8 - -// RawInstructionOffset is an offset in units of raw BPF instructions. -type RawInstructionOffset uint64 - -// Bytes returns the offset of an instruction in bytes. -func (rio RawInstructionOffset) Bytes() uint64 { - return uint64(rio) * InstructionSize -} - -// Instruction is a single eBPF instruction. -type Instruction struct { - OpCode OpCode - Dst Register - Src Register - Offset int16 - Constant int64 - Reference string - Symbol string -} - -// Sym creates a symbol. -func (ins Instruction) Sym(name string) Instruction { - ins.Symbol = name - return ins -} - -// Unmarshal decodes a BPF instruction. 
-func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { - var bi bpfInstruction - err := binary.Read(r, bo, &bi) - if err != nil { - return 0, err - } - - ins.OpCode = bi.OpCode - ins.Offset = bi.Offset - ins.Constant = int64(bi.Constant) - ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo) - if err != nil { - return 0, fmt.Errorf("can't unmarshal registers: %s", err) - } - - if !bi.OpCode.IsDWordLoad() { - return InstructionSize, nil - } - - var bi2 bpfInstruction - if err := binary.Read(r, bo, &bi2); err != nil { - // No Wrap, to avoid io.EOF clash - return 0, errors.New("64bit immediate is missing second half") - } - if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 { - return 0, errors.New("64bit immediate has non-zero fields") - } - ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant))) - - return 2 * InstructionSize, nil -} - -// Marshal encodes a BPF instruction. -func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) { - if ins.OpCode == InvalidOpCode { - return 0, errors.New("invalid opcode") - } - - isDWordLoad := ins.OpCode.IsDWordLoad() - - cons := int32(ins.Constant) - if isDWordLoad { - // Encode least significant 32bit first for 64bit operations. - cons = int32(uint32(ins.Constant)) - } - - regs, err := newBPFRegisters(ins.Dst, ins.Src, bo) - if err != nil { - return 0, fmt.Errorf("can't marshal registers: %s", err) - } - - bpfi := bpfInstruction{ - ins.OpCode, - regs, - ins.Offset, - cons, - } - - if err := binary.Write(w, bo, &bpfi); err != nil { - return 0, err - } - - if !isDWordLoad { - return InstructionSize, nil - } - - bpfi = bpfInstruction{ - Constant: int32(ins.Constant >> 32), - } - - if err := binary.Write(w, bo, &bpfi); err != nil { - return 0, err - } - - return 2 * InstructionSize, nil -} - -// RewriteMapPtr changes an instruction to use a new map fd. -// -// Returns an error if the instruction doesn't load a map. 
-func (ins *Instruction) RewriteMapPtr(fd int) error { - if !ins.OpCode.IsDWordLoad() { - return fmt.Errorf("%s is not a 64 bit load", ins.OpCode) - } - - if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue { - return errors.New("not a load from a map") - } - - // Preserve the offset value for direct map loads. - offset := uint64(ins.Constant) & (math.MaxUint32 << 32) - rawFd := uint64(uint32(fd)) - ins.Constant = int64(offset | rawFd) - return nil -} - -// MapPtr returns the map fd for this instruction. -// -// The result is undefined if the instruction is not a load from a map, -// see IsLoadFromMap. -func (ins *Instruction) MapPtr() int { - return int(int32(uint64(ins.Constant) & math.MaxUint32)) -} - -// RewriteMapOffset changes the offset of a direct load from a map. -// -// Returns an error if the instruction is not a direct load. -func (ins *Instruction) RewriteMapOffset(offset uint32) error { - if !ins.OpCode.IsDWordLoad() { - return fmt.Errorf("%s is not a 64 bit load", ins.OpCode) - } - - if ins.Src != PseudoMapValue { - return errors.New("not a direct load from a map") - } - - fd := uint64(ins.Constant) & math.MaxUint32 - ins.Constant = int64(uint64(offset)<<32 | fd) - return nil -} - -func (ins *Instruction) mapOffset() uint32 { - return uint32(uint64(ins.Constant) >> 32) -} - -// IsLoadFromMap returns true if the instruction loads from a map. -// -// This covers both loading the map pointer and direct map value loads. -func (ins *Instruction) IsLoadFromMap() bool { - return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue) -} - -// IsFunctionCall returns true if the instruction calls another BPF function. -// -// This is not the same thing as a BPF helper call. -func (ins *Instruction) IsFunctionCall() bool { - return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall -} - -// IsConstantLoad returns true if the instruction loads a constant of the -// given size. 
-func (ins *Instruction) IsConstantLoad(size Size) bool { - return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0 -} - -// Format implements fmt.Formatter. -func (ins Instruction) Format(f fmt.State, c rune) { - if c != 'v' { - fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c) - return - } - - op := ins.OpCode - - if op == InvalidOpCode { - fmt.Fprint(f, "INVALID") - return - } - - // Omit trailing space for Exit - if op.JumpOp() == Exit { - fmt.Fprint(f, op) - return - } - - if ins.IsLoadFromMap() { - fd := ins.MapPtr() - switch ins.Src { - case PseudoMapFD: - fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) - - case PseudoMapValue: - fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) - } - - goto ref - } - - fmt.Fprintf(f, "%v ", op) - switch cls := op.Class(); cls { - case LdClass, LdXClass, StClass, StXClass: - switch op.Mode() { - case ImmMode: - fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) - case AbsMode: - fmt.Fprintf(f, "imm: %d", ins.Constant) - case IndMode: - fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant) - case MemMode: - fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant) - case XAddMode: - fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) - } - - case ALU64Class, ALUClass: - fmt.Fprintf(f, "dst: %s ", ins.Dst) - if op.ALUOp() == Swap || op.Source() == ImmSource { - fmt.Fprintf(f, "imm: %d", ins.Constant) - } else { - fmt.Fprintf(f, "src: %s", ins.Src) - } - - case JumpClass: - switch jop := op.JumpOp(); jop { - case Call: - if ins.Src == PseudoCall { - // bpf-to-bpf call - fmt.Fprint(f, ins.Constant) - } else { - fmt.Fprint(f, BuiltinFunc(ins.Constant)) - } - - default: - fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset) - if op.Source() == ImmSource { - fmt.Fprintf(f, "imm: %d", ins.Constant) - } else { - fmt.Fprintf(f, "src: %s", ins.Src) - } - } - } - -ref: - if ins.Reference != "" { - fmt.Fprintf(f, " <%s>", 
ins.Reference) - } -} - -// Instructions is an eBPF program. -type Instructions []Instruction - -func (insns Instructions) String() string { - return fmt.Sprint(insns) -} - -// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. -// -// Returns an error if the symbol isn't used, see IsUnreferencedSymbol. -func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { - if symbol == "" { - return errors.New("empty symbol") - } - - found := false - for i := range insns { - ins := &insns[i] - if ins.Reference != symbol { - continue - } - - if err := ins.RewriteMapPtr(fd); err != nil { - return err - } - - found = true - } - - if !found { - return &unreferencedSymbolError{symbol} - } - - return nil -} - -// SymbolOffsets returns the set of symbols and their offset in -// the instructions. -func (insns Instructions) SymbolOffsets() (map[string]int, error) { - offsets := make(map[string]int) - - for i, ins := range insns { - if ins.Symbol == "" { - continue - } - - if _, ok := offsets[ins.Symbol]; ok { - return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol) - } - - offsets[ins.Symbol] = i - } - - return offsets, nil -} - -// ReferenceOffsets returns the set of references and their offset in -// the instructions. -func (insns Instructions) ReferenceOffsets() map[string][]int { - offsets := make(map[string][]int) - - for i, ins := range insns { - if ins.Reference == "" { - continue - } - - offsets[ins.Reference] = append(offsets[ins.Reference], i) - } - - return offsets -} - -// Format implements fmt.Formatter. -// -// You can control indentation of symbols by -// specifying a width. Setting a precision controls the indentation of -// instructions. -// The default character is a tab, which can be overridden by specifying -// the ' ' space flag. 
-func (insns Instructions) Format(f fmt.State, c rune) { - if c != 's' && c != 'v' { - fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c) - return - } - - // Precision is better in this case, because it allows - // specifying 0 padding easily. - padding, ok := f.Precision() - if !ok { - padding = 1 - } - - indent := strings.Repeat("\t", padding) - if f.Flag(' ') { - indent = strings.Repeat(" ", padding) - } - - symPadding, ok := f.Width() - if !ok { - symPadding = padding - 1 - } - if symPadding < 0 { - symPadding = 0 - } - - symIndent := strings.Repeat("\t", symPadding) - if f.Flag(' ') { - symIndent = strings.Repeat(" ", symPadding) - } - - // Guess how many digits we need at most, by assuming that all instructions - // are double wide. - highestOffset := len(insns) * 2 - offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset)))) - - iter := insns.Iterate() - for iter.Next() { - if iter.Ins.Symbol != "" { - fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol) - } - fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins) - } -} - -// Marshal encodes a BPF program into the kernel format. -func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { - for i, ins := range insns { - _, err := ins.Marshal(w, bo) - if err != nil { - return fmt.Errorf("instruction %d: %w", i, err) - } - } - return nil -} - -// Tag calculates the kernel tag for a series of instructions. -// -// It mirrors bpf_prog_calc_tag in the kernel and so can be compared -// to ProgramInfo.Tag to figure out whether a loaded program matches -// certain instructions. 
-func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { - h := sha1.New() - for i, ins := range insns { - if ins.IsLoadFromMap() { - ins.Constant = 0 - } - _, err := ins.Marshal(h, bo) - if err != nil { - return "", fmt.Errorf("instruction %d: %w", i, err) - } - } - return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil -} - -// Iterate allows iterating a BPF program while keeping track of -// various offsets. -// -// Modifying the instruction slice will lead to undefined behaviour. -func (insns Instructions) Iterate() *InstructionIterator { - return &InstructionIterator{insns: insns} -} - -// InstructionIterator iterates over a BPF program. -type InstructionIterator struct { - insns Instructions - // The instruction in question. - Ins *Instruction - // The index of the instruction in the original instruction slice. - Index int - // The offset of the instruction in raw BPF instructions. This accounts - // for double-wide instructions. - Offset RawInstructionOffset -} - -// Next returns true as long as there are any instructions remaining. 
-func (iter *InstructionIterator) Next() bool { - if len(iter.insns) == 0 { - return false - } - - if iter.Ins != nil { - iter.Index++ - iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions()) - } - iter.Ins = &iter.insns[0] - iter.insns = iter.insns[1:] - return true -} - -type bpfInstruction struct { - OpCode OpCode - Registers bpfRegisters - Offset int16 - Constant int32 -} - -type bpfRegisters uint8 - -func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) { - switch bo { - case binary.LittleEndian: - return bpfRegisters((src << 4) | (dst & 0xF)), nil - case binary.BigEndian: - return bpfRegisters((dst << 4) | (src & 0xF)), nil - default: - return 0, fmt.Errorf("unrecognized ByteOrder %T", bo) - } -} - -func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) { - switch bo { - case binary.LittleEndian: - return Register(r & 0xF), Register(r >> 4), nil - case binary.BigEndian: - return Register(r >> 4), Register(r & 0xf), nil - default: - return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo) - } -} - -type unreferencedSymbolError struct { - symbol string -} - -func (use *unreferencedSymbolError) Error() string { - return fmt.Sprintf("unreferenced symbol %s", use.symbol) -} - -// IsUnreferencedSymbol returns true if err was caused by -// an unreferenced symbol. -func IsUnreferencedSymbol(err error) bool { - _, ok := err.(*unreferencedSymbolError) - return ok -} diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go deleted file mode 100644 index 7757179..0000000 --- a/vendor/github.com/cilium/ebpf/asm/jump.go +++ /dev/null @@ -1,109 +0,0 @@ -package asm - -//go:generate stringer -output jump_string.go -type=JumpOp - -// JumpOp affect control flow. 
-// -// msb lsb -// +----+-+---+ -// |OP |s|cls| -// +----+-+---+ -type JumpOp uint8 - -const jumpMask OpCode = aluMask - -const ( - // InvalidJumpOp is returned by getters when invoked - // on non branch OpCodes - InvalidJumpOp JumpOp = 0xff - // Ja jumps by offset unconditionally - Ja JumpOp = 0x00 - // JEq jumps by offset if r == imm - JEq JumpOp = 0x10 - // JGT jumps by offset if r > imm - JGT JumpOp = 0x20 - // JGE jumps by offset if r >= imm - JGE JumpOp = 0x30 - // JSet jumps by offset if r & imm - JSet JumpOp = 0x40 - // JNE jumps by offset if r != imm - JNE JumpOp = 0x50 - // JSGT jumps by offset if signed r > signed imm - JSGT JumpOp = 0x60 - // JSGE jumps by offset if signed r >= signed imm - JSGE JumpOp = 0x70 - // Call builtin or user defined function from imm - Call JumpOp = 0x80 - // Exit ends execution, with value in r0 - Exit JumpOp = 0x90 - // JLT jumps by offset if r < imm - JLT JumpOp = 0xa0 - // JLE jumps by offset if r <= imm - JLE JumpOp = 0xb0 - // JSLT jumps by offset if signed r < signed imm - JSLT JumpOp = 0xc0 - // JSLE jumps by offset if signed r <= signed imm - JSLE JumpOp = 0xd0 -) - -// Return emits an exit instruction. -// -// Requires a return value in R0. -func Return() Instruction { - return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(Exit), - } -} - -// Op returns the OpCode for a given jump source. -func (op JumpOp) Op(source Source) OpCode { - return OpCode(JumpClass).SetJumpOp(op).SetSource(source) -} - -// Imm compares dst to value, and adjusts PC by offset if the condition is fulfilled. -func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { - if op == Exit || op == Call || op == Ja { - return Instruction{OpCode: InvalidOpCode} - } - - return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(ImmSource), - Dst: dst, - Offset: -1, - Constant: int64(value), - Reference: label, - } -} - -// Reg compares dst to src, and adjusts PC by offset if the condition is fulfilled. 
-func (op JumpOp) Reg(dst, src Register, label string) Instruction { - if op == Exit || op == Call || op == Ja { - return Instruction{OpCode: InvalidOpCode} - } - - return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(RegSource), - Dst: dst, - Src: src, - Offset: -1, - Reference: label, - } -} - -// Label adjusts PC to the address of the label. -func (op JumpOp) Label(label string) Instruction { - if op == Call { - return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(Call), - Src: PseudoCall, - Constant: -1, - Reference: label, - } - } - - return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op), - Offset: -1, - Reference: label, - } -} diff --git a/vendor/github.com/cilium/ebpf/asm/jump_string.go b/vendor/github.com/cilium/ebpf/asm/jump_string.go deleted file mode 100644 index 85a4aaf..0000000 --- a/vendor/github.com/cilium/ebpf/asm/jump_string.go +++ /dev/null @@ -1,53 +0,0 @@ -// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT. - -package asm - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[InvalidJumpOp-255] - _ = x[Ja-0] - _ = x[JEq-16] - _ = x[JGT-32] - _ = x[JGE-48] - _ = x[JSet-64] - _ = x[JNE-80] - _ = x[JSGT-96] - _ = x[JSGE-112] - _ = x[Call-128] - _ = x[Exit-144] - _ = x[JLT-160] - _ = x[JLE-176] - _ = x[JSLT-192] - _ = x[JSLE-208] -} - -const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp" - -var _JumpOp_map = map[JumpOp]string{ - 0: _JumpOp_name[0:2], - 16: _JumpOp_name[2:5], - 32: _JumpOp_name[5:8], - 48: _JumpOp_name[8:11], - 64: _JumpOp_name[11:15], - 80: _JumpOp_name[15:18], - 96: _JumpOp_name[18:22], - 112: _JumpOp_name[22:26], - 128: _JumpOp_name[26:30], - 144: _JumpOp_name[30:34], - 160: _JumpOp_name[34:37], - 176: _JumpOp_name[37:40], - 192: _JumpOp_name[40:44], - 208: _JumpOp_name[44:48], - 255: _JumpOp_name[48:61], -} - -func (i JumpOp) String() string { - if str, ok := _JumpOp_map[i]; ok { - return str - } - return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")" -} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store.go b/vendor/github.com/cilium/ebpf/asm/load_store.go deleted file mode 100644 index 85ed286..0000000 --- a/vendor/github.com/cilium/ebpf/asm/load_store.go +++ /dev/null @@ -1,204 +0,0 @@ -package asm - -//go:generate stringer -output load_store_string.go -type=Mode,Size - -// Mode for load and store operations -// -// msb lsb -// +---+--+---+ -// |MDE|sz|cls| -// +---+--+---+ -type Mode uint8 - -const modeMask OpCode = 0xe0 - -const ( - // InvalidMode is returned by getters when invoked - // on non load / store OpCodes - InvalidMode Mode = 0xff - // ImmMode - immediate value - ImmMode Mode = 0x00 - // AbsMode - immediate value + offset - AbsMode Mode = 0x20 - // IndMode - indirect (imm+src) - IndMode Mode = 0x40 - // MemMode - load from memory - MemMode Mode = 0x60 - // XAddMode - add atomically across processors. 
- XAddMode Mode = 0xc0 -) - -// Size of load and store operations -// -// msb lsb -// +---+--+---+ -// |mde|SZ|cls| -// +---+--+---+ -type Size uint8 - -const sizeMask OpCode = 0x18 - -const ( - // InvalidSize is returned by getters when invoked - // on non load / store OpCodes - InvalidSize Size = 0xff - // DWord - double word; 64 bits - DWord Size = 0x18 - // Word - word; 32 bits - Word Size = 0x00 - // Half - half-word; 16 bits - Half Size = 0x08 - // Byte - byte; 8 bits - Byte Size = 0x10 -) - -// Sizeof returns the size in bytes. -func (s Size) Sizeof() int { - switch s { - case DWord: - return 8 - case Word: - return 4 - case Half: - return 2 - case Byte: - return 1 - default: - return -1 - } -} - -// LoadMemOp returns the OpCode to load a value of given size from memory. -func LoadMemOp(size Size) OpCode { - return OpCode(LdXClass).SetMode(MemMode).SetSize(size) -} - -// LoadMem emits `dst = *(size *)(src + offset)`. -func LoadMem(dst, src Register, offset int16, size Size) Instruction { - return Instruction{ - OpCode: LoadMemOp(size), - Dst: dst, - Src: src, - Offset: offset, - } -} - -// LoadImmOp returns the OpCode to load an immediate of given size. -// -// As of kernel 4.20, only DWord size is accepted. -func LoadImmOp(size Size) OpCode { - return OpCode(LdClass).SetMode(ImmMode).SetSize(size) -} - -// LoadImm emits `dst = (size)value`. -// -// As of kernel 4.20, only DWord size is accepted. -func LoadImm(dst Register, value int64, size Size) Instruction { - return Instruction{ - OpCode: LoadImmOp(size), - Dst: dst, - Constant: value, - } -} - -// LoadMapPtr stores a pointer to a map in dst. -func LoadMapPtr(dst Register, fd int) Instruction { - if fd < 0 { - return Instruction{OpCode: InvalidOpCode} - } - - return Instruction{ - OpCode: LoadImmOp(DWord), - Dst: dst, - Src: PseudoMapFD, - Constant: int64(uint32(fd)), - } -} - -// LoadMapValue stores a pointer to the value at a certain offset of a map. 
-func LoadMapValue(dst Register, fd int, offset uint32) Instruction { - if fd < 0 { - return Instruction{OpCode: InvalidOpCode} - } - - fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd)) - return Instruction{ - OpCode: LoadImmOp(DWord), - Dst: dst, - Src: PseudoMapValue, - Constant: int64(fdAndOffset), - } -} - -// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff. -func LoadIndOp(size Size) OpCode { - return OpCode(LdClass).SetMode(IndMode).SetSize(size) -} - -// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`. -func LoadInd(dst, src Register, offset int32, size Size) Instruction { - return Instruction{ - OpCode: LoadIndOp(size), - Dst: dst, - Src: src, - Constant: int64(offset), - } -} - -// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff. -func LoadAbsOp(size Size) OpCode { - return OpCode(LdClass).SetMode(AbsMode).SetSize(size) -} - -// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`. -func LoadAbs(offset int32, size Size) Instruction { - return Instruction{ - OpCode: LoadAbsOp(size), - Dst: R0, - Constant: int64(offset), - } -} - -// StoreMemOp returns the OpCode for storing a register of given size in memory. -func StoreMemOp(size Size) OpCode { - return OpCode(StXClass).SetMode(MemMode).SetSize(size) -} - -// StoreMem emits `*(size *)(dst + offset) = src` -func StoreMem(dst Register, offset int16, src Register, size Size) Instruction { - return Instruction{ - OpCode: StoreMemOp(size), - Dst: dst, - Src: src, - Offset: offset, - } -} - -// StoreImmOp returns the OpCode for storing an immediate of given size in memory. -func StoreImmOp(size Size) OpCode { - return OpCode(StClass).SetMode(MemMode).SetSize(size) -} - -// StoreImm emits `*(size *)(dst + offset) = value`. 
-func StoreImm(dst Register, offset int16, value int64, size Size) Instruction { - return Instruction{ - OpCode: StoreImmOp(size), - Dst: dst, - Offset: offset, - Constant: value, - } -} - -// StoreXAddOp returns the OpCode to atomically add a register to a value in memory. -func StoreXAddOp(size Size) OpCode { - return OpCode(StXClass).SetMode(XAddMode).SetSize(size) -} - -// StoreXAdd atomically adds src to *dst. -func StoreXAdd(dst, src Register, size Size) Instruction { - return Instruction{ - OpCode: StoreXAddOp(size), - Dst: dst, - Src: src, - } -} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/vendor/github.com/cilium/ebpf/asm/load_store_string.go deleted file mode 100644 index 76d29a0..0000000 --- a/vendor/github.com/cilium/ebpf/asm/load_store_string.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT. - -package asm - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[InvalidMode-255] - _ = x[ImmMode-0] - _ = x[AbsMode-32] - _ = x[IndMode-64] - _ = x[MemMode-96] - _ = x[XAddMode-192] -} - -const ( - _Mode_name_0 = "ImmMode" - _Mode_name_1 = "AbsMode" - _Mode_name_2 = "IndMode" - _Mode_name_3 = "MemMode" - _Mode_name_4 = "XAddMode" - _Mode_name_5 = "InvalidMode" -) - -func (i Mode) String() string { - switch { - case i == 0: - return _Mode_name_0 - case i == 32: - return _Mode_name_1 - case i == 64: - return _Mode_name_2 - case i == 96: - return _Mode_name_3 - case i == 192: - return _Mode_name_4 - case i == 255: - return _Mode_name_5 - default: - return "Mode(" + strconv.FormatInt(int64(i), 10) + ")" - } -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[InvalidSize-255] - _ = x[DWord-24] - _ = x[Word-0] - _ = x[Half-8] - _ = x[Byte-16] -} - -const ( - _Size_name_0 = "Word" - _Size_name_1 = "Half" - _Size_name_2 = "Byte" - _Size_name_3 = "DWord" - _Size_name_4 = "InvalidSize" -) - -func (i Size) String() string { - switch { - case i == 0: - return _Size_name_0 - case i == 8: - return _Size_name_1 - case i == 16: - return _Size_name_2 - case i == 24: - return _Size_name_3 - case i == 255: - return _Size_name_4 - default: - return "Size(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go deleted file mode 100644 index 6edc3cf..0000000 --- a/vendor/github.com/cilium/ebpf/asm/opcode.go +++ /dev/null @@ -1,237 +0,0 @@ -package asm - -import ( - "fmt" - "strings" -) - -//go:generate stringer -output opcode_string.go -type=Class - -type encoding int - -const ( - unknownEncoding encoding = iota - loadOrStore - jumpOrALU -) - -// Class of operations -// -// msb lsb -// +---+--+---+ -// | ?? |CLS| -// +---+--+---+ -type Class uint8 - -const classMask OpCode = 0x07 - -const ( - // LdClass load memory - LdClass Class = 0x00 - // LdXClass load memory from constant - LdXClass Class = 0x01 - // StClass load register from memory - StClass Class = 0x02 - // StXClass load register from constant - StXClass Class = 0x03 - // ALUClass arithmetic operators - ALUClass Class = 0x04 - // JumpClass jump operators - JumpClass Class = 0x05 - // ALU64Class arithmetic in 64 bit mode - ALU64Class Class = 0x07 -) - -func (cls Class) encoding() encoding { - switch cls { - case LdClass, LdXClass, StClass, StXClass: - return loadOrStore - case ALU64Class, ALUClass, JumpClass: - return jumpOrALU - default: - return unknownEncoding - } -} - -// OpCode is a packed eBPF opcode. -// -// Its encoding is defined by a Class value: -// -// msb lsb -// +----+-+---+ -// | ???? 
|CLS| -// +----+-+---+ -type OpCode uint8 - -// InvalidOpCode is returned by setters on OpCode -const InvalidOpCode OpCode = 0xff - -// rawInstructions returns the number of BPF instructions required -// to encode this opcode. -func (op OpCode) rawInstructions() int { - if op.IsDWordLoad() { - return 2 - } - return 1 -} - -func (op OpCode) IsDWordLoad() bool { - return op == LoadImmOp(DWord) -} - -// Class returns the class of operation. -func (op OpCode) Class() Class { - return Class(op & classMask) -} - -// Mode returns the mode for load and store operations. -func (op OpCode) Mode() Mode { - if op.Class().encoding() != loadOrStore { - return InvalidMode - } - return Mode(op & modeMask) -} - -// Size returns the size for load and store operations. -func (op OpCode) Size() Size { - if op.Class().encoding() != loadOrStore { - return InvalidSize - } - return Size(op & sizeMask) -} - -// Source returns the source for branch and ALU operations. -func (op OpCode) Source() Source { - if op.Class().encoding() != jumpOrALU || op.ALUOp() == Swap { - return InvalidSource - } - return Source(op & sourceMask) -} - -// ALUOp returns the ALUOp. -func (op OpCode) ALUOp() ALUOp { - if op.Class().encoding() != jumpOrALU { - return InvalidALUOp - } - return ALUOp(op & aluMask) -} - -// Endianness returns the Endianness for a byte swap instruction. -func (op OpCode) Endianness() Endianness { - if op.ALUOp() != Swap { - return InvalidEndian - } - return Endianness(op & endianMask) -} - -// JumpOp returns the JumpOp. -func (op OpCode) JumpOp() JumpOp { - if op.Class().encoding() != jumpOrALU { - return InvalidJumpOp - } - return JumpOp(op & jumpMask) -} - -// SetMode sets the mode on load and store operations. -// -// Returns InvalidOpCode if op is of the wrong class. 
-func (op OpCode) SetMode(mode Mode) OpCode { - if op.Class().encoding() != loadOrStore || !valid(OpCode(mode), modeMask) { - return InvalidOpCode - } - return (op & ^modeMask) | OpCode(mode) -} - -// SetSize sets the size on load and store operations. -// -// Returns InvalidOpCode if op is of the wrong class. -func (op OpCode) SetSize(size Size) OpCode { - if op.Class().encoding() != loadOrStore || !valid(OpCode(size), sizeMask) { - return InvalidOpCode - } - return (op & ^sizeMask) | OpCode(size) -} - -// SetSource sets the source on jump and ALU operations. -// -// Returns InvalidOpCode if op is of the wrong class. -func (op OpCode) SetSource(source Source) OpCode { - if op.Class().encoding() != jumpOrALU || !valid(OpCode(source), sourceMask) { - return InvalidOpCode - } - return (op & ^sourceMask) | OpCode(source) -} - -// SetALUOp sets the ALUOp on ALU operations. -// -// Returns InvalidOpCode if op is of the wrong class. -func (op OpCode) SetALUOp(alu ALUOp) OpCode { - class := op.Class() - if (class != ALUClass && class != ALU64Class) || !valid(OpCode(alu), aluMask) { - return InvalidOpCode - } - return (op & ^aluMask) | OpCode(alu) -} - -// SetJumpOp sets the JumpOp on jump operations. -// -// Returns InvalidOpCode if op is of the wrong class. 
-func (op OpCode) SetJumpOp(jump JumpOp) OpCode { - if op.Class() != JumpClass || !valid(OpCode(jump), jumpMask) { - return InvalidOpCode - } - return (op & ^jumpMask) | OpCode(jump) -} - -func (op OpCode) String() string { - var f strings.Builder - - switch class := op.Class(); class { - case LdClass, LdXClass, StClass, StXClass: - f.WriteString(strings.TrimSuffix(class.String(), "Class")) - - mode := op.Mode() - f.WriteString(strings.TrimSuffix(mode.String(), "Mode")) - - switch op.Size() { - case DWord: - f.WriteString("DW") - case Word: - f.WriteString("W") - case Half: - f.WriteString("H") - case Byte: - f.WriteString("B") - } - - case ALU64Class, ALUClass: - f.WriteString(op.ALUOp().String()) - - if op.ALUOp() == Swap { - // Width for Endian is controlled by Constant - f.WriteString(op.Endianness().String()) - } else { - if class == ALUClass { - f.WriteString("32") - } - - f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) - } - - case JumpClass: - f.WriteString(op.JumpOp().String()) - if jop := op.JumpOp(); jop != Exit && jop != Call { - f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) - } - - default: - fmt.Fprintf(&f, "OpCode(%#x)", uint8(op)) - } - - return f.String() -} - -// valid returns true if all bits in value are covered by mask. -func valid(value, mask OpCode) bool { - return value & ^mask == 0 -} diff --git a/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/vendor/github.com/cilium/ebpf/asm/opcode_string.go deleted file mode 100644 index 079ce1d..0000000 --- a/vendor/github.com/cilium/ebpf/asm/opcode_string.go +++ /dev/null @@ -1,38 +0,0 @@ -// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT. - -package asm - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[LdClass-0] - _ = x[LdXClass-1] - _ = x[StClass-2] - _ = x[StXClass-3] - _ = x[ALUClass-4] - _ = x[JumpClass-5] - _ = x[ALU64Class-7] -} - -const ( - _Class_name_0 = "LdClassLdXClassStClassStXClassALUClassJumpClass" - _Class_name_1 = "ALU64Class" -) - -var ( - _Class_index_0 = [...]uint8{0, 7, 15, 22, 30, 38, 47} -) - -func (i Class) String() string { - switch { - case 0 <= i && i <= 5: - return _Class_name_0[_Class_index_0[i]:_Class_index_0[i+1]] - case i == 7: - return _Class_name_1 - default: - return "Class(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go deleted file mode 100644 index 76cb44b..0000000 --- a/vendor/github.com/cilium/ebpf/asm/register.go +++ /dev/null @@ -1,49 +0,0 @@ -package asm - -import ( - "fmt" -) - -// Register is the source or destination of most operations. -type Register uint8 - -// R0 contains return values. -const R0 Register = 0 - -// Registers for function arguments. -const ( - R1 Register = R0 + 1 + iota - R2 - R3 - R4 - R5 -) - -// Callee saved registers preserved by function calls. -const ( - R6 Register = R5 + 1 + iota - R7 - R8 - R9 -) - -// Read-only frame pointer to access stack. 
-const ( - R10 Register = R9 + 1 - RFP = R10 -) - -// Pseudo registers used by 64bit loads and jumps -const ( - PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD - PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE - PseudoCall = R1 // BPF_PSEUDO_CALL -) - -func (r Register) String() string { - v := uint8(r) - if v == 10 { - return "rfp" - } - return fmt.Sprintf("r%d", v) -} diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go deleted file mode 100644 index 17cc694..0000000 --- a/vendor/github.com/cilium/ebpf/collection.go +++ /dev/null @@ -1,616 +0,0 @@ -package ebpf - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strings" - - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" -) - -// CollectionOptions control loading a collection into the kernel. -// -// Maps and Programs are passed to NewMapWithOptions and NewProgramsWithOptions. -type CollectionOptions struct { - Maps MapOptions - Programs ProgramOptions -} - -// CollectionSpec describes a collection. -type CollectionSpec struct { - Maps map[string]*MapSpec - Programs map[string]*ProgramSpec -} - -// Copy returns a recursive copy of the spec. -func (cs *CollectionSpec) Copy() *CollectionSpec { - if cs == nil { - return nil - } - - cpy := CollectionSpec{ - Maps: make(map[string]*MapSpec, len(cs.Maps)), - Programs: make(map[string]*ProgramSpec, len(cs.Programs)), - } - - for name, spec := range cs.Maps { - cpy.Maps[name] = spec.Copy() - } - - for name, spec := range cs.Programs { - cpy.Programs[name] = spec.Copy() - } - - return &cpy -} - -// RewriteMaps replaces all references to specific maps. -// -// Use this function to use pre-existing maps instead of creating new ones -// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps. -// -// Returns an error if a named map isn't used in at least one program. 
-func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { - for symbol, m := range maps { - // have we seen a program that uses this symbol / map - seen := false - fd := m.FD() - for progName, progSpec := range cs.Programs { - err := progSpec.Instructions.RewriteMapPtr(symbol, fd) - - switch { - case err == nil: - seen = true - - case asm.IsUnreferencedSymbol(err): - // Not all programs need to use the map - - default: - return fmt.Errorf("program %s: %w", progName, err) - } - } - - if !seen { - return fmt.Errorf("map %s not referenced by any programs", symbol) - } - - // Prevent NewCollection from creating rewritten maps - delete(cs.Maps, symbol) - } - - return nil -} - -// RewriteConstants replaces the value of multiple constants. -// -// The constant must be defined like so in the C program: -// -// volatile const type foobar; -// volatile const type foobar = default; -// -// Replacement values must be of the same length as the C sizeof(type). -// If necessary, they are marshalled according to the same rules as -// map values. -// -// From Linux 5.5 the verifier will use constants to eliminate dead code. -// -// Returns an error if a constant doesn't exist. -func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error { - rodata := cs.Maps[".rodata"] - if rodata == nil { - return errors.New("missing .rodata section") - } - - if rodata.BTF == nil { - return errors.New(".rodata section has no BTF") - } - - if n := len(rodata.Contents); n != 1 { - return fmt.Errorf("expected one key in .rodata, found %d", n) - } - - kv := rodata.Contents[0] - value, ok := kv.Value.([]byte) - if !ok { - return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value) - } - - buf := make([]byte, len(value)) - copy(buf, value) - - err := patchValue(buf, btf.MapValue(rodata.BTF), consts) - if err != nil { - return err - } - - rodata.Contents[0] = MapKV{kv.Key, buf} - return nil -} - -// Assign the contents of a CollectionSpec to a struct. 
-// -// This function is a short-cut to manually checking the presence -// of maps and programs in a collection spec. Consider using bpf2go if this -// sounds useful. -// -// The argument to must be a pointer to a struct. A field of the -// struct is updated with values from Programs or Maps if it -// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec. -// The tag gives the name of the program or map as found in -// the CollectionSpec. -// -// struct { -// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` -// Bar *ebpf.MapSpec `ebpf:"bar_map"` -// Ignored int -// } -// -// Returns an error if any of the fields can't be found, or -// if the same map or program is assigned multiple times. -func (cs *CollectionSpec) Assign(to interface{}) error { - valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { - switch typ { - case reflect.TypeOf((*ProgramSpec)(nil)): - p := cs.Programs[name] - if p == nil { - return reflect.Value{}, fmt.Errorf("missing program %q", name) - } - return reflect.ValueOf(p), nil - case reflect.TypeOf((*MapSpec)(nil)): - m := cs.Maps[name] - if m == nil { - return reflect.Value{}, fmt.Errorf("missing map %q", name) - } - return reflect.ValueOf(m), nil - default: - return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) - } - } - - return assignValues(to, valueOf) -} - -// LoadAndAssign maps and programs into the kernel and assign them to a struct. -// -// This function is a short-cut to manually checking the presence -// of maps and programs in a collection spec. Consider using bpf2go if this -// sounds useful. -// -// The argument to must be a pointer to a struct. A field of the -// struct is updated with values from Programs or Maps if it -// has an `ebpf` tag and its type is *Program or *Map. -// The tag gives the name of the program or map as found in -// the CollectionSpec. -// -// struct { -// Foo *ebpf.Program `ebpf:"xdp_foo"` -// Bar *ebpf.Map `ebpf:"bar_map"` -// Ignored int -// } -// -// opts may be nil. 
-// -// Returns an error if any of the fields can't be found, or -// if the same map or program is assigned multiple times. -func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error { - if opts == nil { - opts = &CollectionOptions{} - } - - loadMap, loadProgram, done, cleanup := lazyLoadCollection(cs, opts) - defer cleanup() - - valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { - switch typ { - case reflect.TypeOf((*Program)(nil)): - p, err := loadProgram(name) - if err != nil { - return reflect.Value{}, err - } - return reflect.ValueOf(p), nil - case reflect.TypeOf((*Map)(nil)): - m, err := loadMap(name) - if err != nil { - return reflect.Value{}, err - } - return reflect.ValueOf(m), nil - default: - return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) - } - } - - if err := assignValues(to, valueOf); err != nil { - return err - } - - done() - return nil -} - -// Collection is a collection of Programs and Maps associated -// with their symbols -type Collection struct { - Programs map[string]*Program - Maps map[string]*Map -} - -// NewCollection creates a Collection from a specification. -func NewCollection(spec *CollectionSpec) (*Collection, error) { - return NewCollectionWithOptions(spec, CollectionOptions{}) -} - -// NewCollectionWithOptions creates a Collection from a specification. 
-func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { - loadMap, loadProgram, done, cleanup := lazyLoadCollection(spec, &opts) - defer cleanup() - - for mapName := range spec.Maps { - _, err := loadMap(mapName) - if err != nil { - return nil, err - } - } - - for progName := range spec.Programs { - _, err := loadProgram(progName) - if err != nil { - return nil, err - } - } - - maps, progs := done() - return &Collection{ - progs, - maps, - }, nil -} - -type handleCache struct { - btfHandles map[*btf.Spec]*btf.Handle - btfSpecs map[io.ReaderAt]*btf.Spec -} - -func newHandleCache() *handleCache { - return &handleCache{ - btfHandles: make(map[*btf.Spec]*btf.Handle), - btfSpecs: make(map[io.ReaderAt]*btf.Spec), - } -} - -func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) { - if hc.btfHandles[spec] != nil { - return hc.btfHandles[spec], nil - } - - handle, err := btf.NewHandle(spec) - if err != nil { - return nil, err - } - - hc.btfHandles[spec] = handle - return handle, nil -} - -func (hc handleCache) btfSpec(rd io.ReaderAt) (*btf.Spec, error) { - if hc.btfSpecs[rd] != nil { - return hc.btfSpecs[rd], nil - } - - spec, err := btf.LoadSpecFromReader(rd) - if err != nil { - return nil, err - } - - hc.btfSpecs[rd] = spec - return spec, nil -} - -func (hc handleCache) close() { - for _, handle := range hc.btfHandles { - handle.Close() - } - hc.btfHandles = nil - hc.btfSpecs = nil -} - -func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) ( - loadMap func(string) (*Map, error), - loadProgram func(string) (*Program, error), - done func() (map[string]*Map, map[string]*Program), - cleanup func(), -) { - var ( - maps = make(map[string]*Map) - progs = make(map[string]*Program) - handles = newHandleCache() - skipMapsAndProgs = false - ) - - cleanup = func() { - handles.close() - - if skipMapsAndProgs { - return - } - - for _, m := range maps { - m.Close() - } - - for _, p := range progs { - p.Close() - 
} - } - - done = func() (map[string]*Map, map[string]*Program) { - skipMapsAndProgs = true - return maps, progs - } - - loadMap = func(mapName string) (*Map, error) { - if m := maps[mapName]; m != nil { - return m, nil - } - - mapSpec := coll.Maps[mapName] - if mapSpec == nil { - return nil, fmt.Errorf("missing map %s", mapName) - } - - m, err := newMapWithOptions(mapSpec, opts.Maps, handles) - if err != nil { - return nil, fmt.Errorf("map %s: %w", mapName, err) - } - - maps[mapName] = m - return m, nil - } - - loadProgram = func(progName string) (*Program, error) { - if prog := progs[progName]; prog != nil { - return prog, nil - } - - progSpec := coll.Programs[progName] - if progSpec == nil { - return nil, fmt.Errorf("unknown program %s", progName) - } - - progSpec = progSpec.Copy() - - // Rewrite any reference to a valid map. - for i := range progSpec.Instructions { - ins := &progSpec.Instructions[i] - - if !ins.IsLoadFromMap() || ins.Reference == "" { - continue - } - - if uint32(ins.Constant) != math.MaxUint32 { - // Don't overwrite maps already rewritten, users can - // rewrite programs in the spec themselves - continue - } - - m, err := loadMap(ins.Reference) - if err != nil { - return nil, fmt.Errorf("program %s: %w", progName, err) - } - - fd := m.FD() - if fd < 0 { - return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd) - } - if err := ins.RewriteMapPtr(m.FD()); err != nil { - return nil, fmt.Errorf("progam %s: map %s: %w", progName, ins.Reference, err) - } - } - - prog, err := newProgramWithOptions(progSpec, opts.Programs, handles) - if err != nil { - return nil, fmt.Errorf("program %s: %w", progName, err) - } - - progs[progName] = prog - return prog, nil - } - - return -} - -// LoadCollection parses an object file and converts it to a collection. 
-func LoadCollection(file string) (*Collection, error) { - spec, err := LoadCollectionSpec(file) - if err != nil { - return nil, err - } - return NewCollection(spec) -} - -// Close frees all maps and programs associated with the collection. -// -// The collection mustn't be used afterwards. -func (coll *Collection) Close() { - for _, prog := range coll.Programs { - prog.Close() - } - for _, m := range coll.Maps { - m.Close() - } -} - -// DetachMap removes the named map from the Collection. -// -// This means that a later call to Close() will not affect this map. -// -// Returns nil if no map of that name exists. -func (coll *Collection) DetachMap(name string) *Map { - m := coll.Maps[name] - delete(coll.Maps, name) - return m -} - -// DetachProgram removes the named program from the Collection. -// -// This means that a later call to Close() will not affect this program. -// -// Returns nil if no program of that name exists. -func (coll *Collection) DetachProgram(name string) *Program { - p := coll.Programs[name] - delete(coll.Programs, name) - return p -} - -// Assign the contents of a collection to a struct. -// -// Deprecated: use CollectionSpec.Assign instead. It provides the same -// functionality but creates only the maps and programs requested. 
-func (coll *Collection) Assign(to interface{}) error { - assignedMaps := make(map[string]struct{}) - assignedPrograms := make(map[string]struct{}) - valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { - switch typ { - case reflect.TypeOf((*Program)(nil)): - p := coll.Programs[name] - if p == nil { - return reflect.Value{}, fmt.Errorf("missing program %q", name) - } - assignedPrograms[name] = struct{}{} - return reflect.ValueOf(p), nil - case reflect.TypeOf((*Map)(nil)): - m := coll.Maps[name] - if m == nil { - return reflect.Value{}, fmt.Errorf("missing map %q", name) - } - assignedMaps[name] = struct{}{} - return reflect.ValueOf(m), nil - default: - return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) - } - } - - if err := assignValues(to, valueOf); err != nil { - return err - } - - for name := range assignedPrograms { - coll.DetachProgram(name) - } - - for name := range assignedMaps { - coll.DetachMap(name) - } - - return nil -} - -func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Value, error)) error { - type structField struct { - reflect.StructField - value reflect.Value - } - - var ( - fields []structField - visitedTypes = make(map[reflect.Type]bool) - flattenStruct func(reflect.Value) error - ) - - flattenStruct = func(structVal reflect.Value) error { - structType := structVal.Type() - if structType.Kind() != reflect.Struct { - return fmt.Errorf("%s is not a struct", structType) - } - - if visitedTypes[structType] { - return fmt.Errorf("recursion on type %s", structType) - } - - for i := 0; i < structType.NumField(); i++ { - field := structField{structType.Field(i), structVal.Field(i)} - - name := field.Tag.Get("ebpf") - if name != "" { - fields = append(fields, field) - continue - } - - var err error - switch field.Type.Kind() { - case reflect.Ptr: - if field.Type.Elem().Kind() != reflect.Struct { - continue - } - - if field.value.IsNil() { - return fmt.Errorf("nil pointer to %s", structType) - } - 
- err = flattenStruct(field.value.Elem()) - - case reflect.Struct: - err = flattenStruct(field.value) - - default: - continue - } - - if err != nil { - return fmt.Errorf("field %s: %w", field.Name, err) - } - } - - return nil - } - - toValue := reflect.ValueOf(to) - if toValue.Type().Kind() != reflect.Ptr { - return fmt.Errorf("%T is not a pointer to struct", to) - } - - if toValue.IsNil() { - return fmt.Errorf("nil pointer to %T", to) - } - - if err := flattenStruct(toValue.Elem()); err != nil { - return err - } - - type elem struct { - // Either *Map or *Program - typ reflect.Type - name string - } - - assignedTo := make(map[elem]string) - for _, field := range fields { - name := field.Tag.Get("ebpf") - if strings.Contains(name, ",") { - return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name) - } - - e := elem{field.Type, name} - if assignedField := assignedTo[e]; assignedField != "" { - return fmt.Errorf("field %s: %q was already assigned to %s", field.Name, name, assignedField) - } - - value, err := valueOf(field.Type, name) - if err != nil { - return fmt.Errorf("field %s: %w", field.Name, err) - } - - if !field.value.CanSet() { - return fmt.Errorf("field %s: can't set value", field.Name) - } - - field.value.Set(value) - assignedTo[e] = field.Name - } - - return nil -} diff --git a/vendor/github.com/cilium/ebpf/doc.go b/vendor/github.com/cilium/ebpf/doc.go deleted file mode 100644 index f7f34da..0000000 --- a/vendor/github.com/cilium/ebpf/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Package ebpf is a toolkit for working with eBPF programs. -// -// eBPF programs are small snippets of code which are executed directly -// in a VM in the Linux kernel, which makes them very fast and flexible. -// Many Linux subsystems now accept eBPF programs. This makes it possible -// to implement highly application specific logic inside the kernel, -// without having to modify the actual kernel itself. 
-// -// This package is designed for long-running processes which -// want to use eBPF to implement part of their application logic. It has no -// run-time dependencies outside of the library and the Linux kernel itself. -// eBPF code should be compiled ahead of time using clang, and shipped with -// your application as any other resource. -// -// Use the link subpackage to attach a loaded program to a hook in the kernel. -package ebpf diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go deleted file mode 100644 index c2afbc3..0000000 --- a/vendor/github.com/cilium/ebpf/elf_reader.go +++ /dev/null @@ -1,953 +0,0 @@ -package ebpf - -import ( - "bufio" - "bytes" - "debug/elf" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "os" - "strings" - - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" - "github.com/cilium/ebpf/internal/unix" -) - -// elfCode is a convenience to reduce the amount of arguments that have to -// be passed around explicitly. You should treat it's contents as immutable. -type elfCode struct { - *internal.SafeELFFile - sections map[elf.SectionIndex]*elfSection - license string - version uint32 - btf *btf.Spec -} - -// LoadCollectionSpec parses an ELF file into a CollectionSpec. -func LoadCollectionSpec(file string) (*CollectionSpec, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - spec, err := LoadCollectionSpecFromReader(f) - if err != nil { - return nil, fmt.Errorf("file %s: %w", file, err) - } - return spec, nil -} - -// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec. 
-func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { - f, err := internal.NewSafeELFFile(rd) - if err != nil { - return nil, err - } - defer f.Close() - - var ( - licenseSection *elf.Section - versionSection *elf.Section - sections = make(map[elf.SectionIndex]*elfSection) - relSections = make(map[elf.SectionIndex]*elf.Section) - ) - - // This is the target of relocations generated by inline assembly. - sections[elf.SHN_UNDEF] = newElfSection(new(elf.Section), undefSection) - - // Collect all the sections we're interested in. This includes relocations - // which we parse later. - for i, sec := range f.Sections { - idx := elf.SectionIndex(i) - - switch { - case strings.HasPrefix(sec.Name, "license"): - licenseSection = sec - case strings.HasPrefix(sec.Name, "version"): - versionSection = sec - case strings.HasPrefix(sec.Name, "maps"): - sections[idx] = newElfSection(sec, mapSection) - case sec.Name == ".maps": - sections[idx] = newElfSection(sec, btfMapSection) - case sec.Name == ".bss" || sec.Name == ".data" || strings.HasPrefix(sec.Name, ".rodata"): - sections[idx] = newElfSection(sec, dataSection) - case sec.Type == elf.SHT_REL: - // Store relocations under the section index of the target - relSections[elf.SectionIndex(sec.Info)] = sec - case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0: - sections[idx] = newElfSection(sec, programSection) - } - } - - license, err := loadLicense(licenseSection) - if err != nil { - return nil, fmt.Errorf("load license: %w", err) - } - - version, err := loadVersion(versionSection, f.ByteOrder) - if err != nil { - return nil, fmt.Errorf("load version: %w", err) - } - - btfSpec, err := btf.LoadSpecFromReader(rd) - if err != nil && !errors.Is(err, btf.ErrNotFound) { - return nil, fmt.Errorf("load BTF: %w", err) - } - - // Assign symbols to all the sections we're interested in. 
- symbols, err := f.Symbols() - if err != nil { - return nil, fmt.Errorf("load symbols: %v", err) - } - - for _, symbol := range symbols { - idx := symbol.Section - symType := elf.ST_TYPE(symbol.Info) - - section := sections[idx] - if section == nil { - continue - } - - // Older versions of LLVM don't tag symbols correctly, so keep - // all NOTYPE ones. - keep := symType == elf.STT_NOTYPE - switch section.kind { - case mapSection, btfMapSection, dataSection: - keep = keep || symType == elf.STT_OBJECT - case programSection: - keep = keep || symType == elf.STT_FUNC - } - if !keep || symbol.Name == "" { - continue - } - - section.symbols[symbol.Value] = symbol - } - - ec := &elfCode{ - SafeELFFile: f, - sections: sections, - license: license, - version: version, - btf: btfSpec, - } - - // Go through relocation sections, and parse the ones for sections we're - // interested in. Make sure that relocations point at valid sections. - for idx, relSection := range relSections { - section := sections[idx] - if section == nil { - continue - } - - rels, err := ec.loadRelocations(relSection, symbols) - if err != nil { - return nil, fmt.Errorf("relocation for section %q: %w", section.Name, err) - } - - for _, rel := range rels { - target := sections[rel.Section] - if target == nil { - return nil, fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) - } - - if target.Flags&elf.SHF_STRINGS > 0 { - return nil, fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported) - } - - target.references++ - } - - section.relocations = rels - } - - // Collect all the various ways to define maps. 
- maps := make(map[string]*MapSpec) - if err := ec.loadMaps(maps); err != nil { - return nil, fmt.Errorf("load maps: %w", err) - } - - if err := ec.loadBTFMaps(maps); err != nil { - return nil, fmt.Errorf("load BTF maps: %w", err) - } - - if err := ec.loadDataSections(maps); err != nil { - return nil, fmt.Errorf("load data sections: %w", err) - } - - // Finally, collect programs and link them. - progs, err := ec.loadPrograms() - if err != nil { - return nil, fmt.Errorf("load programs: %w", err) - } - - return &CollectionSpec{maps, progs}, nil -} - -func loadLicense(sec *elf.Section) (string, error) { - if sec == nil { - return "", nil - } - - data, err := sec.Data() - if err != nil { - return "", fmt.Errorf("section %s: %v", sec.Name, err) - } - return string(bytes.TrimRight(data, "\000")), nil -} - -func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) { - if sec == nil { - return 0, nil - } - - var version uint32 - if err := binary.Read(sec.Open(), bo, &version); err != nil { - return 0, fmt.Errorf("section %s: %v", sec.Name, err) - } - return version, nil -} - -type elfSectionKind int - -const ( - undefSection elfSectionKind = iota - mapSection - btfMapSection - programSection - dataSection -) - -type elfSection struct { - *elf.Section - kind elfSectionKind - // Offset from the start of the section to a symbol - symbols map[uint64]elf.Symbol - // Offset from the start of the section to a relocation, which points at - // a symbol in another section. - relocations map[uint64]elf.Symbol - // The number of relocations pointing at this section. 
- references int -} - -func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection { - return &elfSection{ - section, - kind, - make(map[uint64]elf.Symbol), - make(map[uint64]elf.Symbol), - 0, - } -} - -func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) { - var ( - progs []*ProgramSpec - libs []*ProgramSpec - ) - - for _, sec := range ec.sections { - if sec.kind != programSection { - continue - } - - if len(sec.symbols) == 0 { - return nil, fmt.Errorf("section %v: missing symbols", sec.Name) - } - - funcSym, ok := sec.symbols[0] - if !ok { - return nil, fmt.Errorf("section %v: no label at start", sec.Name) - } - - insns, length, err := ec.loadInstructions(sec) - if err != nil { - return nil, fmt.Errorf("program %s: %w", funcSym.Name, err) - } - - progType, attachType, progFlags, attachTo := getProgType(sec.Name) - - spec := &ProgramSpec{ - Name: funcSym.Name, - Type: progType, - Flags: progFlags, - AttachType: attachType, - AttachTo: attachTo, - License: ec.license, - KernelVersion: ec.version, - Instructions: insns, - ByteOrder: ec.ByteOrder, - } - - if ec.btf != nil { - spec.BTF, err = ec.btf.Program(sec.Name, length) - if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) { - return nil, fmt.Errorf("program %s: %w", funcSym.Name, err) - } - } - - if spec.Type == UnspecifiedProgram { - // There is no single name we can use for "library" sections, - // since they may contain multiple functions. We'll decode the - // labels they contain later on, and then link sections that way. 
- libs = append(libs, spec) - } else { - progs = append(progs, spec) - } - } - - res := make(map[string]*ProgramSpec, len(progs)) - for _, prog := range progs { - err := link(prog, libs) - if err != nil { - return nil, fmt.Errorf("program %s: %w", prog.Name, err) - } - res[prog.Name] = prog - } - - return res, nil -} - -func (ec *elfCode) loadInstructions(section *elfSection) (asm.Instructions, uint64, error) { - var ( - r = bufio.NewReader(section.Open()) - insns asm.Instructions - offset uint64 - ) - for { - var ins asm.Instruction - n, err := ins.Unmarshal(r, ec.ByteOrder) - if err == io.EOF { - return insns, offset, nil - } - if err != nil { - return nil, 0, fmt.Errorf("offset %d: %w", offset, err) - } - - ins.Symbol = section.symbols[offset].Name - - if rel, ok := section.relocations[offset]; ok { - if err = ec.relocateInstruction(&ins, rel); err != nil { - return nil, 0, fmt.Errorf("offset %d: relocate instruction: %w", offset, err) - } - } - - insns = append(insns, ins) - offset += n - } -} - -func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error { - var ( - typ = elf.ST_TYPE(rel.Info) - bind = elf.ST_BIND(rel.Info) - name = rel.Name - ) - - target := ec.sections[rel.Section] - - switch target.kind { - case mapSection, btfMapSection: - if bind != elf.STB_GLOBAL { - return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name) - } - - if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE { - // STT_NOTYPE is generated on clang < 8 which doesn't tag - // relocations appropriately. - return fmt.Errorf("map load: incorrect relocation type %v", typ) - } - - ins.Src = asm.PseudoMapFD - - // Mark the instruction as needing an update when creating the - // collection. 
- if err := ins.RewriteMapPtr(-1); err != nil { - return err - } - - case dataSection: - var offset uint32 - switch typ { - case elf.STT_SECTION: - if bind != elf.STB_LOCAL { - return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind) - } - - // This is really a reference to a static symbol, which clang doesn't - // emit a symbol table entry for. Instead it encodes the offset in - // the instruction itself. - offset = uint32(uint64(ins.Constant)) - - case elf.STT_OBJECT: - if bind != elf.STB_GLOBAL { - return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind) - } - - offset = uint32(rel.Value) - - default: - return fmt.Errorf("incorrect relocation type %v for direct map load", typ) - } - - // We rely on using the name of the data section as the reference. It - // would be nicer to keep the real name in case of an STT_OBJECT, but - // it's not clear how to encode that into Instruction. - name = target.Name - - // The kernel expects the offset in the second basic BPF instruction. - ins.Constant = int64(uint64(offset) << 32) - ins.Src = asm.PseudoMapValue - - // Mark the instruction as needing an update when creating the - // collection. - if err := ins.RewriteMapPtr(-1); err != nil { - return err - } - - case programSection: - if ins.OpCode.JumpOp() != asm.Call { - return fmt.Errorf("not a call instruction: %s", ins) - } - - if ins.Src != asm.PseudoCall { - return fmt.Errorf("call: %s: incorrect source register", name) - } - - switch typ { - case elf.STT_NOTYPE, elf.STT_FUNC: - if bind != elf.STB_GLOBAL { - return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) - } - - case elf.STT_SECTION: - if bind != elf.STB_LOCAL { - return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) - } - - // The function we want to call is in the indicated section, - // at the offset encoded in the instruction itself. Reverse - // the calculation to find the real function we're looking for. 
- // A value of -1 references the first instruction in the section. - offset := int64(int32(ins.Constant)+1) * asm.InstructionSize - if offset < 0 { - return fmt.Errorf("call: %s: invalid offset %d", name, offset) - } - - sym, ok := target.symbols[uint64(offset)] - if !ok { - return fmt.Errorf("call: %s: no symbol at offset %d", name, offset) - } - - ins.Constant = -1 - name = sym.Name - - default: - return fmt.Errorf("call: %s: invalid symbol type %s", name, typ) - } - - case undefSection: - if bind != elf.STB_GLOBAL { - return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind) - } - - if typ != elf.STT_NOTYPE { - return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ) - } - - // There is nothing to do here but set ins.Reference. - - default: - return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported) - } - - ins.Reference = name - return nil -} - -func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { - for _, sec := range ec.sections { - if sec.kind != mapSection { - continue - } - - nSym := len(sec.symbols) - if nSym == 0 { - return fmt.Errorf("section %v: no symbols", sec.Name) - } - - if sec.Size%uint64(nSym) != 0 { - return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name) - } - - var ( - r = bufio.NewReader(sec.Open()) - size = sec.Size / uint64(nSym) - ) - for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size { - mapSym, ok := sec.symbols[offset] - if !ok { - return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset) - } - - mapName := mapSym.Name - if maps[mapName] != nil { - return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym) - } - - lr := io.LimitReader(r, int64(size)) - - spec := MapSpec{ - Name: SanitizeName(mapName, -1), - } - switch { - case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil: - return fmt.Errorf("map %s: missing type", mapName) - case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil: - 
return fmt.Errorf("map %s: missing key size", mapName) - case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil: - return fmt.Errorf("map %s: missing value size", mapName) - case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil: - return fmt.Errorf("map %s: missing max entries", mapName) - case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil: - return fmt.Errorf("map %s: missing flags", mapName) - } - - if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil { - return fmt.Errorf("map %s: unknown and non-zero fields in definition", mapName) - } - - if err := spec.clampPerfEventArraySize(); err != nil { - return fmt.Errorf("map %s: %w", mapName, err) - } - - maps[mapName] = &spec - } - } - - return nil -} - -func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { - for _, sec := range ec.sections { - if sec.kind != btfMapSection { - continue - } - - if ec.btf == nil { - return fmt.Errorf("missing BTF") - } - - _, err := io.Copy(internal.DiscardZeroes{}, bufio.NewReader(sec.Open())) - if err != nil { - return fmt.Errorf("section %v: initializing BTF map definitions: %w", sec.Name, internal.ErrNotSupported) - } - - var ds btf.Datasec - if err := ec.btf.FindType(sec.Name, &ds); err != nil { - return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err) - } - - for _, vs := range ds.Vars { - v, ok := vs.Type.(*btf.Var) - if !ok { - return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type) - } - name := string(v.Name) - - if maps[name] != nil { - return fmt.Errorf("section %v: map %s already exists", sec.Name, name) - } - - mapStruct, ok := v.Type.(*btf.Struct) - if !ok { - return fmt.Errorf("expected struct, got %s", v.Type) - } - - mapSpec, err := mapSpecFromBTF(name, mapStruct, false, ec.btf) - if err != nil { - return fmt.Errorf("map %v: %w", name, err) - } - - if err := mapSpec.clampPerfEventArraySize(); err != nil { - return fmt.Errorf("map %v: %w", name, err) - } - - maps[name] = mapSpec - } - } - - return nil -} 
- -// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing -// a BTF map definition. The name and spec arguments will be copied to the -// resulting MapSpec, and inner must be true on any resursive invocations. -func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (*MapSpec, error) { - - var ( - key, value btf.Type - keySize, valueSize uint32 - mapType, flags, maxEntries uint32 - pinType PinType - innerMapSpec *MapSpec - err error - ) - - for i, member := range def.Members { - switch member.Name { - case "type": - mapType, err = uintFromBTF(member.Type) - if err != nil { - return nil, fmt.Errorf("can't get type: %w", err) - } - - case "map_flags": - flags, err = uintFromBTF(member.Type) - if err != nil { - return nil, fmt.Errorf("can't get BTF map flags: %w", err) - } - - case "max_entries": - maxEntries, err = uintFromBTF(member.Type) - if err != nil { - return nil, fmt.Errorf("can't get BTF map max entries: %w", err) - } - - case "key": - if keySize != 0 { - return nil, errors.New("both key and key_size given") - } - - pk, ok := member.Type.(*btf.Pointer) - if !ok { - return nil, fmt.Errorf("key type is not a pointer: %T", member.Type) - } - - key = pk.Target - - size, err := btf.Sizeof(pk.Target) - if err != nil { - return nil, fmt.Errorf("can't get size of BTF key: %w", err) - } - - keySize = uint32(size) - - case "value": - if valueSize != 0 { - return nil, errors.New("both value and value_size given") - } - - vk, ok := member.Type.(*btf.Pointer) - if !ok { - return nil, fmt.Errorf("value type is not a pointer: %T", member.Type) - } - - value = vk.Target - - size, err := btf.Sizeof(vk.Target) - if err != nil { - return nil, fmt.Errorf("can't get size of BTF value: %w", err) - } - - valueSize = uint32(size) - - case "key_size": - // Key needs to be nil and keySize needs to be 0 for key_size to be - // considered a valid member. 
- if key != nil || keySize != 0 { - return nil, errors.New("both key and key_size given") - } - - keySize, err = uintFromBTF(member.Type) - if err != nil { - return nil, fmt.Errorf("can't get BTF key size: %w", err) - } - - case "value_size": - // Value needs to be nil and valueSize needs to be 0 for value_size to be - // considered a valid member. - if value != nil || valueSize != 0 { - return nil, errors.New("both value and value_size given") - } - - valueSize, err = uintFromBTF(member.Type) - if err != nil { - return nil, fmt.Errorf("can't get BTF value size: %w", err) - } - - case "pinning": - if inner { - return nil, errors.New("inner maps can't be pinned") - } - - pinning, err := uintFromBTF(member.Type) - if err != nil { - return nil, fmt.Errorf("can't get pinning: %w", err) - } - - pinType = PinType(pinning) - - case "values": - // The 'values' field in BTF map definitions is used for declaring map - // value types that are references to other BPF objects, like other maps - // or programs. It is always expected to be an array of pointers. - if i != len(def.Members)-1 { - return nil, errors.New("'values' must be the last member in a BTF map definition") - } - - if valueSize != 0 && valueSize != 4 { - return nil, errors.New("value_size must be 0 or 4") - } - valueSize = 4 - - valueType, err := resolveBTFArrayMacro(member.Type) - if err != nil { - return nil, fmt.Errorf("can't resolve type of member 'values': %w", err) - } - - switch t := valueType.(type) { - case *btf.Struct: - // The values member pointing to an array of structs means we're expecting - // a map-in-map declaration. - if MapType(mapType) != ArrayOfMaps && MapType(mapType) != HashOfMaps { - return nil, errors.New("outer map needs to be an array or a hash of maps") - } - if inner { - return nil, fmt.Errorf("nested inner maps are not supported") - } - - // This inner map spec is used as a map template, but it needs to be - // created as a traditional map before it can be used to do so. 
- // libbpf names the inner map template '.inner', but we - // opted for _inner to simplify validation logic. (dots only supported - // on kernels 5.2 and up) - // Pass the BTF spec from the parent object, since both parent and - // child must be created from the same BTF blob (on kernels that support BTF). - innerMapSpec, err = mapSpecFromBTF(name+"_inner", t, true, spec) - if err != nil { - return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err) - } - - default: - return nil, fmt.Errorf("unsupported value type %q in 'values' field", t) - } - - default: - return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name) - } - } - - bm := btf.NewMap(spec, key, value) - - return &MapSpec{ - Name: SanitizeName(name, -1), - Type: MapType(mapType), - KeySize: keySize, - ValueSize: valueSize, - MaxEntries: maxEntries, - Flags: flags, - BTF: &bm, - Pinning: pinType, - InnerMap: innerMapSpec, - }, nil -} - -// uintFromBTF resolves the __uint macro, which is a pointer to a sized -// array, e.g. for int (*foo)[10], this function will return 10. -func uintFromBTF(typ btf.Type) (uint32, error) { - ptr, ok := typ.(*btf.Pointer) - if !ok { - return 0, fmt.Errorf("not a pointer: %v", typ) - } - - arr, ok := ptr.Target.(*btf.Array) - if !ok { - return 0, fmt.Errorf("not a pointer to array: %v", typ) - } - - return arr.Nelems, nil -} - -// resolveBTFArrayMacro resolves the __array macro, which declares an array -// of pointers to a given type. This function returns the target Type of -// the pointers in the array. 
-func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) { - arr, ok := typ.(*btf.Array) - if !ok { - return nil, fmt.Errorf("not an array: %v", typ) - } - - ptr, ok := arr.Type.(*btf.Pointer) - if !ok { - return nil, fmt.Errorf("not an array of pointers: %v", typ) - } - - return ptr.Target, nil -} - -func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { - for _, sec := range ec.sections { - if sec.kind != dataSection { - continue - } - - if sec.references == 0 { - // Prune data sections which are not referenced by any - // instructions. - continue - } - - if ec.btf == nil { - return errors.New("data sections require BTF, make sure all consts are marked as static") - } - - btfMap, err := ec.btf.Datasec(sec.Name) - if err != nil { - return err - } - - data, err := sec.Data() - if err != nil { - return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err) - } - - if uint64(len(data)) > math.MaxUint32 { - return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name) - } - - mapSpec := &MapSpec{ - Name: SanitizeName(sec.Name, -1), - Type: Array, - KeySize: 4, - ValueSize: uint32(len(data)), - MaxEntries: 1, - Contents: []MapKV{{uint32(0), data}}, - BTF: btfMap, - } - - switch sec.Name { - case ".rodata": - mapSpec.Flags = unix.BPF_F_RDONLY_PROG - mapSpec.Freeze = true - case ".bss": - // The kernel already zero-initializes the map - mapSpec.Contents = nil - } - - maps[sec.Name] = mapSpec - } - return nil -} - -func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { - types := map[string]struct { - progType ProgramType - attachType AttachType - progFlags uint32 - }{ - // From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c - "socket": {SocketFilter, AttachNone, 0}, - "seccomp": {SocketFilter, AttachNone, 0}, - "kprobe/": {Kprobe, AttachNone, 0}, - "uprobe/": {Kprobe, AttachNone, 0}, - "kretprobe/": {Kprobe, AttachNone, 0}, - "uretprobe/": {Kprobe, 
AttachNone, 0}, - "tracepoint/": {TracePoint, AttachNone, 0}, - "raw_tracepoint/": {RawTracepoint, AttachNone, 0}, - "raw_tp/": {RawTracepoint, AttachNone, 0}, - "tp_btf/": {Tracing, AttachTraceRawTp, 0}, - "xdp": {XDP, AttachNone, 0}, - "perf_event": {PerfEvent, AttachNone, 0}, - "lwt_in": {LWTIn, AttachNone, 0}, - "lwt_out": {LWTOut, AttachNone, 0}, - "lwt_xmit": {LWTXmit, AttachNone, 0}, - "lwt_seg6local": {LWTSeg6Local, AttachNone, 0}, - "sockops": {SockOps, AttachCGroupSockOps, 0}, - "sk_skb/stream_parser": {SkSKB, AttachSkSKBStreamParser, 0}, - "sk_skb/stream_verdict": {SkSKB, AttachSkSKBStreamParser, 0}, - "sk_msg": {SkMsg, AttachSkSKBStreamVerdict, 0}, - "lirc_mode2": {LircMode2, AttachLircMode2, 0}, - "flow_dissector": {FlowDissector, AttachFlowDissector, 0}, - "iter/": {Tracing, AttachTraceIter, 0}, - "fentry/": {Tracing, AttachTraceFEntry, 0}, - "fmod_ret/": {Tracing, AttachModifyReturn, 0}, - "fexit/": {Tracing, AttachTraceFExit, 0}, - "fentry.s/": {Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE}, - "fmod_ret.s/": {Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE}, - "fexit.s/": {Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE}, - "sk_lookup/": {SkLookup, AttachSkLookup, 0}, - "lsm/": {LSM, AttachLSMMac, 0}, - "lsm.s/": {LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE}, - - "cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress, 0}, - "cgroup_skb/egress": {CGroupSKB, AttachCGroupInetEgress, 0}, - "cgroup/dev": {CGroupDevice, AttachCGroupDevice, 0}, - "cgroup/skb": {CGroupSKB, AttachNone, 0}, - "cgroup/sock": {CGroupSock, AttachCGroupInetSockCreate, 0}, - "cgroup/post_bind4": {CGroupSock, AttachCGroupInet4PostBind, 0}, - "cgroup/post_bind6": {CGroupSock, AttachCGroupInet6PostBind, 0}, - "cgroup/bind4": {CGroupSockAddr, AttachCGroupInet4Bind, 0}, - "cgroup/bind6": {CGroupSockAddr, AttachCGroupInet6Bind, 0}, - "cgroup/connect4": {CGroupSockAddr, AttachCGroupInet4Connect, 0}, - "cgroup/connect6": {CGroupSockAddr, AttachCGroupInet6Connect, 0}, - 
"cgroup/sendmsg4": {CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0}, - "cgroup/sendmsg6": {CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0}, - "cgroup/recvmsg4": {CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0}, - "cgroup/recvmsg6": {CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0}, - "cgroup/sysctl": {CGroupSysctl, AttachCGroupSysctl, 0}, - "cgroup/getsockopt": {CGroupSockopt, AttachCGroupGetsockopt, 0}, - "cgroup/setsockopt": {CGroupSockopt, AttachCGroupSetsockopt, 0}, - "classifier": {SchedCLS, AttachNone, 0}, - "action": {SchedACT, AttachNone, 0}, - } - - for prefix, t := range types { - if !strings.HasPrefix(sectionName, prefix) { - continue - } - - if !strings.HasSuffix(prefix, "/") { - return t.progType, t.attachType, t.progFlags, "" - } - - return t.progType, t.attachType, t.progFlags, sectionName[len(prefix):] - } - - return UnspecifiedProgram, AttachNone, 0, "" -} - -func (ec *elfCode) loadRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) { - rels := make(map[uint64]elf.Symbol) - - if sec.Entsize < 16 { - return nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name) - } - - r := bufio.NewReader(sec.Open()) - for off := uint64(0); off < sec.Size; off += sec.Entsize { - ent := io.LimitReader(r, int64(sec.Entsize)) - - var rel elf.Rel64 - if binary.Read(ent, ec.ByteOrder, &rel) != nil { - return nil, fmt.Errorf("can't parse relocation at offset %v", off) - } - - symNo := int(elf.R_SYM64(rel.Info) - 1) - if symNo >= len(symbols) { - return nil, fmt.Errorf("offset %d: symbol %d doesn't exist", off, symNo) - } - - symbol := symbols[symNo] - rels[rel.Off] = symbol - } - - return rels, nil -} diff --git a/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go b/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go deleted file mode 100644 index d46d135..0000000 --- a/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build gofuzz - -// Use with https://github.com/dvyukov/go-fuzz - -package ebpf - -import 
"bytes" - -func FuzzLoadCollectionSpec(data []byte) int { - spec, err := LoadCollectionSpecFromReader(bytes.NewReader(data)) - if err != nil { - if spec != nil { - panic("spec is not nil") - } - return 0 - } - if spec == nil { - panic("spec is nil") - } - return 1 -} diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go deleted file mode 100644 index b95131e..0000000 --- a/vendor/github.com/cilium/ebpf/info.go +++ /dev/null @@ -1,239 +0,0 @@ -package ebpf - -import ( - "bufio" - "encoding/hex" - "errors" - "fmt" - "io" - "os" - "strings" - "syscall" - "time" - - "github.com/cilium/ebpf/internal" -) - -// MapInfo describes a map. -type MapInfo struct { - Type MapType - id MapID - KeySize uint32 - ValueSize uint32 - MaxEntries uint32 - Flags uint32 - // Name as supplied by user space at load time. - Name string -} - -func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) { - info, err := bpfGetMapInfoByFD(fd) - if errors.Is(err, syscall.EINVAL) { - return newMapInfoFromProc(fd) - } - if err != nil { - return nil, err - } - - return &MapInfo{ - MapType(info.map_type), - MapID(info.id), - info.key_size, - info.value_size, - info.max_entries, - info.map_flags, - // name is available from 4.15. - internal.CString(info.name[:]), - }, nil -} - -func newMapInfoFromProc(fd *internal.FD) (*MapInfo, error) { - var mi MapInfo - err := scanFdInfo(fd, map[string]interface{}{ - "map_type": &mi.Type, - "key_size": &mi.KeySize, - "value_size": &mi.ValueSize, - "max_entries": &mi.MaxEntries, - "map_flags": &mi.Flags, - }) - if err != nil { - return nil, err - } - return &mi, nil -} - -// ID returns the map ID. -// -// Available from 4.13. -// -// The bool return value indicates whether this optional field is available. -func (mi *MapInfo) ID() (MapID, bool) { - return mi.id, mi.id > 0 -} - -// programStats holds statistics of a program. -type programStats struct { - // Total accumulated runtime of the program ins ns. 
- runtime time.Duration - // Total number of times the program was called. - runCount uint64 -} - -// ProgramInfo describes a program. -type ProgramInfo struct { - Type ProgramType - id ProgramID - // Truncated hash of the BPF bytecode. - Tag string - // Name as supplied by user space at load time. - Name string - - stats *programStats -} - -func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { - info, err := bpfGetProgInfoByFD(fd) - if errors.Is(err, syscall.EINVAL) { - return newProgramInfoFromProc(fd) - } - if err != nil { - return nil, err - } - - return &ProgramInfo{ - Type: ProgramType(info.prog_type), - id: ProgramID(info.id), - // tag is available if the kernel supports BPF_PROG_GET_INFO_BY_FD. - Tag: hex.EncodeToString(info.tag[:]), - // name is available from 4.15. - Name: internal.CString(info.name[:]), - stats: &programStats{ - runtime: time.Duration(info.run_time_ns), - runCount: info.run_cnt, - }, - }, nil -} - -func newProgramInfoFromProc(fd *internal.FD) (*ProgramInfo, error) { - var info ProgramInfo - err := scanFdInfo(fd, map[string]interface{}{ - "prog_type": &info.Type, - "prog_tag": &info.Tag, - }) - if errors.Is(err, errMissingFields) { - return nil, &internal.UnsupportedFeatureError{ - Name: "reading program info from /proc/self/fdinfo", - MinimumVersion: internal.Version{4, 10, 0}, - } - } - if err != nil { - return nil, err - } - - return &info, nil -} - -// ID returns the program ID. -// -// Available from 4.13. -// -// The bool return value indicates whether this optional field is available. -func (pi *ProgramInfo) ID() (ProgramID, bool) { - return pi.id, pi.id > 0 -} - -// RunCount returns the total number of times the program was called. -// -// Can return 0 if the collection of statistics is not enabled. See EnableStats(). -// The bool return value indicates whether this optional field is available. 
-func (pi *ProgramInfo) RunCount() (uint64, bool) { - if pi.stats != nil { - return pi.stats.runCount, true - } - return 0, false -} - -// Runtime returns the total accumulated runtime of the program. -// -// Can return 0 if the collection of statistics is not enabled. See EnableStats(). -// The bool return value indicates whether this optional field is available. -func (pi *ProgramInfo) Runtime() (time.Duration, bool) { - if pi.stats != nil { - return pi.stats.runtime, true - } - return time.Duration(0), false -} - -func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error { - raw, err := fd.Value() - if err != nil { - return err - } - - fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw)) - if err != nil { - return err - } - defer fh.Close() - - if err := scanFdInfoReader(fh, fields); err != nil { - return fmt.Errorf("%s: %w", fh.Name(), err) - } - return nil -} - -var errMissingFields = errors.New("missing fields") - -func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { - var ( - scanner = bufio.NewScanner(r) - scanned int - ) - - for scanner.Scan() { - parts := strings.SplitN(scanner.Text(), "\t", 2) - if len(parts) != 2 { - continue - } - - name := strings.TrimSuffix(parts[0], ":") - field, ok := fields[string(name)] - if !ok { - continue - } - - if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 { - return fmt.Errorf("can't parse field %s: %v", name, err) - } - - scanned++ - } - - if err := scanner.Err(); err != nil { - return err - } - - if scanned != len(fields) { - return errMissingFields - } - - return nil -} - -// EnableStats starts the measuring of the runtime -// and run counts of eBPF programs. -// -// Collecting statistics can have an impact on the performance. -// -// Requires at least 5.8. 
-func EnableStats(which uint32) (io.Closer, error) { - attr := internal.BPFEnableStatsAttr{ - StatsType: which, - } - - fd, err := internal.BPFEnableStats(&attr) - if err != nil { - return nil, err - } - return fd, nil -} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf.go b/vendor/github.com/cilium/ebpf/internal/btf/btf.go deleted file mode 100644 index 5da9e11..0000000 --- a/vendor/github.com/cilium/ebpf/internal/btf/btf.go +++ /dev/null @@ -1,799 +0,0 @@ -package btf - -import ( - "bytes" - "debug/elf" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "math" - "os" - "reflect" - "sync" - "unsafe" - - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/unix" -) - -const btfMagic = 0xeB9F - -// Errors returned by BTF functions. -var ( - ErrNotSupported = internal.ErrNotSupported - ErrNotFound = errors.New("not found") - ErrNoExtendedInfo = errors.New("no extended info") -) - -// Spec represents decoded BTF. -type Spec struct { - rawTypes []rawType - strings stringTable - types []Type - namedTypes map[string][]namedType - funcInfos map[string]extInfo - lineInfos map[string]extInfo - coreRelos map[string]coreRelos - byteOrder binary.ByteOrder -} - -type btfHeader struct { - Magic uint16 - Version uint8 - Flags uint8 - HdrLen uint32 - - TypeOff uint32 - TypeLen uint32 - StringOff uint32 - StringLen uint32 -} - -// LoadSpecFromReader reads BTF sections from an ELF. -// -// Returns ErrNotFound if the reader contains no BTF. 
-func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { - file, err := internal.NewSafeELFFile(rd) - if err != nil { - return nil, err - } - defer file.Close() - - btfSection, btfExtSection, sectionSizes, err := findBtfSections(file) - if err != nil { - return nil, err - } - - if btfSection == nil { - return nil, fmt.Errorf("btf: %w", ErrNotFound) - } - - symbols, err := file.Symbols() - if err != nil { - return nil, fmt.Errorf("can't read symbols: %v", err) - } - - variableOffsets := make(map[variable]uint32) - for _, symbol := range symbols { - if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { - // Ignore things like SHN_ABS - continue - } - - if int(symbol.Section) >= len(file.Sections) { - return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section) - } - - secName := file.Sections[symbol.Section].Name - if _, ok := sectionSizes[secName]; !ok { - continue - } - - if symbol.Value > math.MaxUint32 { - return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name) - } - - variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value) - } - - spec, err := loadNakedSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets) - if err != nil { - return nil, err - } - - if btfExtSection == nil { - return spec, nil - } - - spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings) - if err != nil { - return nil, fmt.Errorf("can't read ext info: %w", err) - } - - return spec, nil -} - -func findBtfSections(file *internal.SafeELFFile) (*elf.Section, *elf.Section, map[string]uint32, error) { - var ( - btfSection *elf.Section - btfExtSection *elf.Section - sectionSizes = make(map[string]uint32) - ) - - for _, sec := range file.Sections { - switch sec.Name { - case ".BTF": - btfSection = sec - case ".BTF.ext": - btfExtSection = sec - default: - if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { - 
break - } - - if sec.Size > math.MaxUint32 { - return nil, nil, nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) - } - - sectionSizes[sec.Name] = uint32(sec.Size) - } - } - return btfSection, btfExtSection, sectionSizes, nil -} - -func loadSpecFromVmlinux(rd io.ReaderAt) (*Spec, error) { - file, err := internal.NewSafeELFFile(rd) - if err != nil { - return nil, err - } - defer file.Close() - - btfSection, _, _, err := findBtfSections(file) - if err != nil { - return nil, fmt.Errorf(".BTF ELF section: %s", err) - } - if btfSection == nil { - return nil, fmt.Errorf("unable to find .BTF ELF section") - } - return loadNakedSpec(btfSection.Open(), file.ByteOrder, nil, nil) -} - -func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) { - rawTypes, rawStrings, err := parseBTF(btf, bo) - if err != nil { - return nil, err - } - - err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets) - if err != nil { - return nil, err - } - - types, typesByName, err := inflateRawTypes(rawTypes, rawStrings) - if err != nil { - return nil, err - } - - return &Spec{ - rawTypes: rawTypes, - namedTypes: typesByName, - types: types, - strings: rawStrings, - byteOrder: bo, - }, nil -} - -var kernelBTF struct { - sync.Mutex - *Spec -} - -// LoadKernelSpec returns the current kernel's BTF information. -// -// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns -// ErrNotSupported if BTF is not enabled. 
-func LoadKernelSpec() (*Spec, error) { - kernelBTF.Lock() - defer kernelBTF.Unlock() - - if kernelBTF.Spec != nil { - return kernelBTF.Spec, nil - } - - var err error - kernelBTF.Spec, err = loadKernelSpec() - return kernelBTF.Spec, err -} - -func loadKernelSpec() (*Spec, error) { - release, err := unix.KernelRelease() - if err != nil { - return nil, fmt.Errorf("can't read kernel release number: %w", err) - } - - fh, err := os.Open("/sys/kernel/btf/vmlinux") - if err == nil { - defer fh.Close() - - return loadNakedSpec(fh, internal.NativeEndian, nil, nil) - } - - // use same list of locations as libbpf - // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 - locations := []string{ - "/boot/vmlinux-%s", - "/lib/modules/%s/vmlinux-%[1]s", - "/lib/modules/%s/build/vmlinux", - "/usr/lib/modules/%s/kernel/vmlinux", - "/usr/lib/debug/boot/vmlinux-%s", - "/usr/lib/debug/boot/vmlinux-%s.debug", - "/usr/lib/debug/lib/modules/%s/vmlinux", - } - - for _, loc := range locations { - path := fmt.Sprintf(loc, release) - - fh, err := os.Open(path) - if err != nil { - continue - } - defer fh.Close() - - return loadSpecFromVmlinux(fh) - } - - return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported) -} - -func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) { - rawBTF, err := ioutil.ReadAll(btf) - if err != nil { - return nil, nil, fmt.Errorf("can't read BTF: %v", err) - } - - rd := bytes.NewReader(rawBTF) - - var header btfHeader - if err := binary.Read(rd, bo, &header); err != nil { - return nil, nil, fmt.Errorf("can't read header: %v", err) - } - - if header.Magic != btfMagic { - return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) - } - - if header.Version != 1 { - return nil, nil, fmt.Errorf("unexpected version %v", header.Version) - } - - if header.Flags != 0 { - return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) - } - - 
remainder := int64(header.HdrLen) - int64(binary.Size(&header)) - if remainder < 0 { - return nil, nil, errors.New("header is too short") - } - - if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil { - return nil, nil, fmt.Errorf("header padding: %v", err) - } - - if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil { - return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err) - } - - rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen))) - if err != nil { - return nil, nil, fmt.Errorf("can't read type names: %w", err) - } - - if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil { - return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err) - } - - rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo) - if err != nil { - return nil, nil, fmt.Errorf("can't read types: %w", err) - } - - return rawTypes, rawStrings, nil -} - -type variable struct { - section string - name string -} - -func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error { - for i, rawType := range rawTypes { - if rawType.Kind() != kindDatasec { - continue - } - - name, err := rawStrings.Lookup(rawType.NameOff) - if err != nil { - return err - } - - if name == ".kconfig" || name == ".ksyms" { - return fmt.Errorf("reference to %s: %w", name, ErrNotSupported) - } - - if rawTypes[i].SizeType != 0 { - continue - } - - size, ok := sectionSizes[name] - if !ok { - return fmt.Errorf("data section %s: missing size", name) - } - - rawTypes[i].SizeType = size - - secinfos := rawType.data.([]btfVarSecinfo) - for j, secInfo := range secinfos { - id := int(secInfo.Type - 1) - if id >= len(rawTypes) { - return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j) - } - - varName, err := rawStrings.Lookup(rawTypes[id].NameOff) - if err != nil { 
- return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err) - } - - offset, ok := variableOffsets[variable{name, varName}] - if !ok { - return fmt.Errorf("data section %s: missing offset for variable %s", name, varName) - } - - secinfos[j].Offset = offset - } - } - - return nil -} - -type marshalOpts struct { - ByteOrder binary.ByteOrder - StripFuncLinkage bool -} - -func (s *Spec) marshal(opts marshalOpts) ([]byte, error) { - var ( - buf bytes.Buffer - header = new(btfHeader) - headerLen = binary.Size(header) - ) - - // Reserve space for the header. We have to write it last since - // we don't know the size of the type section yet. - _, _ = buf.Write(make([]byte, headerLen)) - - // Write type section, just after the header. - for _, raw := range s.rawTypes { - switch { - case opts.StripFuncLinkage && raw.Kind() == kindFunc: - raw.SetLinkage(StaticFunc) - } - - if err := raw.Marshal(&buf, opts.ByteOrder); err != nil { - return nil, fmt.Errorf("can't marshal BTF: %w", err) - } - } - - typeLen := uint32(buf.Len() - headerLen) - - // Write string section after type section. - _, _ = buf.Write(s.strings) - - // Fill out the header, and write it out. - header = &btfHeader{ - Magic: btfMagic, - Version: 1, - Flags: 0, - HdrLen: uint32(headerLen), - TypeOff: 0, - TypeLen: typeLen, - StringOff: typeLen, - StringLen: uint32(len(s.strings)), - } - - raw := buf.Bytes() - err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header) - if err != nil { - return nil, fmt.Errorf("can't write header: %v", err) - } - - return raw, nil -} - -type sliceWriter []byte - -func (sw sliceWriter) Write(p []byte) (int, error) { - if len(p) != len(sw) { - return 0, errors.New("size doesn't match") - } - - return copy(sw, p), nil -} - -// Program finds the BTF for a specific section. -// -// Length is the number of bytes in the raw BPF instruction stream. 
-// -// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't -// contain extended BTF info. -func (s *Spec) Program(name string, length uint64) (*Program, error) { - if length == 0 { - return nil, errors.New("length musn't be zero") - } - - if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil { - return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo) - } - - funcInfos, funcOK := s.funcInfos[name] - lineInfos, lineOK := s.lineInfos[name] - relos, coreOK := s.coreRelos[name] - - if !funcOK && !lineOK && !coreOK { - return nil, fmt.Errorf("no extended BTF info for section %s", name) - } - - return &Program{s, length, funcInfos, lineInfos, relos}, nil -} - -// Datasec returns the BTF required to create maps which represent data sections. -func (s *Spec) Datasec(name string) (*Map, error) { - var datasec Datasec - if err := s.FindType(name, &datasec); err != nil { - return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err) - } - - m := NewMap(s, &Void{}, &datasec) - return &m, nil -} - -// FindType searches for a type with a specific name. -// -// hint determines the type of the returned Type. -// -// Returns an error wrapping ErrNotFound if no matching -// type exists in spec. -func (s *Spec) FindType(name string, typ Type) error { - var ( - wanted = reflect.TypeOf(typ) - candidate Type - ) - - for _, typ := range s.namedTypes[essentialName(name)] { - if reflect.TypeOf(typ) != wanted { - continue - } - - // Match against the full name, not just the essential one. 
- if typ.name() != name { - continue - } - - if candidate != nil { - return fmt.Errorf("type %s: multiple candidates for %T", name, typ) - } - - candidate = typ - } - - if candidate == nil { - return fmt.Errorf("type %s: %w", name, ErrNotFound) - } - - cpy, _ := copyType(candidate, nil) - value := reflect.Indirect(reflect.ValueOf(cpy)) - reflect.Indirect(reflect.ValueOf(typ)).Set(value) - return nil -} - -// Handle is a reference to BTF loaded into the kernel. -type Handle struct { - fd *internal.FD -} - -// NewHandle loads BTF into the kernel. -// -// Returns ErrNotSupported if BTF is not supported. -func NewHandle(spec *Spec) (*Handle, error) { - if err := haveBTF(); err != nil { - return nil, err - } - - if spec.byteOrder != internal.NativeEndian { - return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian) - } - - btf, err := spec.marshal(marshalOpts{ - ByteOrder: internal.NativeEndian, - StripFuncLinkage: haveFuncLinkage() != nil, - }) - if err != nil { - return nil, fmt.Errorf("can't marshal BTF: %w", err) - } - - if uint64(len(btf)) > math.MaxUint32 { - return nil, errors.New("BTF exceeds the maximum size") - } - - attr := &bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), - } - - fd, err := bpfLoadBTF(attr) - if err != nil { - logBuf := make([]byte, 64*1024) - attr.logBuf = internal.NewSlicePointer(logBuf) - attr.btfLogSize = uint32(len(logBuf)) - attr.btfLogLevel = 1 - _, logErr := bpfLoadBTF(attr) - return nil, internal.ErrorWithLog(err, logBuf, logErr) - } - - return &Handle{fd}, nil -} - -// Close destroys the handle. -// -// Subsequent calls to FD will return an invalid value. -func (h *Handle) Close() error { - return h.fd.Close() -} - -// FD returns the file descriptor for the handle. -func (h *Handle) FD() int { - value, err := h.fd.Value() - if err != nil { - return -1 - } - - return int(value) -} - -// Map is the BTF for a map. 
-type Map struct { - spec *Spec - key, value Type -} - -// NewMap returns a new Map containing the given values. -// The key and value arguments are initialized to Void if nil values are given. -func NewMap(spec *Spec, key Type, value Type) Map { - if key == nil { - key = &Void{} - } - if value == nil { - value = &Void{} - } - - return Map{ - spec: spec, - key: key, - value: value, - } -} - -// MapSpec should be a method on Map, but is a free function -// to hide it from users of the ebpf package. -func MapSpec(m *Map) *Spec { - return m.spec -} - -// MapKey should be a method on Map, but is a free function -// to hide it from users of the ebpf package. -func MapKey(m *Map) Type { - return m.key -} - -// MapValue should be a method on Map, but is a free function -// to hide it from users of the ebpf package. -func MapValue(m *Map) Type { - return m.value -} - -// Program is the BTF information for a stream of instructions. -type Program struct { - spec *Spec - length uint64 - funcInfos, lineInfos extInfo - coreRelos coreRelos -} - -// ProgramSpec returns the Spec needed for loading function and line infos into the kernel. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramSpec(s *Program) *Spec { - return s.spec -} - -// ProgramAppend the information from other to the Program. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramAppend(s, other *Program) error { - funcInfos, err := s.funcInfos.append(other.funcInfos, s.length) - if err != nil { - return fmt.Errorf("func infos: %w", err) - } - - lineInfos, err := s.lineInfos.append(other.lineInfos, s.length) - if err != nil { - return fmt.Errorf("line infos: %w", err) - } - - s.funcInfos = funcInfos - s.lineInfos = lineInfos - s.coreRelos = s.coreRelos.append(other.coreRelos, s.length) - s.length += other.length - return nil -} - -// ProgramFuncInfos returns the binary form of BTF function infos. 
-// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramFuncInfos(s *Program) (recordSize uint32, bytes []byte, err error) { - bytes, err = s.funcInfos.MarshalBinary() - if err != nil { - return 0, nil, err - } - - return s.funcInfos.recordSize, bytes, nil -} - -// ProgramLineInfos returns the binary form of BTF line infos. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) { - bytes, err = s.lineInfos.MarshalBinary() - if err != nil { - return 0, nil, err - } - - return s.lineInfos.recordSize, bytes, nil -} - -// ProgramFixups returns the changes required to adjust the program to the target. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramFixups(s *Program, target *Spec) (COREFixups, error) { - if len(s.coreRelos) == 0 { - return nil, nil - } - - if target == nil { - var err error - target, err = LoadKernelSpec() - if err != nil { - return nil, err - } - } - - return coreRelocate(s.spec, target, s.coreRelos) -} - -type bpfLoadBTFAttr struct { - btf internal.Pointer - logBuf internal.Pointer - btfSize uint32 - btfLogSize uint32 - btfLogLevel uint32 -} - -func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) { - fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - - return internal.NewFD(uint32(fd)), nil -} - -func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte { - const minHeaderLength = 24 - - typesLen := uint32(binary.Size(types)) - header := btfHeader{ - Magic: btfMagic, - Version: 1, - HdrLen: minHeaderLength, - TypeOff: 0, - TypeLen: typesLen, - StringOff: typesLen, - StringLen: uint32(len(strings)), - } - - buf := new(bytes.Buffer) - _ = binary.Write(buf, bo, &header) - _ = binary.Write(buf, bo, types) - 
buf.Write(strings) - - return buf.Bytes() -} - -var haveBTF = internal.FeatureTest("BTF", "5.1", func() error { - var ( - types struct { - Integer btfType - Var btfType - btfVar struct{ Linkage uint32 } - } - strings = []byte{0, 'a', 0} - ) - - // We use a BTF_KIND_VAR here, to make sure that - // the kernel understands BTF at least as well as we - // do. BTF_KIND_VAR was introduced ~5.1. - types.Integer.SetKind(kindPointer) - types.Var.NameOff = 1 - types.Var.SetKind(kindVar) - types.Var.SizeType = 1 - - btf := marshalBTF(&types, strings, internal.NativeEndian) - - fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), - }) - if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { - // Treat both EINVAL and EPERM as not supported: loading the program - // might still succeed without BTF. - return internal.ErrNotSupported - } - if err != nil { - return err - } - - fd.Close() - return nil -}) - -var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error { - if err := haveBTF(); err != nil { - return err - } - - var ( - types struct { - FuncProto btfType - Func btfType - } - strings = []byte{0, 'a', 0} - ) - - types.FuncProto.SetKind(kindFuncProto) - types.Func.SetKind(kindFunc) - types.Func.SizeType = 1 // aka FuncProto - types.Func.NameOff = 1 - types.Func.SetLinkage(GlobalFunc) - - btf := marshalBTF(&types, strings, internal.NativeEndian) - - fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), - }) - if errors.Is(err, unix.EINVAL) { - return internal.ErrNotSupported - } - if err != nil { - return err - } - - fd.Close() - return nil -}) diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go b/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go deleted file mode 100644 index a5ef945..0000000 --- a/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go +++ /dev/null @@ -1,282 +0,0 @@ -package btf - -import ( - 
"encoding/binary" - "fmt" - "io" -) - -//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage - -// btfKind describes a Type. -type btfKind uint8 - -// Equivalents of the BTF_KIND_* constants. -const ( - kindUnknown btfKind = iota - kindInt - kindPointer - kindArray - kindStruct - kindUnion - kindEnum - kindForward - kindTypedef - kindVolatile - kindConst - kindRestrict - // Added ~4.20 - kindFunc - kindFuncProto - // Added ~5.1 - kindVar - kindDatasec -) - -// FuncLinkage describes BTF function linkage metadata. -type FuncLinkage int - -// Equivalent of enum btf_func_linkage. -const ( - StaticFunc FuncLinkage = iota // static - GlobalFunc // global - ExternFunc // extern -) - -// VarLinkage describes BTF variable linkage metadata. -type VarLinkage int - -const ( - StaticVar VarLinkage = iota // static - GlobalVar // global - ExternVar // extern -) - -const ( - btfTypeKindShift = 24 - btfTypeKindLen = 4 - btfTypeVlenShift = 0 - btfTypeVlenMask = 16 - btfTypeKindFlagShift = 31 - btfTypeKindFlagMask = 1 -) - -// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst. -type btfType struct { - NameOff uint32 - /* "info" bits arrangement - * bits 0-15: vlen (e.g. # of struct's members), linkage - * bits 16-23: unused - * bits 24-27: kind (e.g. int, ptr, array...etc) - * bits 28-30: unused - * bit 31: kind_flag, currently used by - * struct, union and fwd - */ - Info uint32 - /* "size" is used by INT, ENUM, STRUCT and UNION. - * "size" tells the size of the type it is describing. - * - * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, - * FUNC and FUNC_PROTO. - * "type" is a type_id referring to another type. 
- */ - SizeType uint32 -} - -func (k btfKind) String() string { - switch k { - case kindUnknown: - return "Unknown" - case kindInt: - return "Integer" - case kindPointer: - return "Pointer" - case kindArray: - return "Array" - case kindStruct: - return "Struct" - case kindUnion: - return "Union" - case kindEnum: - return "Enumeration" - case kindForward: - return "Forward" - case kindTypedef: - return "Typedef" - case kindVolatile: - return "Volatile" - case kindConst: - return "Const" - case kindRestrict: - return "Restrict" - case kindFunc: - return "Function" - case kindFuncProto: - return "Function Proto" - case kindVar: - return "Variable" - case kindDatasec: - return "Section" - default: - return fmt.Sprintf("Unknown (%d)", k) - } -} - -func mask(len uint32) uint32 { - return (1 << len) - 1 -} - -func (bt *btfType) info(len, shift uint32) uint32 { - return (bt.Info >> shift) & mask(len) -} - -func (bt *btfType) setInfo(value, len, shift uint32) { - bt.Info &^= mask(len) << shift - bt.Info |= (value & mask(len)) << shift -} - -func (bt *btfType) Kind() btfKind { - return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift)) -} - -func (bt *btfType) SetKind(kind btfKind) { - bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift) -} - -func (bt *btfType) Vlen() int { - return int(bt.info(btfTypeVlenMask, btfTypeVlenShift)) -} - -func (bt *btfType) SetVlen(vlen int) { - bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift) -} - -func (bt *btfType) KindFlag() bool { - return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1 -} - -func (bt *btfType) Linkage() FuncLinkage { - return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift)) -} - -func (bt *btfType) SetLinkage(linkage FuncLinkage) { - bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift) -} - -func (bt *btfType) Type() TypeID { - // TODO: Panic here if wrong kind? - return TypeID(bt.SizeType) -} - -func (bt *btfType) Size() uint32 { - // TODO: Panic here if wrong kind? 
- return bt.SizeType -} - -type rawType struct { - btfType - data interface{} -} - -func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error { - if err := binary.Write(w, bo, &rt.btfType); err != nil { - return err - } - - if rt.data == nil { - return nil - } - - return binary.Write(w, bo, rt.data) -} - -type btfArray struct { - Type TypeID - IndexType TypeID - Nelems uint32 -} - -type btfMember struct { - NameOff uint32 - Type TypeID - Offset uint32 -} - -type btfVarSecinfo struct { - Type TypeID - Offset uint32 - Size uint32 -} - -type btfVariable struct { - Linkage uint32 -} - -type btfEnum struct { - NameOff uint32 - Val int32 -} - -type btfParam struct { - NameOff uint32 - Type TypeID -} - -func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { - var ( - header btfType - types []rawType - ) - - for id := TypeID(1); ; id++ { - if err := binary.Read(r, bo, &header); err == io.EOF { - return types, nil - } else if err != nil { - return nil, fmt.Errorf("can't read type info for id %v: %v", id, err) - } - - var data interface{} - switch header.Kind() { - case kindInt: - data = new(uint32) - case kindPointer: - case kindArray: - data = new(btfArray) - case kindStruct: - fallthrough - case kindUnion: - data = make([]btfMember, header.Vlen()) - case kindEnum: - data = make([]btfEnum, header.Vlen()) - case kindForward: - case kindTypedef: - case kindVolatile: - case kindConst: - case kindRestrict: - case kindFunc: - case kindFuncProto: - data = make([]btfParam, header.Vlen()) - case kindVar: - data = new(btfVariable) - case kindDatasec: - data = make([]btfVarSecinfo, header.Vlen()) - default: - return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind()) - } - - if data == nil { - types = append(types, rawType{header, nil}) - continue - } - - if err := binary.Read(r, bo, data); err != nil { - return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err) - } - - types = append(types, rawType{header, 
data}) - } -} - -func intEncoding(raw uint32) (IntEncoding, uint32, byte) { - return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff) -} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go b/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go deleted file mode 100644 index 0e0c17d..0000000 --- a/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT. - -package btf - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[StaticFunc-0] - _ = x[GlobalFunc-1] - _ = x[ExternFunc-2] -} - -const _FuncLinkage_name = "staticglobalextern" - -var _FuncLinkage_index = [...]uint8{0, 6, 12, 18} - -func (i FuncLinkage) String() string { - if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) { - return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[StaticVar-0] - _ = x[GlobalVar-1] - _ = x[ExternVar-2] -} - -const _VarLinkage_name = "staticglobalextern" - -var _VarLinkage_index = [...]uint8{0, 6, 12, 18} - -func (i VarLinkage) String() string { - if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) { - return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]] -} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/core.go b/vendor/github.com/cilium/ebpf/internal/btf/core.go deleted file mode 100644 index 7c888f6..0000000 --- a/vendor/github.com/cilium/ebpf/internal/btf/core.go +++ /dev/null @@ -1,887 +0,0 @@ -package btf - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/cilium/ebpf/asm" -) - -// Code in this file is derived from libbpf, which is available under a BSD -// 2-Clause license. - -// COREFixup is the result of computing a CO-RE relocation for a target. -type COREFixup struct { - Kind COREKind - Local uint32 - Target uint32 - Poison bool -} - -func (f COREFixup) equal(other COREFixup) bool { - return f.Local == other.Local && f.Target == other.Target -} - -func (f COREFixup) String() string { - if f.Poison { - return fmt.Sprintf("%s=poison", f.Kind) - } - return fmt.Sprintf("%s=%d->%d", f.Kind, f.Local, f.Target) -} - -func (f COREFixup) apply(ins *asm.Instruction) error { - if f.Poison { - return errors.New("can't poison individual instruction") - } - - switch class := ins.OpCode.Class(); class { - case asm.LdXClass, asm.StClass, asm.StXClass: - if want := int16(f.Local); want != ins.Offset { - return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want) - } - - if f.Target > math.MaxInt16 { - return fmt.Errorf("offset %d exceeds MaxInt16", f.Target) - } - - ins.Offset = int16(f.Target) - - case asm.LdClass: - if !ins.IsConstantLoad(asm.DWord) { - return fmt.Errorf("not a dword-sized immediate load") - } - - if want := 
int64(f.Local); want != ins.Constant { - return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want) - } - - ins.Constant = int64(f.Target) - - case asm.ALUClass: - if ins.OpCode.ALUOp() == asm.Swap { - return fmt.Errorf("relocation against swap") - } - - fallthrough - - case asm.ALU64Class: - if src := ins.OpCode.Source(); src != asm.ImmSource { - return fmt.Errorf("invalid source %s", src) - } - - if want := int64(f.Local); want != ins.Constant { - return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want) - } - - if f.Target > math.MaxInt32 { - return fmt.Errorf("immediate %d exceeds MaxInt32", f.Target) - } - - ins.Constant = int64(f.Target) - - default: - return fmt.Errorf("invalid class %s", class) - } - - return nil -} - -func (f COREFixup) isNonExistant() bool { - return f.Kind.checksForExistence() && f.Target == 0 -} - -type COREFixups map[uint64]COREFixup - -// Apply a set of CO-RE relocations to a BPF program. -func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) { - if len(fs) == 0 { - cpy := make(asm.Instructions, len(insns)) - copy(cpy, insns) - return insns, nil - } - - cpy := make(asm.Instructions, 0, len(insns)) - iter := insns.Iterate() - for iter.Next() { - fixup, ok := fs[iter.Offset.Bytes()] - if !ok { - cpy = append(cpy, *iter.Ins) - continue - } - - ins := *iter.Ins - if fixup.Poison { - const badRelo = asm.BuiltinFunc(0xbad2310) - - cpy = append(cpy, badRelo.Call()) - if ins.OpCode.IsDWordLoad() { - // 64 bit constant loads occupy two raw bpf instructions, so - // we need to add another instruction as padding. 
- cpy = append(cpy, badRelo.Call()) - } - - continue - } - - if err := fixup.apply(&ins); err != nil { - return nil, fmt.Errorf("instruction %d, offset %d: %s: %w", iter.Index, iter.Offset.Bytes(), fixup.Kind, err) - } - - cpy = append(cpy, ins) - } - - return cpy, nil -} - -// COREKind is the type of CO-RE relocation -type COREKind uint32 - -const ( - reloFieldByteOffset COREKind = iota /* field byte offset */ - reloFieldByteSize /* field size in bytes */ - reloFieldExists /* field existence in target kernel */ - reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */ - reloFieldLShiftU64 /* bitfield-specific left bitshift */ - reloFieldRShiftU64 /* bitfield-specific right bitshift */ - reloTypeIDLocal /* type ID in local BPF object */ - reloTypeIDTarget /* type ID in target kernel */ - reloTypeExists /* type existence in target kernel */ - reloTypeSize /* type size in bytes */ - reloEnumvalExists /* enum value existence in target kernel */ - reloEnumvalValue /* enum value integer value */ -) - -func (k COREKind) String() string { - switch k { - case reloFieldByteOffset: - return "byte_off" - case reloFieldByteSize: - return "byte_sz" - case reloFieldExists: - return "field_exists" - case reloFieldSigned: - return "signed" - case reloFieldLShiftU64: - return "lshift_u64" - case reloFieldRShiftU64: - return "rshift_u64" - case reloTypeIDLocal: - return "local_type_id" - case reloTypeIDTarget: - return "target_type_id" - case reloTypeExists: - return "type_exists" - case reloTypeSize: - return "type_size" - case reloEnumvalExists: - return "enumval_exists" - case reloEnumvalValue: - return "enumval_value" - default: - return "unknown" - } -} - -func (k COREKind) checksForExistence() bool { - return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists -} - -func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) { - if local.byteOrder != target.byteOrder { - return nil, fmt.Errorf("can't relocate %s against %s", 
local.byteOrder, target.byteOrder) - } - - var ids []TypeID - relosByID := make(map[TypeID]coreRelos) - result := make(COREFixups, len(relos)) - for _, relo := range relos { - if relo.kind == reloTypeIDLocal { - // Filtering out reloTypeIDLocal here makes our lives a lot easier - // down the line, since it doesn't have a target at all. - if len(relo.accessor) > 1 || relo.accessor[0] != 0 { - return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) - } - - result[uint64(relo.insnOff)] = COREFixup{ - relo.kind, - uint32(relo.typeID), - uint32(relo.typeID), - false, - } - continue - } - - relos, ok := relosByID[relo.typeID] - if !ok { - ids = append(ids, relo.typeID) - } - relosByID[relo.typeID] = append(relos, relo) - } - - // Ensure we work on relocations in a deterministic order. - sort.Slice(ids, func(i, j int) bool { - return ids[i] < ids[j] - }) - - for _, id := range ids { - if int(id) >= len(local.types) { - return nil, fmt.Errorf("invalid type id %d", id) - } - - localType := local.types[id] - named, ok := localType.(namedType) - if !ok || named.name() == "" { - return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) - } - - relos := relosByID[id] - targets := target.namedTypes[named.essentialName()] - fixups, err := coreCalculateFixups(localType, targets, relos) - if err != nil { - return nil, fmt.Errorf("relocate %s: %w", localType, err) - } - - for i, relo := range relos { - result[uint64(relo.insnOff)] = fixups[i] - } - } - - return result, nil -} - -var errAmbiguousRelocation = errors.New("ambiguous relocation") -var errImpossibleRelocation = errors.New("impossible relocation") - -// coreCalculateFixups calculates the fixups for the given relocations using -// the "best" target. -// -// The best target is determined by scoring: the less poisoning we have to do -// the better the target is. 
-func coreCalculateFixups(local Type, targets []namedType, relos coreRelos) ([]COREFixup, error) { - localID := local.ID() - local, err := copyType(local, skipQualifierAndTypedef) - if err != nil { - return nil, err - } - - bestScore := len(relos) - var bestFixups []COREFixup - for i := range targets { - targetID := targets[i].ID() - target, err := copyType(targets[i], skipQualifierAndTypedef) - if err != nil { - return nil, err - } - - score := 0 // lower is better - fixups := make([]COREFixup, 0, len(relos)) - for _, relo := range relos { - fixup, err := coreCalculateFixup(local, localID, target, targetID, relo) - if err != nil { - return nil, fmt.Errorf("target %s: %w", target, err) - } - if fixup.Poison || fixup.isNonExistant() { - score++ - } - fixups = append(fixups, fixup) - } - - if score > bestScore { - // We have a better target already, ignore this one. - continue - } - - if score < bestScore { - // This is the best target yet, use it. - bestScore = score - bestFixups = fixups - continue - } - - // Some other target has the same score as the current one. Make sure - // the fixups agree with each other. - for i, fixup := range bestFixups { - if !fixup.equal(fixups[i]) { - return nil, fmt.Errorf("%s: multiple types match: %w", fixup.Kind, errAmbiguousRelocation) - } - } - } - - if bestFixups == nil { - // Nothing at all matched, probably because there are no suitable - // targets at all. Poison everything! - bestFixups = make([]COREFixup, len(relos)) - for i, relo := range relos { - bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true} - } - } - - return bestFixups, nil -} - -// coreCalculateFixup calculates the fixup for a single local type, target type -// and relocation. 
-func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo coreRelo) (COREFixup, error) { - fixup := func(local, target uint32) (COREFixup, error) { - return COREFixup{relo.kind, local, target, false}, nil - } - poison := func() (COREFixup, error) { - if relo.kind.checksForExistence() { - return fixup(1, 0) - } - return COREFixup{relo.kind, 0, 0, true}, nil - } - zero := COREFixup{} - - switch relo.kind { - case reloTypeIDTarget, reloTypeSize, reloTypeExists: - if len(relo.accessor) > 1 || relo.accessor[0] != 0 { - return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) - } - - err := coreAreTypesCompatible(local, target) - if errors.Is(err, errImpossibleRelocation) { - return poison() - } - if err != nil { - return zero, fmt.Errorf("relocation %s: %w", relo.kind, err) - } - - switch relo.kind { - case reloTypeExists: - return fixup(1, 1) - - case reloTypeIDTarget: - return fixup(uint32(localID), uint32(targetID)) - - case reloTypeSize: - localSize, err := Sizeof(local) - if err != nil { - return zero, err - } - - targetSize, err := Sizeof(target) - if err != nil { - return zero, err - } - - return fixup(uint32(localSize), uint32(targetSize)) - } - - case reloEnumvalValue, reloEnumvalExists: - localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target) - if errors.Is(err, errImpossibleRelocation) { - return poison() - } - if err != nil { - return zero, fmt.Errorf("relocation %s: %w", relo.kind, err) - } - - switch relo.kind { - case reloEnumvalExists: - return fixup(1, 1) - - case reloEnumvalValue: - return fixup(uint32(localValue.Value), uint32(targetValue.Value)) - } - - case reloFieldByteOffset, reloFieldByteSize, reloFieldExists: - if _, ok := target.(*Fwd); ok { - // We can't relocate fields using a forward declaration, so - // skip it. If a non-forward declaration is present in the BTF - // we'll find it in one of the other iterations. 
- return poison() - } - - localField, targetField, err := coreFindField(local, relo.accessor, target) - if errors.Is(err, errImpossibleRelocation) { - return poison() - } - if err != nil { - return zero, fmt.Errorf("target %s: %w", target, err) - } - - switch relo.kind { - case reloFieldExists: - return fixup(1, 1) - - case reloFieldByteOffset: - return fixup(localField.offset/8, targetField.offset/8) - - case reloFieldByteSize: - localSize, err := Sizeof(localField.Type) - if err != nil { - return zero, err - } - - targetSize, err := Sizeof(targetField.Type) - if err != nil { - return zero, err - } - - return fixup(uint32(localSize), uint32(targetSize)) - - } - } - - return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported) -} - -/* coreAccessor contains a path through a struct. It contains at least one index. - * - * The interpretation depends on the kind of the relocation. The following is - * taken from struct bpf_core_relo in libbpf_internal.h: - * - * - for field-based relocations, string encodes an accessed field using - * a sequence of field and array indices, separated by colon (:). It's - * conceptually very close to LLVM's getelementptr ([0]) instruction's - * arguments for identifying offset to a field. - * - for type-based relocations, strings is expected to be just "0"; - * - for enum value-based relocations, string contains an index of enum - * value within its enum type; - * - * Example to provide a better feel. 
- * - * struct sample { - * int a; - * struct { - * int b[10]; - * }; - * }; - * - * struct sample s = ...; - * int x = &s->a; // encoded as "0:0" (a is field #0) - * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, - * // b is field #0 inside anon struct, accessing elem #5) - * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) - */ -type coreAccessor []int - -func parseCoreAccessor(accessor string) (coreAccessor, error) { - if accessor == "" { - return nil, fmt.Errorf("empty accessor") - } - - var result coreAccessor - parts := strings.Split(accessor, ":") - for _, part := range parts { - // 31 bits to avoid overflowing int on 32 bit platforms. - index, err := strconv.ParseUint(part, 10, 31) - if err != nil { - return nil, fmt.Errorf("accessor index %q: %s", part, err) - } - - result = append(result, int(index)) - } - - return result, nil -} - -func (ca coreAccessor) String() string { - strs := make([]string, 0, len(ca)) - for _, i := range ca { - strs = append(strs, strconv.Itoa(i)) - } - return strings.Join(strs, ":") -} - -func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) { - e, ok := t.(*Enum) - if !ok { - return nil, fmt.Errorf("not an enum: %s", t) - } - - if len(ca) > 1 { - return nil, fmt.Errorf("invalid accessor %s for enum", ca) - } - - i := ca[0] - if i >= len(e.Values) { - return nil, fmt.Errorf("invalid index %d for %s", i, e) - } - - return &e.Values[i], nil -} - -type coreField struct { - Type Type - offset uint32 -} - -func adjustOffset(base uint32, t Type, n int) (uint32, error) { - size, err := Sizeof(t) - if err != nil { - return 0, err - } - - return base + (uint32(n) * uint32(size) * 8), nil -} - -// coreFindField descends into the local type using the accessor and tries to -// find an equivalent field in target at each step. -// -// Returns the field and the offset of the field from the start of -// target in bits. 
-func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) { - // The first index is used to offset a pointer of the base type like - // when accessing an array. - localOffset, err := adjustOffset(0, local, localAcc[0]) - if err != nil { - return coreField{}, coreField{}, err - } - - targetOffset, err := adjustOffset(0, target, localAcc[0]) - if err != nil { - return coreField{}, coreField{}, err - } - - if err := coreAreMembersCompatible(local, target); err != nil { - return coreField{}, coreField{}, fmt.Errorf("fields: %w", err) - } - - var localMaybeFlex, targetMaybeFlex bool - for _, acc := range localAcc[1:] { - switch localType := local.(type) { - case composite: - // For composite types acc is used to find the field in the local type, - // and then we try to find a field in target with the same name. - localMembers := localType.members() - if acc >= len(localMembers) { - return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, local) - } - - localMember := localMembers[acc] - if localMember.Name == "" { - _, ok := localMember.Type.(composite) - if !ok { - return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported) - } - - // This is an anonymous struct or union, ignore it. 
- local = localMember.Type - localOffset += localMember.Offset - localMaybeFlex = false - continue - } - - targetType, ok := target.(composite) - if !ok { - return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation) - } - - targetMember, last, err := coreFindMember(targetType, localMember.Name) - if err != nil { - return coreField{}, coreField{}, err - } - - if targetMember.BitfieldSize > 0 { - return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported) - } - - local = localMember.Type - localMaybeFlex = acc == len(localMembers)-1 - localOffset += localMember.Offset - target = targetMember.Type - targetMaybeFlex = last - targetOffset += targetMember.Offset - - case *Array: - // For arrays, acc is the index in the target. - targetType, ok := target.(*Array) - if !ok { - return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation) - } - - if localType.Nelems == 0 && !localMaybeFlex { - return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array") - } - if targetType.Nelems == 0 && !targetMaybeFlex { - return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array") - } - - if localType.Nelems > 0 && acc >= int(localType.Nelems) { - return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc) - } - if targetType.Nelems > 0 && acc >= int(targetType.Nelems) { - return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation) - } - - local = localType.Type - localMaybeFlex = false - localOffset, err = adjustOffset(localOffset, local, acc) - if err != nil { - return coreField{}, coreField{}, err - } - - target = targetType.Type - targetMaybeFlex = false - targetOffset, err = adjustOffset(targetOffset, target, acc) - if err != nil { - return coreField{}, coreField{}, err - } - - default: - return coreField{}, coreField{}, 
fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported) - } - - if err := coreAreMembersCompatible(local, target); err != nil { - return coreField{}, coreField{}, err - } - } - - return coreField{local, localOffset}, coreField{target, targetOffset}, nil -} - -// coreFindMember finds a member in a composite type while handling anonymous -// structs and unions. -func coreFindMember(typ composite, name Name) (Member, bool, error) { - if name == "" { - return Member{}, false, errors.New("can't search for anonymous member") - } - - type offsetTarget struct { - composite - offset uint32 - } - - targets := []offsetTarget{{typ, 0}} - visited := make(map[composite]bool) - - for i := 0; i < len(targets); i++ { - target := targets[i] - - // Only visit targets once to prevent infinite recursion. - if visited[target] { - continue - } - if len(visited) >= maxTypeDepth { - // This check is different than libbpf, which restricts the entire - // path to BPF_CORE_SPEC_MAX_LEN items. - return Member{}, false, fmt.Errorf("type is nested too deep") - } - visited[target] = true - - members := target.members() - for j, member := range members { - if member.Name == name { - // NB: This is safe because member is a copy. - member.Offset += target.offset - return member, j == len(members)-1, nil - } - - // The names don't match, but this member could be an anonymous struct - // or union. - if member.Name != "" { - continue - } - - comp, ok := member.Type.(composite) - if !ok { - return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type) - } - - targets = append(targets, offsetTarget{comp, target.offset + member.Offset}) - } - } - - return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation) -} - -// coreFindEnumValue follows localAcc to find the equivalent enum value in target. 
-func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) { - localValue, err := localAcc.enumValue(local) - if err != nil { - return nil, nil, err - } - - targetEnum, ok := target.(*Enum) - if !ok { - return nil, nil, errImpossibleRelocation - } - - localName := localValue.Name.essentialName() - for i, targetValue := range targetEnum.Values { - if targetValue.Name.essentialName() != localName { - continue - } - - return localValue, &targetEnum.Values[i], nil - } - - return nil, nil, errImpossibleRelocation -} - -/* The comment below is from bpf_core_types_are_compat in libbpf.c: - * - * Check local and target types for compatibility. This check is used for - * type-based CO-RE relocations and follow slightly different rules than - * field-based relocations. This function assumes that root types were already - * checked for name match. Beyond that initial root-level name check, names - * are completely ignored. Compatibility rules are as follows: - * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but - * kind should match for local and target types (i.e., STRUCT is not - * compatible with UNION); - * - for ENUMs, the size is ignored; - * - for INT, size and signedness are ignored; - * - for ARRAY, dimensionality is ignored, element types are checked for - * compatibility recursively; - * - CONST/VOLATILE/RESTRICT modifiers are ignored; - * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; - * - FUNC_PROTOs are compatible if they have compatible signature: same - * number of input args and compatible return and argument types. - * These rules are not set in stone and probably will be adjusted as we get - * more experience with using BPF CO-RE relocations. - * - * Returns errImpossibleRelocation if types are not compatible. 
- */ -func coreAreTypesCompatible(localType Type, targetType Type) error { - var ( - localTs, targetTs typeDeque - l, t = &localType, &targetType - depth = 0 - ) - - for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() { - if depth >= maxTypeDepth { - return errors.New("types are nested too deep") - } - - localType = *l - targetType = *t - - if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { - return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) - } - - switch lv := (localType).(type) { - case *Void, *Struct, *Union, *Enum, *Fwd: - // Nothing to do here - - case *Int: - tv := targetType.(*Int) - if lv.isBitfield() || tv.isBitfield() { - return fmt.Errorf("bitfield: %w", errImpossibleRelocation) - } - - case *Pointer, *Array: - depth++ - localType.walk(&localTs) - targetType.walk(&targetTs) - - case *FuncProto: - tv := targetType.(*FuncProto) - if len(lv.Params) != len(tv.Params) { - return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation) - } - - depth++ - localType.walk(&localTs) - targetType.walk(&targetTs) - - default: - return fmt.Errorf("unsupported type %T", localType) - } - } - - if l != nil { - return fmt.Errorf("dangling local type %T", *l) - } - - if t != nil { - return fmt.Errorf("dangling target type %T", *t) - } - - return nil -} - -/* coreAreMembersCompatible checks two types for field-based relocation compatibility. - * - * The comment below is from bpf_core_fields_are_compat in libbpf.c: - * - * Check two types for compatibility for the purpose of field access - * relocation. 
const/volatile/restrict and typedefs are skipped to ensure we - * are relocating semantically compatible entities: - * - any two STRUCTs/UNIONs are compatible and can be mixed; - * - any two FWDs are compatible, if their names match (modulo flavor suffix); - * - any two PTRs are always compatible; - * - for ENUMs, names should be the same (ignoring flavor suffix) or at - * least one of enums should be anonymous; - * - for ENUMs, check sizes, names are ignored; - * - for INT, size and signedness are ignored; - * - for ARRAY, dimensionality is ignored, element types are checked for - * compatibility recursively; - * [ NB: coreAreMembersCompatible doesn't recurse, this check is done - * by coreFindField. ] - * - everything else shouldn't be ever a target of relocation. - * These rules are not set in stone and probably will be adjusted as we get - * more experience with using BPF CO-RE relocations. - * - * Returns errImpossibleRelocation if the members are not compatible. - */ -func coreAreMembersCompatible(localType Type, targetType Type) error { - doNamesMatch := func(a, b string) error { - if a == "" || b == "" { - // allow anonymous and named type to match - return nil - } - - if essentialName(a) == essentialName(b) { - return nil - } - - return fmt.Errorf("names don't match: %w", errImpossibleRelocation) - } - - _, lok := localType.(composite) - _, tok := targetType.(composite) - if lok && tok { - return nil - } - - if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { - return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) - } - - switch lv := localType.(type) { - case *Array, *Pointer: - return nil - - case *Enum: - tv := targetType.(*Enum) - return doNamesMatch(lv.name(), tv.name()) - - case *Fwd: - tv := targetType.(*Fwd) - return doNamesMatch(lv.name(), tv.name()) - - case *Int: - tv := targetType.(*Int) - if lv.isBitfield() || tv.isBitfield() { - return fmt.Errorf("bitfield: %w", errImpossibleRelocation) - } - return nil - - default: - 
return fmt.Errorf("type %s: %w", localType, ErrNotSupported) - } -} - -func skipQualifierAndTypedef(typ Type) (Type, error) { - result := typ - for depth := 0; depth <= maxTypeDepth; depth++ { - switch v := (result).(type) { - case qualifier: - result = v.qualify() - case *Typedef: - result = v.Type - default: - return result, nil - } - } - return nil, errors.New("exceeded type depth") -} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/doc.go b/vendor/github.com/cilium/ebpf/internal/btf/doc.go deleted file mode 100644 index ad2576c..0000000 --- a/vendor/github.com/cilium/ebpf/internal/btf/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package btf handles data encoded according to the BPF Type Format. -// -// The canonical documentation lives in the Linux kernel repository and is -// available at https://www.kernel.org/doc/html/latest/bpf/btf.html -// -// The API is very much unstable. You should only use this via the main -// ebpf library. -package btf diff --git a/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go deleted file mode 100644 index beba1bc..0000000 --- a/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go +++ /dev/null @@ -1,303 +0,0 @@ -package btf - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" -) - -type btfExtHeader struct { - Magic uint16 - Version uint8 - Flags uint8 - HdrLen uint32 - - FuncInfoOff uint32 - FuncInfoLen uint32 - LineInfoOff uint32 - LineInfoLen uint32 -} - -type btfExtCoreHeader struct { - CoreReloOff uint32 - CoreReloLen uint32 -} - -func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, relos map[string]coreRelos, err error) { - var header btfExtHeader - var coreHeader btfExtCoreHeader - if err := binary.Read(r, bo, &header); err != nil { - return nil, nil, nil, fmt.Errorf("can't read 
header: %v", err) - } - - if header.Magic != btfMagic { - return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) - } - - if header.Version != 1 { - return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version) - } - - if header.Flags != 0 { - return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) - } - - remainder := int64(header.HdrLen) - int64(binary.Size(&header)) - if remainder < 0 { - return nil, nil, nil, errors.New("header is too short") - } - - coreHdrSize := int64(binary.Size(&coreHeader)) - if remainder >= coreHdrSize { - if err := binary.Read(r, bo, &coreHeader); err != nil { - return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err) - } - remainder -= coreHdrSize - } - - // Of course, the .BTF.ext header has different semantics than the - // .BTF ext header. We need to ignore non-null values. - _, err = io.CopyN(ioutil.Discard, r, remainder) - if err != nil { - return nil, nil, nil, fmt.Errorf("header padding: %v", err) - } - - if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err) - } - - buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen))) - funcInfo, err = parseExtInfo(buf, bo, strings) - if err != nil { - return nil, nil, nil, fmt.Errorf("function info: %w", err) - } - - if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err) - } - - buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen))) - lineInfo, err = parseExtInfo(buf, bo, strings) - if err != nil { - return nil, nil, nil, fmt.Errorf("line info: %w", err) - } - - if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 { - if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to CO-RE 
relocation section: %v", err) - } - - relos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings) - if err != nil { - return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err) - } - } - - return funcInfo, lineInfo, relos, nil -} - -type btfExtInfoSec struct { - SecNameOff uint32 - NumInfo uint32 -} - -type extInfoRecord struct { - InsnOff uint64 - Opaque []byte -} - -type extInfo struct { - recordSize uint32 - records []extInfoRecord -} - -func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) { - if other.recordSize != ei.recordSize { - return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize) - } - - records := make([]extInfoRecord, 0, len(ei.records)+len(other.records)) - records = append(records, ei.records...) - for _, info := range other.records { - records = append(records, extInfoRecord{ - InsnOff: info.InsnOff + offset, - Opaque: info.Opaque, - }) - } - return extInfo{ei.recordSize, records}, nil -} - -func (ei extInfo) MarshalBinary() ([]byte, error) { - if len(ei.records) == 0 { - return nil, nil - } - - buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records))) - for _, info := range ei.records { - // The kernel expects offsets in number of raw bpf instructions, - // while the ELF tracks it in bytes. 
- insnOff := uint32(info.InsnOff / asm.InstructionSize) - if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil { - return nil, fmt.Errorf("can't write instruction offset: %v", err) - } - - buf.Write(info.Opaque) - } - - return buf.Bytes(), nil -} - -func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) { - const maxRecordSize = 256 - - var recordSize uint32 - if err := binary.Read(r, bo, &recordSize); err != nil { - return nil, fmt.Errorf("can't read record size: %v", err) - } - - if recordSize < 4 { - // Need at least insnOff - return nil, errors.New("record size too short") - } - if recordSize > maxRecordSize { - return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) - } - - result := make(map[string]extInfo) - for { - secName, infoHeader, err := parseExtInfoHeader(r, bo, strings) - if errors.Is(err, io.EOF) { - return result, nil - } - - var records []extInfoRecord - for i := uint32(0); i < infoHeader.NumInfo; i++ { - var byteOff uint32 - if err := binary.Read(r, bo, &byteOff); err != nil { - return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err) - } - - buf := make([]byte, int(recordSize-4)) - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("section %v: can't read record: %v", secName, err) - } - - if byteOff%asm.InstructionSize != 0 { - return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff) - } - - records = append(records, extInfoRecord{uint64(byteOff), buf}) - } - - result[secName] = extInfo{ - recordSize, - records, - } - } -} - -// bpfCoreRelo matches `struct bpf_core_relo` from the kernel -type bpfCoreRelo struct { - InsnOff uint32 - TypeID TypeID - AccessStrOff uint32 - Kind COREKind -} - -type coreRelo struct { - insnOff uint32 - typeID TypeID - accessor coreAccessor - kind COREKind -} - -type coreRelos []coreRelo - -// append two slices of extInfoRelo to each 
other. The InsnOff of b are adjusted -// by offset. -func (r coreRelos) append(other coreRelos, offset uint64) coreRelos { - result := make([]coreRelo, 0, len(r)+len(other)) - result = append(result, r...) - for _, relo := range other { - relo.insnOff += uint32(offset) - result = append(result, relo) - } - return result -} - -var extInfoReloSize = binary.Size(bpfCoreRelo{}) - -func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]coreRelos, error) { - var recordSize uint32 - if err := binary.Read(r, bo, &recordSize); err != nil { - return nil, fmt.Errorf("read record size: %v", err) - } - - if recordSize != uint32(extInfoReloSize) { - return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) - } - - result := make(map[string]coreRelos) - for { - secName, infoHeader, err := parseExtInfoHeader(r, bo, strings) - if errors.Is(err, io.EOF) { - return result, nil - } - - var relos coreRelos - for i := uint32(0); i < infoHeader.NumInfo; i++ { - var relo bpfCoreRelo - if err := binary.Read(r, bo, &relo); err != nil { - return nil, fmt.Errorf("section %v: read record: %v", secName, err) - } - - if relo.InsnOff%asm.InstructionSize != 0 { - return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff) - } - - accessorStr, err := strings.Lookup(relo.AccessStrOff) - if err != nil { - return nil, err - } - - accessor, err := parseCoreAccessor(accessorStr) - if err != nil { - return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) - } - - relos = append(relos, coreRelo{ - relo.InsnOff, - relo.TypeID, - accessor, - relo.Kind, - }) - } - - result[secName] = relos - } -} - -func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) { - var infoHeader btfExtInfoSec - if err := binary.Read(r, bo, &infoHeader); err != nil { - return "", nil, fmt.Errorf("read ext info header: %w", err) - } - - secName, err := 
strings.Lookup(infoHeader.SecNameOff) - if err != nil { - return "", nil, fmt.Errorf("get section name: %w", err) - } - - if infoHeader.NumInfo == 0 { - return "", nil, fmt.Errorf("section %s has zero records", secName) - } - - return secName, &infoHeader, nil -} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go b/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go deleted file mode 100644 index 37e043f..0000000 --- a/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build gofuzz - -// Use with https://github.com/dvyukov/go-fuzz - -package btf - -import ( - "bytes" - "encoding/binary" - - "github.com/cilium/ebpf/internal" -) - -func FuzzSpec(data []byte) int { - if len(data) < binary.Size(btfHeader{}) { - return -1 - } - - spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil) - if err != nil { - if spec != nil { - panic("spec is not nil") - } - return 0 - } - if spec == nil { - panic("spec is nil") - } - return 1 -} - -func FuzzExtInfo(data []byte) int { - if len(data) < binary.Size(btfExtHeader{}) { - return -1 - } - - table := stringTable("\x00foo\x00barfoo\x00") - info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table) - if err != nil { - if info != nil { - panic("info is not nil") - } - return 0 - } - if info == nil { - panic("info is nil") - } - return 1 -} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/strings.go b/vendor/github.com/cilium/ebpf/internal/btf/strings.go deleted file mode 100644 index 8782643..0000000 --- a/vendor/github.com/cilium/ebpf/internal/btf/strings.go +++ /dev/null @@ -1,60 +0,0 @@ -package btf - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" -) - -type stringTable []byte - -func readStringTable(r io.Reader) (stringTable, error) { - contents, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("can't read string table: %v", err) - } - - if len(contents) < 1 { - return nil, errors.New("string table 
is empty") - } - - if contents[0] != '\x00' { - return nil, errors.New("first item in string table is non-empty") - } - - if contents[len(contents)-1] != '\x00' { - return nil, errors.New("string table isn't null terminated") - } - - return stringTable(contents), nil -} - -func (st stringTable) Lookup(offset uint32) (string, error) { - if int64(offset) > int64(^uint(0)>>1) { - return "", fmt.Errorf("offset %d overflows int", offset) - } - - pos := int(offset) - if pos >= len(st) { - return "", fmt.Errorf("offset %d is out of bounds", offset) - } - - if pos > 0 && st[pos-1] != '\x00' { - return "", fmt.Errorf("offset %d isn't start of a string", offset) - } - - str := st[pos:] - end := bytes.IndexByte(str, '\x00') - if end == -1 { - return "", fmt.Errorf("offset %d isn't null terminated", offset) - } - - return string(str[:end]), nil -} - -func (st stringTable) LookupName(offset uint32) (Name, error) { - str, err := st.Lookup(offset) - return Name(str), err -} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/types.go b/vendor/github.com/cilium/ebpf/internal/btf/types.go deleted file mode 100644 index 62aa31b..0000000 --- a/vendor/github.com/cilium/ebpf/internal/btf/types.go +++ /dev/null @@ -1,893 +0,0 @@ -package btf - -import ( - "fmt" - "math" - "strings" -) - -const maxTypeDepth = 32 - -// TypeID identifies a type in a BTF section. -type TypeID uint32 - -// ID implements part of the Type interface. -func (tid TypeID) ID() TypeID { - return tid -} - -// Type represents a type described by BTF. -type Type interface { - ID() TypeID - - String() string - - // Make a copy of the type, without copying Type members. - copy() Type - - // Enumerate all nested Types. Repeated calls must visit nested - // types in the same order. - walk(*typeDeque) -} - -// namedType is a type with a name. -// -// Most named types simply embed Name. -type namedType interface { - Type - name() string - essentialName() string -} - -// Name identifies a type. 
-// -// Anonymous types have an empty name. -type Name string - -func (n Name) name() string { - return string(n) -} - -func (n Name) essentialName() string { - return essentialName(string(n)) -} - -// Void is the unit type of BTF. -type Void struct{} - -func (v *Void) ID() TypeID { return 0 } -func (v *Void) String() string { return "void#0" } -func (v *Void) size() uint32 { return 0 } -func (v *Void) copy() Type { return (*Void)(nil) } -func (v *Void) walk(*typeDeque) {} - -type IntEncoding byte - -const ( - Signed IntEncoding = 1 << iota - Char - Bool -) - -// Int is an integer of a given length. -type Int struct { - TypeID - Name - - // The size of the integer in bytes. - Size uint32 - Encoding IntEncoding - // Offset is the starting bit offset. Currently always 0. - // See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int - Offset uint32 - Bits byte -} - -var _ namedType = (*Int)(nil) - -func (i *Int) String() string { - var s strings.Builder - - switch { - case i.Encoding&Char != 0: - s.WriteString("char") - case i.Encoding&Bool != 0: - s.WriteString("bool") - default: - if i.Encoding&Signed == 0 { - s.WriteRune('u') - } - s.WriteString("int") - fmt.Fprintf(&s, "%d", i.Size*8) - } - - fmt.Fprintf(&s, "#%d", i.TypeID) - - if i.Bits > 0 { - fmt.Fprintf(&s, "[bits=%d]", i.Bits) - } - - return s.String() -} - -func (i *Int) size() uint32 { return i.Size } -func (i *Int) walk(*typeDeque) {} -func (i *Int) copy() Type { - cpy := *i - return &cpy -} - -func (i *Int) isBitfield() bool { - return i.Offset > 0 -} - -// Pointer is a pointer to another type. -type Pointer struct { - TypeID - Target Type -} - -func (p *Pointer) String() string { - return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID()) -} - -func (p *Pointer) size() uint32 { return 8 } -func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) } -func (p *Pointer) copy() Type { - cpy := *p - return &cpy -} - -// Array is an array with a fixed number of elements. 
-type Array struct { - TypeID - Type Type - Nelems uint32 -} - -func (arr *Array) String() string { - return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems) -} - -func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) } -func (arr *Array) copy() Type { - cpy := *arr - return &cpy -} - -// Struct is a compound type of consecutive members. -type Struct struct { - TypeID - Name - // The size of the struct including padding, in bytes - Size uint32 - Members []Member -} - -func (s *Struct) String() string { - return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name) -} - -func (s *Struct) size() uint32 { return s.Size } - -func (s *Struct) walk(tdq *typeDeque) { - for i := range s.Members { - tdq.push(&s.Members[i].Type) - } -} - -func (s *Struct) copy() Type { - cpy := *s - cpy.Members = copyMembers(s.Members) - return &cpy -} - -func (s *Struct) members() []Member { - return s.Members -} - -// Union is a compound type where members occupy the same memory. -type Union struct { - TypeID - Name - // The size of the union including padding, in bytes. - Size uint32 - Members []Member -} - -func (u *Union) String() string { - return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name) -} - -func (u *Union) size() uint32 { return u.Size } - -func (u *Union) walk(tdq *typeDeque) { - for i := range u.Members { - tdq.push(&u.Members[i].Type) - } -} - -func (u *Union) copy() Type { - cpy := *u - cpy.Members = copyMembers(u.Members) - return &cpy -} - -func (u *Union) members() []Member { - return u.Members -} - -func copyMembers(orig []Member) []Member { - cpy := make([]Member, len(orig)) - copy(cpy, orig) - return cpy -} - -type composite interface { - members() []Member -} - -var ( - _ composite = (*Struct)(nil) - _ composite = (*Union)(nil) -) - -// Member is part of a Struct or Union. -// -// It is not a valid Type. 
-type Member struct { - Name - Type Type - // Offset is the bit offset of this member - Offset uint32 - BitfieldSize uint32 -} - -// Enum lists possible values. -type Enum struct { - TypeID - Name - Values []EnumValue -} - -func (e *Enum) String() string { - return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name) -} - -// EnumValue is part of an Enum -// -// Is is not a valid Type -type EnumValue struct { - Name - Value int32 -} - -func (e *Enum) size() uint32 { return 4 } -func (e *Enum) walk(*typeDeque) {} -func (e *Enum) copy() Type { - cpy := *e - cpy.Values = make([]EnumValue, len(e.Values)) - copy(cpy.Values, e.Values) - return &cpy -} - -// FwdKind is the type of forward declaration. -type FwdKind int - -// Valid types of forward declaration. -const ( - FwdStruct FwdKind = iota - FwdUnion -) - -func (fk FwdKind) String() string { - switch fk { - case FwdStruct: - return "struct" - case FwdUnion: - return "union" - default: - return fmt.Sprintf("%T(%d)", fk, int(fk)) - } -} - -// Fwd is a forward declaration of a Type. -type Fwd struct { - TypeID - Name - Kind FwdKind -} - -func (f *Fwd) String() string { - return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name) -} - -func (f *Fwd) walk(*typeDeque) {} -func (f *Fwd) copy() Type { - cpy := *f - return &cpy -} - -// Typedef is an alias of a Type. -type Typedef struct { - TypeID - Name - Type Type -} - -func (td *Typedef) String() string { - return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID()) -} - -func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) } -func (td *Typedef) copy() Type { - cpy := *td - return &cpy -} - -// Volatile is a qualifier. 
-type Volatile struct { - TypeID - Type Type -} - -func (v *Volatile) String() string { - return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID()) -} - -func (v *Volatile) qualify() Type { return v.Type } -func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) } -func (v *Volatile) copy() Type { - cpy := *v - return &cpy -} - -// Const is a qualifier. -type Const struct { - TypeID - Type Type -} - -func (c *Const) String() string { - return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID()) -} - -func (c *Const) qualify() Type { return c.Type } -func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) } -func (c *Const) copy() Type { - cpy := *c - return &cpy -} - -// Restrict is a qualifier. -type Restrict struct { - TypeID - Type Type -} - -func (r *Restrict) String() string { - return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID()) -} - -func (r *Restrict) qualify() Type { return r.Type } -func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) } -func (r *Restrict) copy() Type { - cpy := *r - return &cpy -} - -// Func is a function definition. -type Func struct { - TypeID - Name - Type Type - Linkage FuncLinkage -} - -func (f *Func) String() string { - return fmt.Sprintf("func#%d[%s %q proto=#%d]", f.TypeID, f.Linkage, f.Name, f.Type.ID()) -} - -func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) } -func (f *Func) copy() Type { - cpy := *f - return &cpy -} - -// FuncProto is a function declaration. 
-type FuncProto struct { - TypeID - Return Type - Params []FuncParam -} - -func (fp *FuncProto) String() string { - var s strings.Builder - fmt.Fprintf(&s, "proto#%d[", fp.TypeID) - for _, param := range fp.Params { - fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID()) - } - fmt.Fprintf(&s, "return=#%d]", fp.Return.ID()) - return s.String() -} - -func (fp *FuncProto) walk(tdq *typeDeque) { - tdq.push(&fp.Return) - for i := range fp.Params { - tdq.push(&fp.Params[i].Type) - } -} - -func (fp *FuncProto) copy() Type { - cpy := *fp - cpy.Params = make([]FuncParam, len(fp.Params)) - copy(cpy.Params, fp.Params) - return &cpy -} - -type FuncParam struct { - Name - Type Type -} - -// Var is a global variable. -type Var struct { - TypeID - Name - Type Type - Linkage VarLinkage -} - -func (v *Var) String() string { - return fmt.Sprintf("var#%d[%s %q]", v.TypeID, v.Linkage, v.Name) -} - -func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) } -func (v *Var) copy() Type { - cpy := *v - return &cpy -} - -// Datasec is a global program section containing data. -type Datasec struct { - TypeID - Name - Size uint32 - Vars []VarSecinfo -} - -func (ds *Datasec) String() string { - return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name) -} - -func (ds *Datasec) size() uint32 { return ds.Size } - -func (ds *Datasec) walk(tdq *typeDeque) { - for i := range ds.Vars { - tdq.push(&ds.Vars[i].Type) - } -} - -func (ds *Datasec) copy() Type { - cpy := *ds - cpy.Vars = make([]VarSecinfo, len(ds.Vars)) - copy(cpy.Vars, ds.Vars) - return &cpy -} - -// VarSecinfo describes variable in a Datasec -// -// It is not a valid Type. 
-type VarSecinfo struct { - Type Type - Offset uint32 - Size uint32 -} - -type sizer interface { - size() uint32 -} - -var ( - _ sizer = (*Int)(nil) - _ sizer = (*Pointer)(nil) - _ sizer = (*Struct)(nil) - _ sizer = (*Union)(nil) - _ sizer = (*Enum)(nil) - _ sizer = (*Datasec)(nil) -) - -type qualifier interface { - qualify() Type -} - -var ( - _ qualifier = (*Const)(nil) - _ qualifier = (*Restrict)(nil) - _ qualifier = (*Volatile)(nil) -) - -// Sizeof returns the size of a type in bytes. -// -// Returns an error if the size can't be computed. -func Sizeof(typ Type) (int, error) { - var ( - n = int64(1) - elem int64 - ) - - for i := 0; i < maxTypeDepth; i++ { - switch v := typ.(type) { - case *Array: - if n > 0 && int64(v.Nelems) > math.MaxInt64/n { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - // Arrays may be of zero length, which allows - // n to be zero as well. - n *= int64(v.Nelems) - typ = v.Type - continue - - case sizer: - elem = int64(v.size()) - - case *Typedef: - typ = v.Type - continue - - case qualifier: - typ = v.qualify() - continue - - default: - return 0, fmt.Errorf("unsized type %T", typ) - } - - if n > 0 && elem > math.MaxInt64/n { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - size := n * elem - if int64(int(size)) != size { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - return int(size), nil - } - - return 0, fmt.Errorf("type %s: exceeded type depth", typ) -} - -// copy a Type recursively. -// -// typ may form a cycle. -// -// Returns any errors from transform verbatim. -func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) { - var ( - copies = make(map[Type]Type) - work typeDeque - ) - - for t := &typ; t != nil; t = work.pop() { - // *t is the identity of the type. 
- if cpy := copies[*t]; cpy != nil { - *t = cpy - continue - } - - var cpy Type - if transform != nil { - tf, err := transform(*t) - if err != nil { - return nil, fmt.Errorf("copy %s: %w", typ, err) - } - cpy = tf.copy() - } else { - cpy = (*t).copy() - } - - copies[*t] = cpy - *t = cpy - - // Mark any nested types for copying. - cpy.walk(&work) - } - - return typ, nil -} - -// typeDeque keeps track of pointers to types which still -// need to be visited. -type typeDeque struct { - types []*Type - read, write uint64 - mask uint64 -} - -// push adds a type to the stack. -func (dq *typeDeque) push(t *Type) { - if dq.write-dq.read < uint64(len(dq.types)) { - dq.types[dq.write&dq.mask] = t - dq.write++ - return - } - - new := len(dq.types) * 2 - if new == 0 { - new = 8 - } - - types := make([]*Type, new) - pivot := dq.read & dq.mask - n := copy(types, dq.types[pivot:]) - n += copy(types[n:], dq.types[:pivot]) - types[n] = t - - dq.types = types - dq.mask = uint64(new) - 1 - dq.read, dq.write = 0, uint64(n+1) -} - -// shift returns the first element or null. -func (dq *typeDeque) shift() *Type { - if dq.read == dq.write { - return nil - } - - index := dq.read & dq.mask - t := dq.types[index] - dq.types[index] = nil - dq.read++ - return t -} - -// pop returns the last element or null. -func (dq *typeDeque) pop() *Type { - if dq.read == dq.write { - return nil - } - - dq.write-- - index := dq.write & dq.mask - t := dq.types[index] - dq.types[index] = nil - return t -} - -// all returns all elements. -// -// The deque is empty after calling this method. -func (dq *typeDeque) all() []*Type { - length := dq.write - dq.read - types := make([]*Type, 0, length) - for t := dq.shift(); t != nil; t = dq.shift() { - types = append(types, t) - } - return types -} - -// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns -// it into a graph of Types connected via pointers. 
-// -// Returns a map of named types (so, where NameOff is non-zero) and a slice of types -// indexed by TypeID. Since BTF ignores compilation units, multiple types may share -// the same name. A Type may form a cyclic graph by pointing at itself. -func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]namedType, err error) { - type fixupDef struct { - id TypeID - expectedKind btfKind - typ *Type - } - - var fixups []fixupDef - fixup := func(id TypeID, expectedKind btfKind, typ *Type) { - fixups = append(fixups, fixupDef{id, expectedKind, typ}) - } - - convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) { - // NB: The fixup below relies on pre-allocating this array to - // work, since otherwise append might re-allocate members. - members := make([]Member, 0, len(raw)) - for i, btfMember := range raw { - name, err := rawStrings.LookupName(btfMember.NameOff) - if err != nil { - return nil, fmt.Errorf("can't get name for member %d: %w", i, err) - } - m := Member{ - Name: name, - Offset: btfMember.Offset, - } - if kindFlag { - m.BitfieldSize = btfMember.Offset >> 24 - m.Offset &= 0xffffff - } - members = append(members, m) - } - for i := range members { - fixup(raw[i].Type, kindUnknown, &members[i].Type) - } - return members, nil - } - - types = make([]Type, 0, len(rawTypes)) - types = append(types, (*Void)(nil)) - namedTypes = make(map[string][]namedType) - - for i, raw := range rawTypes { - var ( - // Void is defined to always be type ID 0, and is thus - // omitted from BTF. 
- id = TypeID(i + 1) - typ Type - ) - - name, err := rawStrings.LookupName(raw.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err) - } - - switch raw.Kind() { - case kindInt: - encoding, offset, bits := intEncoding(*raw.data.(*uint32)) - typ = &Int{id, name, raw.Size(), encoding, offset, bits} - - case kindPointer: - ptr := &Pointer{id, nil} - fixup(raw.Type(), kindUnknown, &ptr.Target) - typ = ptr - - case kindArray: - btfArr := raw.data.(*btfArray) - - // IndexType is unused according to btf.rst. - // Don't make it available right now. - arr := &Array{id, nil, btfArr.Nelems} - fixup(btfArr.Type, kindUnknown, &arr.Type) - typ = arr - - case kindStruct: - members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) - if err != nil { - return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) - } - typ = &Struct{id, name, raw.Size(), members} - - case kindUnion: - members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) - if err != nil { - return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err) - } - typ = &Union{id, name, raw.Size(), members} - - case kindEnum: - rawvals := raw.data.([]btfEnum) - vals := make([]EnumValue, 0, len(rawvals)) - for i, btfVal := range rawvals { - name, err := rawStrings.LookupName(btfVal.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err) - } - vals = append(vals, EnumValue{ - Name: name, - Value: btfVal.Val, - }) - } - typ = &Enum{id, name, vals} - - case kindForward: - if raw.KindFlag() { - typ = &Fwd{id, name, FwdUnion} - } else { - typ = &Fwd{id, name, FwdStruct} - } - - case kindTypedef: - typedef := &Typedef{id, name, nil} - fixup(raw.Type(), kindUnknown, &typedef.Type) - typ = typedef - - case kindVolatile: - volatile := &Volatile{id, nil} - fixup(raw.Type(), kindUnknown, &volatile.Type) - typ = volatile - - case kindConst: - cnst := &Const{id, nil} - fixup(raw.Type(), kindUnknown, &cnst.Type) - 
typ = cnst - - case kindRestrict: - restrict := &Restrict{id, nil} - fixup(raw.Type(), kindUnknown, &restrict.Type) - typ = restrict - - case kindFunc: - fn := &Func{id, name, nil, raw.Linkage()} - fixup(raw.Type(), kindFuncProto, &fn.Type) - typ = fn - - case kindFuncProto: - rawparams := raw.data.([]btfParam) - params := make([]FuncParam, 0, len(rawparams)) - for i, param := range rawparams { - name, err := rawStrings.LookupName(param.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) - } - params = append(params, FuncParam{ - Name: name, - }) - } - for i := range params { - fixup(rawparams[i].Type, kindUnknown, ¶ms[i].Type) - } - - fp := &FuncProto{id, nil, params} - fixup(raw.Type(), kindUnknown, &fp.Return) - typ = fp - - case kindVar: - variable := raw.data.(*btfVariable) - v := &Var{id, name, nil, VarLinkage(variable.Linkage)} - fixup(raw.Type(), kindUnknown, &v.Type) - typ = v - - case kindDatasec: - btfVars := raw.data.([]btfVarSecinfo) - vars := make([]VarSecinfo, 0, len(btfVars)) - for _, btfVar := range btfVars { - vars = append(vars, VarSecinfo{ - Offset: btfVar.Offset, - Size: btfVar.Size, - }) - } - for i := range vars { - fixup(btfVars[i].Type, kindVar, &vars[i].Type) - } - typ = &Datasec{id, name, raw.SizeType, vars} - - default: - return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind()) - } - - types = append(types, typ) - - if named, ok := typ.(namedType); ok { - if name := essentialName(named.name()); name != "" { - namedTypes[name] = append(namedTypes[name], named) - } - } - } - - for _, fixup := range fixups { - i := int(fixup.id) - if i >= len(types) { - return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) - } - - // Default void (id 0) to unknown - rawKind := kindUnknown - if i > 0 { - rawKind = rawTypes[i-1].Kind() - } - - if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected { - return nil, nil, fmt.Errorf("expected 
type id %d to have kind %s, found %s", fixup.id, expected, rawKind) - } - - *fixup.typ = types[i] - } - - return types, namedTypes, nil -} - -// essentialName returns name without a ___ suffix. -func essentialName(name string) string { - lastIdx := strings.LastIndex(name, "___") - if lastIdx > 0 { - return name[:lastIdx] - } - return name -} diff --git a/vendor/github.com/cilium/ebpf/internal/cpu.go b/vendor/github.com/cilium/ebpf/internal/cpu.go deleted file mode 100644 index d3424ba..0000000 --- a/vendor/github.com/cilium/ebpf/internal/cpu.go +++ /dev/null @@ -1,62 +0,0 @@ -package internal - -import ( - "fmt" - "io/ioutil" - "strings" - "sync" -) - -var sysCPU struct { - once sync.Once - err error - num int -} - -// PossibleCPUs returns the max number of CPUs a system may possibly have -// Logical CPU numbers must be of the form 0-n -func PossibleCPUs() (int, error) { - sysCPU.once.Do(func() { - sysCPU.num, sysCPU.err = parseCPUsFromFile("/sys/devices/system/cpu/possible") - }) - - return sysCPU.num, sysCPU.err -} - -func parseCPUsFromFile(path string) (int, error) { - spec, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - - n, err := parseCPUs(string(spec)) - if err != nil { - return 0, fmt.Errorf("can't parse %s: %v", path, err) - } - - return n, nil -} - -// parseCPUs parses the number of cpus from a string produced -// by bitmap_list_string() in the Linux kernel. -// Multiple ranges are rejected, since they can't be unified -// into a single number. -// This is the format of /sys/devices/system/cpu/possible, it -// is not suitable for /sys/devices/system/cpu/online, etc. 
-func parseCPUs(spec string) (int, error) { - if strings.Trim(spec, "\n") == "0" { - return 1, nil - } - - var low, high int - n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high) - if n != 2 || err != nil { - return 0, fmt.Errorf("invalid format: %s", spec) - } - if low != 0 { - return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec) - } - - // cpus is 0 indexed - return high + 1, nil -} diff --git a/vendor/github.com/cilium/ebpf/internal/elf.go b/vendor/github.com/cilium/ebpf/internal/elf.go deleted file mode 100644 index 54a4313..0000000 --- a/vendor/github.com/cilium/ebpf/internal/elf.go +++ /dev/null @@ -1,68 +0,0 @@ -package internal - -import ( - "debug/elf" - "fmt" - "io" -) - -type SafeELFFile struct { - *elf.File -} - -// NewSafeELFFile reads an ELF safely. -// -// Any panic during parsing is turned into an error. This is necessary since -// there are a bunch of unfixed bugs in debug/elf. -// -// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle -func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) { - defer func() { - r := recover() - if r == nil { - return - } - - safe = nil - err = fmt.Errorf("reading ELF file panicked: %s", r) - }() - - file, err := elf.NewFile(r) - if err != nil { - return nil, err - } - - return &SafeELFFile{file}, nil -} - -// Symbols is the safe version of elf.File.Symbols. -func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) { - defer func() { - r := recover() - if r == nil { - return - } - - syms = nil - err = fmt.Errorf("reading ELF symbols panicked: %s", r) - }() - - syms, err = se.File.Symbols() - return -} - -// DynamicSymbols is the safe version of elf.File.DynamicSymbols. 
-func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) { - defer func() { - r := recover() - if r == nil { - return - } - - syms = nil - err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r) - }() - - syms, err = se.File.DynamicSymbols() - return -} diff --git a/vendor/github.com/cilium/ebpf/internal/endian.go b/vendor/github.com/cilium/ebpf/internal/endian.go deleted file mode 100644 index 6ae99fc..0000000 --- a/vendor/github.com/cilium/ebpf/internal/endian.go +++ /dev/null @@ -1,29 +0,0 @@ -package internal - -import ( - "encoding/binary" - "unsafe" -) - -// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, -// depending on the host's endianness. -var NativeEndian binary.ByteOrder - -// Clang is set to either "el" or "eb" depending on the host's endianness. -var ClangEndian string - -func init() { - if isBigEndian() { - NativeEndian = binary.BigEndian - ClangEndian = "eb" - } else { - NativeEndian = binary.LittleEndian - ClangEndian = "el" - } -} - -func isBigEndian() (ret bool) { - i := int(0x1) - bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i)) - return bs[0] == 0 -} diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go deleted file mode 100644 index 877bd72..0000000 --- a/vendor/github.com/cilium/ebpf/internal/errors.go +++ /dev/null @@ -1,51 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "github.com/cilium/ebpf/internal/unix" -) - -// ErrorWithLog returns an error that includes logs from the -// kernel verifier. -// -// logErr should be the error returned by the syscall that generated -// the log. It is used to check for truncation of the output. 
-func ErrorWithLog(err error, log []byte, logErr error) error { - logStr := strings.Trim(CString(log), "\t\r\n ") - if errors.Is(logErr, unix.ENOSPC) { - logStr += " (truncated...)" - } - - return &VerifierError{err, logStr} -} - -// VerifierError includes information from the eBPF verifier. -type VerifierError struct { - cause error - log string -} - -func (le *VerifierError) Unwrap() error { - return le.cause -} - -func (le *VerifierError) Error() string { - if le.log == "" { - return le.cause.Error() - } - - return fmt.Sprintf("%s: %s", le.cause, le.log) -} - -// CString turns a NUL / zero terminated byte buffer into a string. -func CString(in []byte) string { - inLen := bytes.IndexByte(in, 0) - if inLen == -1 { - return "" - } - return string(in[:inLen]) -} diff --git a/vendor/github.com/cilium/ebpf/internal/fd.go b/vendor/github.com/cilium/ebpf/internal/fd.go deleted file mode 100644 index af04955..0000000 --- a/vendor/github.com/cilium/ebpf/internal/fd.go +++ /dev/null @@ -1,69 +0,0 @@ -package internal - -import ( - "errors" - "fmt" - "os" - "runtime" - "strconv" - - "github.com/cilium/ebpf/internal/unix" -) - -var ErrClosedFd = errors.New("use of closed file descriptor") - -type FD struct { - raw int64 -} - -func NewFD(value uint32) *FD { - fd := &FD{int64(value)} - runtime.SetFinalizer(fd, (*FD).Close) - return fd -} - -func (fd *FD) String() string { - return strconv.FormatInt(fd.raw, 10) -} - -func (fd *FD) Value() (uint32, error) { - if fd.raw < 0 { - return 0, ErrClosedFd - } - - return uint32(fd.raw), nil -} - -func (fd *FD) Close() error { - if fd.raw < 0 { - return nil - } - - value := int(fd.raw) - fd.raw = -1 - - fd.Forget() - return unix.Close(value) -} - -func (fd *FD) Forget() { - runtime.SetFinalizer(fd, nil) -} - -func (fd *FD) Dup() (*FD, error) { - if fd.raw < 0 { - return nil, ErrClosedFd - } - - dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0) - if err != nil { - return nil, fmt.Errorf("can't dup fd: %v", err) - } - - 
return NewFD(uint32(dup)), nil -} - -func (fd *FD) File(name string) *os.File { - fd.Forget() - return os.NewFile(uintptr(fd.raw), name) -} diff --git a/vendor/github.com/cilium/ebpf/internal/feature.go b/vendor/github.com/cilium/ebpf/internal/feature.go deleted file mode 100644 index c94a2e1..0000000 --- a/vendor/github.com/cilium/ebpf/internal/feature.go +++ /dev/null @@ -1,100 +0,0 @@ -package internal - -import ( - "errors" - "fmt" - "sync" -) - -// ErrNotSupported indicates that a feature is not supported by the current kernel. -var ErrNotSupported = errors.New("not supported") - -// UnsupportedFeatureError is returned by FeatureTest() functions. -type UnsupportedFeatureError struct { - // The minimum Linux mainline version required for this feature. - // Used for the error string, and for sanity checking during testing. - MinimumVersion Version - - // The name of the feature that isn't supported. - Name string -} - -func (ufe *UnsupportedFeatureError) Error() string { - if ufe.MinimumVersion.Unspecified() { - return fmt.Sprintf("%s not supported", ufe.Name) - } - return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion) -} - -// Is indicates that UnsupportedFeatureError is ErrNotSupported. -func (ufe *UnsupportedFeatureError) Is(target error) bool { - return target == ErrNotSupported -} - -type featureTest struct { - sync.RWMutex - successful bool - result error -} - -// FeatureTestFn is used to determine whether the kernel supports -// a certain feature. -// -// The return values have the following semantics: -// -// err == ErrNotSupported: the feature is not available -// err == nil: the feature is available -// err != nil: the test couldn't be executed -type FeatureTestFn func() error - -// FeatureTest wraps a function so that it is run at most once. -// -// name should identify the tested feature, while version must be in the -// form Major.Minor[.Patch]. 
-// -// Returns an error wrapping ErrNotSupported if the feature is not supported. -func FeatureTest(name, version string, fn FeatureTestFn) func() error { - v, err := NewVersion(version) - if err != nil { - return func() error { return err } - } - - ft := new(featureTest) - return func() error { - ft.RLock() - if ft.successful { - defer ft.RUnlock() - return ft.result - } - ft.RUnlock() - ft.Lock() - defer ft.Unlock() - // check one more time on the off - // chance that two go routines - // were able to call into the write - // lock - if ft.successful { - return ft.result - } - err := fn() - switch { - case errors.Is(err, ErrNotSupported): - ft.result = &UnsupportedFeatureError{ - MinimumVersion: v, - Name: name, - } - fallthrough - - case err == nil: - ft.successful = true - - default: - // We couldn't execute the feature test to a point - // where it could make a determination. - // Don't cache the result, just return it. - return fmt.Errorf("detect support for %s: %w", name, err) - } - - return ft.result - } -} diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go deleted file mode 100644 index fa74027..0000000 --- a/vendor/github.com/cilium/ebpf/internal/io.go +++ /dev/null @@ -1,16 +0,0 @@ -package internal - -import "errors" - -// DiscardZeroes makes sure that all written bytes are zero -// before discarding them. 
-type DiscardZeroes struct{} - -func (DiscardZeroes) Write(p []byte) (int, error) { - for _, b := range p { - if b != 0 { - return 0, errors.New("encountered non-zero byte") - } - } - return len(p), nil -} diff --git a/vendor/github.com/cilium/ebpf/internal/pinning.go b/vendor/github.com/cilium/ebpf/internal/pinning.go deleted file mode 100644 index 5329b43..0000000 --- a/vendor/github.com/cilium/ebpf/internal/pinning.go +++ /dev/null @@ -1,44 +0,0 @@ -package internal - -import ( - "errors" - "fmt" - "os" - - "github.com/cilium/ebpf/internal/unix" -) - -func Pin(currentPath, newPath string, fd *FD) error { - if newPath == "" { - return errors.New("given pinning path cannot be empty") - } - if currentPath == newPath { - return nil - } - if currentPath == "" { - return BPFObjPin(newPath, fd) - } - var err error - // Renameat2 is used instead of os.Rename to disallow the new path replacing - // an existing path. - if err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE); err == nil { - // Object is now moved to the new pinning path. - return nil - } - if !os.IsNotExist(err) { - return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) - } - // Internal state not in sync with the file system so let's fix it. - return BPFObjPin(newPath, fd) -} - -func Unpin(pinnedPath string) error { - if pinnedPath == "" { - return nil - } - err := os.Remove(pinnedPath) - if err == nil || os.IsNotExist(err) { - return nil - } - return err -} diff --git a/vendor/github.com/cilium/ebpf/internal/ptr.go b/vendor/github.com/cilium/ebpf/internal/ptr.go deleted file mode 100644 index f295de7..0000000 --- a/vendor/github.com/cilium/ebpf/internal/ptr.go +++ /dev/null @@ -1,31 +0,0 @@ -package internal - -import ( - "unsafe" - - "github.com/cilium/ebpf/internal/unix" -) - -// NewPointer creates a 64-bit pointer from an unsafe Pointer. 
-func NewPointer(ptr unsafe.Pointer) Pointer { - return Pointer{ptr: ptr} -} - -// NewSlicePointer creates a 64-bit pointer from a byte slice. -func NewSlicePointer(buf []byte) Pointer { - if len(buf) == 0 { - return Pointer{} - } - - return Pointer{ptr: unsafe.Pointer(&buf[0])} -} - -// NewStringPointer creates a 64-bit pointer from a string. -func NewStringPointer(str string) Pointer { - p, err := unix.BytePtrFromString(str) - if err != nil { - return Pointer{} - } - - return Pointer{ptr: unsafe.Pointer(p)} -} diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go deleted file mode 100644 index a56fbcc..0000000 --- a/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build armbe mips mips64p32 - -package internal - -import ( - "unsafe" -) - -// Pointer wraps an unsafe.Pointer to be 64bit to -// conform to the syscall specification. -type Pointer struct { - pad uint32 - ptr unsafe.Pointer -} diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go deleted file mode 100644 index be2ecfc..0000000 --- a/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build 386 amd64p32 arm mipsle mips64p32le - -package internal - -import ( - "unsafe" -) - -// Pointer wraps an unsafe.Pointer to be 64bit to -// conform to the syscall specification. -type Pointer struct { - ptr unsafe.Pointer - pad uint32 -} diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/ptr_64.go deleted file mode 100644 index 69452dc..0000000 --- a/vendor/github.com/cilium/ebpf/internal/ptr_64.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le -// +build !armbe,!mips,!mips64p32 - -package internal - -import ( - "unsafe" -) - -// Pointer wraps an unsafe.Pointer to be 64bit to -// conform to the syscall specification. 
-type Pointer struct { - ptr unsafe.Pointer -} diff --git a/vendor/github.com/cilium/ebpf/internal/syscall.go b/vendor/github.com/cilium/ebpf/internal/syscall.go deleted file mode 100644 index b766e64..0000000 --- a/vendor/github.com/cilium/ebpf/internal/syscall.go +++ /dev/null @@ -1,245 +0,0 @@ -package internal - -import ( - "fmt" - "path/filepath" - "runtime" - "syscall" - "unsafe" - - "github.com/cilium/ebpf/internal/unix" -) - -//go:generate stringer -output syscall_string.go -type=BPFCmd - -// BPFCmd identifies a subcommand of the bpf syscall. -type BPFCmd int - -// Well known BPF commands. -const ( - BPF_MAP_CREATE BPFCmd = iota - BPF_MAP_LOOKUP_ELEM - BPF_MAP_UPDATE_ELEM - BPF_MAP_DELETE_ELEM - BPF_MAP_GET_NEXT_KEY - BPF_PROG_LOAD - BPF_OBJ_PIN - BPF_OBJ_GET - BPF_PROG_ATTACH - BPF_PROG_DETACH - BPF_PROG_TEST_RUN - BPF_PROG_GET_NEXT_ID - BPF_MAP_GET_NEXT_ID - BPF_PROG_GET_FD_BY_ID - BPF_MAP_GET_FD_BY_ID - BPF_OBJ_GET_INFO_BY_FD - BPF_PROG_QUERY - BPF_RAW_TRACEPOINT_OPEN - BPF_BTF_LOAD - BPF_BTF_GET_FD_BY_ID - BPF_TASK_FD_QUERY - BPF_MAP_LOOKUP_AND_DELETE_ELEM - BPF_MAP_FREEZE - BPF_BTF_GET_NEXT_ID - BPF_MAP_LOOKUP_BATCH - BPF_MAP_LOOKUP_AND_DELETE_BATCH - BPF_MAP_UPDATE_BATCH - BPF_MAP_DELETE_BATCH - BPF_LINK_CREATE - BPF_LINK_UPDATE - BPF_LINK_GET_FD_BY_ID - BPF_LINK_GET_NEXT_ID - BPF_ENABLE_STATS - BPF_ITER_CREATE -) - -// BPF wraps SYS_BPF. -// -// Any pointers contained in attr must use the Pointer type from this package. 
-func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { - r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) - runtime.KeepAlive(attr) - - var err error - if errNo != 0 { - err = wrappedErrno{errNo} - } - - return r1, err -} - -type BPFProgAttachAttr struct { - TargetFd uint32 - AttachBpfFd uint32 - AttachType uint32 - AttachFlags uint32 - ReplaceBpfFd uint32 -} - -func BPFProgAttach(attr *BPFProgAttachAttr) error { - _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - -type BPFProgDetachAttr struct { - TargetFd uint32 - AttachBpfFd uint32 - AttachType uint32 -} - -func BPFProgDetach(attr *BPFProgDetachAttr) error { - _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - -type BPFEnableStatsAttr struct { - StatsType uint32 -} - -func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) { - ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, fmt.Errorf("enable stats: %w", err) - } - return NewFD(uint32(ptr)), nil - -} - -type bpfObjAttr struct { - fileName Pointer - fd uint32 - fileFlags uint32 -} - -const bpfFSType = 0xcafe4a11 - -// BPFObjPin wraps BPF_OBJ_PIN. -func BPFObjPin(fileName string, fd *FD) error { - dirName := filepath.Dir(fileName) - var statfs unix.Statfs_t - if err := unix.Statfs(dirName, &statfs); err != nil { - return err - } - if uint64(statfs.Type) != bpfFSType { - return fmt.Errorf("%s is not on a bpf filesystem", fileName) - } - - value, err := fd.Value() - if err != nil { - return err - } - - attr := bpfObjAttr{ - fileName: NewStringPointer(fileName), - fd: value, - } - _, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return fmt.Errorf("pin object %s: %w", fileName, err) - } - return nil -} - -// BPFObjGet wraps BPF_OBJ_GET. 
-func BPFObjGet(fileName string, flags uint32) (*FD, error) { - attr := bpfObjAttr{ - fileName: NewStringPointer(fileName), - fileFlags: flags, - } - ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return nil, fmt.Errorf("get object %s: %w", fileName, err) - } - return NewFD(uint32(ptr)), nil -} - -type bpfObjGetInfoByFDAttr struct { - fd uint32 - infoLen uint32 - info Pointer -} - -// BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD. -// -// Available from 4.13. -func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error { - value, err := fd.Value() - if err != nil { - return err - } - - attr := bpfObjGetInfoByFDAttr{ - fd: value, - infoLen: uint32(size), - info: NewPointer(info), - } - _, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return fmt.Errorf("fd %v: %w", fd, err) - } - return nil -} - -// BPFObjName is a null-terminated string made up of -// 'A-Za-z0-9_' characters. -type BPFObjName [unix.BPF_OBJ_NAME_LEN]byte - -// NewBPFObjName truncates the result if it is too long. -func NewBPFObjName(name string) BPFObjName { - var result BPFObjName - copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) - return result -} - -type BPFMapCreateAttr struct { - MapType uint32 - KeySize uint32 - ValueSize uint32 - MaxEntries uint32 - Flags uint32 - InnerMapFd uint32 // since 4.12 56f668dfe00d - NumaNode uint32 // since 4.14 96eabe7a40aa - MapName BPFObjName // since 4.15 ad5b177bd73f - MapIfIndex uint32 - BTFFd uint32 - BTFKeyTypeID uint32 - BTFValueTypeID uint32 -} - -func BPFMapCreate(attr *BPFMapCreateAttr) (*FD, error) { - fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - - return NewFD(uint32(fd)), nil -} - -// wrappedErrno wraps syscall.Errno to prevent direct comparisons with -// syscall.E* or unix.E* constants. -// -// You should never export an error of this type. 
-type wrappedErrno struct { - syscall.Errno -} - -func (we wrappedErrno) Unwrap() error { - return we.Errno -} - -type syscallError struct { - error - errno syscall.Errno -} - -func SyscallError(err error, errno syscall.Errno) error { - return &syscallError{err, errno} -} - -func (se *syscallError) Is(target error) bool { - return target == se.error -} - -func (se *syscallError) Unwrap() error { - return se.errno -} diff --git a/vendor/github.com/cilium/ebpf/internal/syscall_string.go b/vendor/github.com/cilium/ebpf/internal/syscall_string.go deleted file mode 100644 index 85df047..0000000 --- a/vendor/github.com/cilium/ebpf/internal/syscall_string.go +++ /dev/null @@ -1,56 +0,0 @@ -// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT. - -package internal - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[BPF_MAP_CREATE-0] - _ = x[BPF_MAP_LOOKUP_ELEM-1] - _ = x[BPF_MAP_UPDATE_ELEM-2] - _ = x[BPF_MAP_DELETE_ELEM-3] - _ = x[BPF_MAP_GET_NEXT_KEY-4] - _ = x[BPF_PROG_LOAD-5] - _ = x[BPF_OBJ_PIN-6] - _ = x[BPF_OBJ_GET-7] - _ = x[BPF_PROG_ATTACH-8] - _ = x[BPF_PROG_DETACH-9] - _ = x[BPF_PROG_TEST_RUN-10] - _ = x[BPF_PROG_GET_NEXT_ID-11] - _ = x[BPF_MAP_GET_NEXT_ID-12] - _ = x[BPF_PROG_GET_FD_BY_ID-13] - _ = x[BPF_MAP_GET_FD_BY_ID-14] - _ = x[BPF_OBJ_GET_INFO_BY_FD-15] - _ = x[BPF_PROG_QUERY-16] - _ = x[BPF_RAW_TRACEPOINT_OPEN-17] - _ = x[BPF_BTF_LOAD-18] - _ = x[BPF_BTF_GET_FD_BY_ID-19] - _ = x[BPF_TASK_FD_QUERY-20] - _ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21] - _ = x[BPF_MAP_FREEZE-22] - _ = x[BPF_BTF_GET_NEXT_ID-23] - _ = x[BPF_MAP_LOOKUP_BATCH-24] - _ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25] - _ = x[BPF_MAP_UPDATE_BATCH-26] - _ = x[BPF_MAP_DELETE_BATCH-27] - _ = x[BPF_LINK_CREATE-28] - _ = x[BPF_LINK_UPDATE-29] - _ = x[BPF_LINK_GET_FD_BY_ID-30] - _ = x[BPF_LINK_GET_NEXT_ID-31] - _ = 
x[BPF_ENABLE_STATS-32] - _ = x[BPF_ITER_CREATE-33] -} - -const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE" - -var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617} - -func (i BPFCmd) String() string { - if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) { - return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]] -} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go deleted file mode 100644 index 0a18eaf..0000000 --- a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go +++ /dev/null @@ -1,204 +0,0 @@ -// +build linux - -package unix - -import ( - "bytes" - "syscall" - - linux "golang.org/x/sys/unix" -) - -const ( - ENOENT = linux.ENOENT - EEXIST = linux.EEXIST - EAGAIN = linux.EAGAIN - ENOSPC = linux.ENOSPC - EINVAL = linux.EINVAL - EPOLLIN = linux.EPOLLIN - EINTR = linux.EINTR - EPERM = linux.EPERM - ESRCH = linux.ESRCH - ENODEV = linux.ENODEV - // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP - ENOTSUPP = syscall.Errno(0x20c) - - EBADF = linux.EBADF - BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC - BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE - BPF_F_RDONLY = linux.BPF_F_RDONLY - BPF_F_WRONLY = 
linux.BPF_F_WRONLY - BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG - BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG - BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE - BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE - BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP - BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN - BPF_TAG_SIZE = linux.BPF_TAG_SIZE - SYS_BPF = linux.SYS_BPF - F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC - EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD - EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC - O_CLOEXEC = linux.O_CLOEXEC - O_NONBLOCK = linux.O_NONBLOCK - PROT_READ = linux.PROT_READ - PROT_WRITE = linux.PROT_WRITE - MAP_SHARED = linux.MAP_SHARED - PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 - PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE - PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT - PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT - PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE - PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE - PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF - PerfBitWatermark = linux.PerfBitWatermark - PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW - PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC - RLIM_INFINITY = linux.RLIM_INFINITY - RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK - BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME - PERF_RECORD_LOST = linux.PERF_RECORD_LOST - PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE - AT_FDCWD = linux.AT_FDCWD - RENAME_NOREPLACE = linux.RENAME_NOREPLACE -) - -// Statfs_t is a wrapper -type Statfs_t = linux.Statfs_t - -// Rlimit is a wrapper -type Rlimit = linux.Rlimit - -// Setrlimit is a wrapper -func Setrlimit(resource int, rlim *Rlimit) (err error) { - return linux.Setrlimit(resource, rlim) -} - -// Syscall is a wrapper -func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - return linux.Syscall(trap, a1, a2, a3) -} - -// FcntlInt is a wrapper -func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - return linux.FcntlInt(fd, cmd, arg) -} - -// IoctlSetInt is a wrapper -func IoctlSetInt(fd int, 
req uint, value int) error { - return linux.IoctlSetInt(fd, req, value) -} - -// Statfs is a wrapper -func Statfs(path string, buf *Statfs_t) (err error) { - return linux.Statfs(path, buf) -} - -// Close is a wrapper -func Close(fd int) (err error) { - return linux.Close(fd) -} - -// EpollEvent is a wrapper -type EpollEvent = linux.EpollEvent - -// EpollWait is a wrapper -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - return linux.EpollWait(epfd, events, msec) -} - -// EpollCtl is a wrapper -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - return linux.EpollCtl(epfd, op, fd, event) -} - -// Eventfd is a wrapper -func Eventfd(initval uint, flags int) (fd int, err error) { - return linux.Eventfd(initval, flags) -} - -// Write is a wrapper -func Write(fd int, p []byte) (n int, err error) { - return linux.Write(fd, p) -} - -// EpollCreate1 is a wrapper -func EpollCreate1(flag int) (fd int, err error) { - return linux.EpollCreate1(flag) -} - -// PerfEventMmapPage is a wrapper -type PerfEventMmapPage linux.PerfEventMmapPage - -// SetNonblock is a wrapper -func SetNonblock(fd int, nonblocking bool) (err error) { - return linux.SetNonblock(fd, nonblocking) -} - -// Mmap is a wrapper -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return linux.Mmap(fd, offset, length, prot, flags) -} - -// Munmap is a wrapper -func Munmap(b []byte) (err error) { - return linux.Munmap(b) -} - -// PerfEventAttr is a wrapper -type PerfEventAttr = linux.PerfEventAttr - -// PerfEventOpen is a wrapper -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags) -} - -// Utsname is a wrapper -type Utsname = linux.Utsname - -// Uname is a wrapper -func Uname(buf *Utsname) (err error) { - return linux.Uname(buf) -} - -// Getpid is a wrapper -func Getpid() int { - return linux.Getpid() -} - -// 
Gettid is a wrapper -func Gettid() int { - return linux.Gettid() -} - -// Tgkill is a wrapper -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - return linux.Tgkill(tgid, tid, sig) -} - -// BytePtrFromString is a wrapper -func BytePtrFromString(s string) (*byte, error) { - return linux.BytePtrFromString(s) -} - -// ByteSliceToString is a wrapper -func ByteSliceToString(s []byte) string { - return linux.ByteSliceToString(s) -} - -// Renameat2 is a wrapper -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { - return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) -} - -func KernelRelease() (string, error) { - var uname Utsname - err := Uname(&uname) - if err != nil { - return "", err - } - - end := bytes.IndexByte(uname.Release[:], 0) - release := string(uname.Release[:end]) - return release, nil -} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go deleted file mode 100644 index 1b06def..0000000 --- a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go +++ /dev/null @@ -1,263 +0,0 @@ -// +build !linux - -package unix - -import ( - "fmt" - "runtime" - "syscall" -) - -var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) - -const ( - ENOENT = syscall.ENOENT - EEXIST = syscall.EEXIST - EAGAIN = syscall.EAGAIN - ENOSPC = syscall.ENOSPC - EINVAL = syscall.EINVAL - EINTR = syscall.EINTR - EPERM = syscall.EPERM - ESRCH = syscall.ESRCH - ENODEV = syscall.ENODEV - EBADF = syscall.Errno(0) - // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP - ENOTSUPP = syscall.Errno(0x20c) - - BPF_F_NO_PREALLOC = 0 - BPF_F_NUMA_NODE = 0 - BPF_F_RDONLY = 0 - BPF_F_WRONLY = 0 - BPF_F_RDONLY_PROG = 0 - BPF_F_WRONLY_PROG = 0 - BPF_F_SLEEPABLE = 0 - BPF_F_MMAPABLE = 0 - BPF_F_INNER_MAP = 0 - BPF_OBJ_NAME_LEN = 0x10 - BPF_TAG_SIZE = 0x8 - SYS_BPF = 321 - F_DUPFD_CLOEXEC = 0x406 - EPOLLIN = 0x1 - 
EPOLL_CTL_ADD = 0x1 - EPOLL_CLOEXEC = 0x80000 - O_CLOEXEC = 0x80000 - O_NONBLOCK = 0x800 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - MAP_SHARED = 0x1 - PERF_ATTR_SIZE_VER1 = 0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - PERF_EVENT_IOC_DISABLE = 0 - PERF_EVENT_IOC_ENABLE = 0 - PERF_EVENT_IOC_SET_BPF = 0 - PerfBitWatermark = 0x4000 - PERF_SAMPLE_RAW = 0x400 - PERF_FLAG_FD_CLOEXEC = 0x8 - RLIM_INFINITY = 0x7fffffffffffffff - RLIMIT_MEMLOCK = 8 - BPF_STATS_RUN_TIME = 0 - PERF_RECORD_LOST = 2 - PERF_RECORD_SAMPLE = 9 - AT_FDCWD = -0x2 - RENAME_NOREPLACE = 0x1 -) - -// Statfs_t is a wrapper -type Statfs_t struct { - Type int64 - Bsize int64 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid [2]int32 - Namelen int64 - Frsize int64 - Flags int64 - Spare [4]int64 -} - -// Rlimit is a wrapper -type Rlimit struct { - Cur uint64 - Max uint64 -} - -// Setrlimit is a wrapper -func Setrlimit(resource int, rlim *Rlimit) (err error) { - return errNonLinux -} - -// Syscall is a wrapper -func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - return 0, 0, syscall.Errno(1) -} - -// FcntlInt is a wrapper -func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - return -1, errNonLinux -} - -// IoctlSetInt is a wrapper -func IoctlSetInt(fd int, req uint, value int) error { - return errNonLinux -} - -// Statfs is a wrapper -func Statfs(path string, buf *Statfs_t) error { - return errNonLinux -} - -// Close is a wrapper -func Close(fd int) (err error) { - return errNonLinux -} - -// EpollEvent is a wrapper -type EpollEvent struct { - Events uint32 - Fd int32 - Pad int32 -} - -// EpollWait is a wrapper -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - return 0, errNonLinux -} - -// EpollCtl is a wrapper -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - return errNonLinux -} - -// Eventfd is a wrapper -func Eventfd(initval uint, flags int) 
(fd int, err error) { - return 0, errNonLinux -} - -// Write is a wrapper -func Write(fd int, p []byte) (n int, err error) { - return 0, errNonLinux -} - -// EpollCreate1 is a wrapper -func EpollCreate1(flag int) (fd int, err error) { - return 0, errNonLinux -} - -// PerfEventMmapPage is a wrapper -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -// SetNonblock is a wrapper -func SetNonblock(fd int, nonblocking bool) (err error) { - return errNonLinux -} - -// Mmap is a wrapper -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return []byte{}, errNonLinux -} - -// Munmap is a wrapper -func Munmap(b []byte) (err error) { - return errNonLinux -} - -// PerfEventAttr is a wrapper -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 -} - -// PerfEventOpen is a wrapper -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - return 0, errNonLinux -} - -// Utsname is a wrapper -type Utsname struct { - Release [65]byte - Version [65]byte -} - -// Uname is a wrapper -func Uname(buf *Utsname) (err error) { - return errNonLinux -} - -// Getpid is a wrapper -func Getpid() int { - return -1 -} - -// Gettid is a wrapper -func Gettid() int { - return -1 -} - -// Tgkill 
is a wrapper -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - return errNonLinux -} - -// BytePtrFromString is a wrapper -func BytePtrFromString(s string) (*byte, error) { - return nil, errNonLinux -} - -// ByteSliceToString is a wrapper -func ByteSliceToString(s []byte) string { - return "" -} - -// Renameat2 is a wrapper -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { - return errNonLinux -} - -func KernelRelease() (string, error) { - return "", errNonLinux -} diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go deleted file mode 100644 index 1a678bf..0000000 --- a/vendor/github.com/cilium/ebpf/internal/version.go +++ /dev/null @@ -1,163 +0,0 @@ -package internal - -import ( - "fmt" - "io/ioutil" - "regexp" - "sync" - - "github.com/cilium/ebpf/internal/unix" -) - -const ( - // Version constant used in ELF binaries indicating that the loader needs to - // substitute the eBPF program's version with the value of the kernel's - // KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf - // and RedSift. - MagicKernelVersion = 0xFFFFFFFE -) - -var ( - // Match between one and three decimals separated by dots, with the last - // segment (patch level) being optional on some kernels. - // The x.y.z string must appear at the start of a string or right after - // whitespace to prevent sequences like 'x.y.z-a.b.c' from matching 'a.b.c'. - rgxKernelVersion = regexp.MustCompile(`(?:\A|\s)\d{1,3}\.\d{1,3}(?:\.\d{1,3})?`) - - kernelVersion = struct { - once sync.Once - version Version - err error - }{} -) - -// A Version in the form Major.Minor.Patch. -type Version [3]uint16 - -// NewVersion creates a version from a string like "Major.Minor.Patch". -// -// Patch is optional. 
-func NewVersion(ver string) (Version, error) { - var major, minor, patch uint16 - n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch) - if n < 2 { - return Version{}, fmt.Errorf("invalid version: %s", ver) - } - return Version{major, minor, patch}, nil -} - -func (v Version) String() string { - if v[2] == 0 { - return fmt.Sprintf("v%d.%d", v[0], v[1]) - } - return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2]) -} - -// Less returns true if the version is less than another version. -func (v Version) Less(other Version) bool { - for i, a := range v { - if a == other[i] { - continue - } - return a < other[i] - } - return false -} - -// Unspecified returns true if the version is all zero. -func (v Version) Unspecified() bool { - return v[0] == 0 && v[1] == 0 && v[2] == 0 -} - -// Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h. -// It represents the kernel version and patch level as a single value. -func (v Version) Kernel() uint32 { - - // Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid - // overflowing into PATCHLEVEL. - // See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255"). - s := v[2] - if s > 255 { - s = 255 - } - - // Truncate members to uint8 to prevent them from spilling over into - // each other when overflowing 8 bits. - return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s)) -} - -// KernelVersion returns the version of the currently running kernel. -func KernelVersion() (Version, error) { - kernelVersion.once.Do(func() { - kernelVersion.version, kernelVersion.err = detectKernelVersion() - }) - - if kernelVersion.err != nil { - return Version{}, kernelVersion.err - } - return kernelVersion.version, nil -} - -// detectKernelVersion returns the version of the running kernel. It scans the -// following sources in order: /proc/version_signature, uname -v, uname -r. -// In each of those locations, the last-appearing x.y(.z) value is selected -// for parsing. 
The first location that yields a usable version number is -// returned. -func detectKernelVersion() (Version, error) { - - // Try reading /proc/version_signature for Ubuntu compatibility. - // Example format: Ubuntu 4.15.0-91.92-generic 4.15.18 - // This method exists in the kernel itself, see d18acd15c - // ("perf tools: Fix kernel version error in ubuntu"). - if pvs, err := ioutil.ReadFile("/proc/version_signature"); err == nil { - // If /proc/version_signature exists, failing to parse it is an error. - // It only exists on Ubuntu, where the real patch level is not obtainable - // through any other method. - v, err := findKernelVersion(string(pvs)) - if err != nil { - return Version{}, err - } - return v, nil - } - - var uname unix.Utsname - if err := unix.Uname(&uname); err != nil { - return Version{}, fmt.Errorf("calling uname: %w", err) - } - - // Debian puts the version including the patch level in uname.Version. - // It is not an error if there's no version number in uname.Version, - // as most distributions don't use it. Parsing can continue on uname.Release. - // Example format: #1 SMP Debian 4.19.37-5+deb10u2 (2019-08-08) - if v, err := findKernelVersion(unix.ByteSliceToString(uname.Version[:])); err == nil { - return v, nil - } - - // Most other distributions have the full kernel version including patch - // level in uname.Release. - // Example format: 4.19.0-5-amd64, 5.5.10-arch1-1 - v, err := findKernelVersion(unix.ByteSliceToString(uname.Release[:])) - if err != nil { - return Version{}, err - } - - return v, nil -} - -// findKernelVersion matches s against rgxKernelVersion and parses the result -// into a Version. If s contains multiple matches, the last entry is selected. -func findKernelVersion(s string) (Version, error) { - m := rgxKernelVersion.FindAllString(s, -1) - if m == nil { - return Version{}, fmt.Errorf("no kernel version in string: %s", s) - } - // Pick the last match of the string in case there are multiple. 
- s = m[len(m)-1] - - v, err := NewVersion(s) - if err != nil { - return Version{}, fmt.Errorf("parsing version string %s: %w", s, err) - } - - return v, nil -} diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go deleted file mode 100644 index 5540bb0..0000000 --- a/vendor/github.com/cilium/ebpf/link/cgroup.go +++ /dev/null @@ -1,171 +0,0 @@ -package link - -import ( - "errors" - "fmt" - "os" - - "github.com/cilium/ebpf" -) - -type cgroupAttachFlags uint32 - -// cgroup attach flags -const ( - flagAllowOverride cgroupAttachFlags = 1 << iota - flagAllowMulti - flagReplace -) - -type CgroupOptions struct { - // Path to a cgroupv2 folder. - Path string - // One of the AttachCgroup* constants - Attach ebpf.AttachType - // Program must be of type CGroup*, and the attach type must match Attach. - Program *ebpf.Program -} - -// AttachCgroup links a BPF program to a cgroup. -func AttachCgroup(opts CgroupOptions) (Link, error) { - cgroup, err := os.Open(opts.Path) - if err != nil { - return nil, fmt.Errorf("can't open cgroup: %s", err) - } - - clone, err := opts.Program.Clone() - if err != nil { - cgroup.Close() - return nil, err - } - - var cg Link - cg, err = newLinkCgroup(cgroup, opts.Attach, clone) - if errors.Is(err, ErrNotSupported) { - cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowMulti) - } - if errors.Is(err, ErrNotSupported) { - cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowOverride) - } - if err != nil { - cgroup.Close() - clone.Close() - return nil, err - } - - return cg, nil -} - -// LoadPinnedCgroup loads a pinned cgroup from a bpffs. 
-func LoadPinnedCgroup(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { - link, err := LoadPinnedRawLink(fileName, CgroupType, opts) - if err != nil { - return nil, err - } - - return &linkCgroup{*link}, nil -} - -type progAttachCgroup struct { - cgroup *os.File - current *ebpf.Program - attachType ebpf.AttachType - flags cgroupAttachFlags -} - -var _ Link = (*progAttachCgroup)(nil) - -func (cg *progAttachCgroup) isLink() {} - -func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) { - if flags&flagAllowMulti > 0 { - if err := haveProgAttachReplace(); err != nil { - return nil, fmt.Errorf("can't support multiple programs: %w", err) - } - } - - err := RawAttachProgram(RawAttachProgramOptions{ - Target: int(cgroup.Fd()), - Program: prog, - Flags: uint32(flags), - Attach: attach, - }) - if err != nil { - return nil, fmt.Errorf("cgroup: %w", err) - } - - return &progAttachCgroup{cgroup, prog, attach, flags}, nil -} - -func (cg *progAttachCgroup) Close() error { - defer cg.cgroup.Close() - defer cg.current.Close() - - err := RawDetachProgram(RawDetachProgramOptions{ - Target: int(cg.cgroup.Fd()), - Program: cg.current, - Attach: cg.attachType, - }) - if err != nil { - return fmt.Errorf("close cgroup: %s", err) - } - return nil -} - -func (cg *progAttachCgroup) Update(prog *ebpf.Program) error { - new, err := prog.Clone() - if err != nil { - return err - } - - args := RawAttachProgramOptions{ - Target: int(cg.cgroup.Fd()), - Program: prog, - Attach: cg.attachType, - Flags: uint32(cg.flags), - } - - if cg.flags&flagAllowMulti > 0 { - // Atomically replacing multiple programs requires at least - // 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf - // program in MULTI mode") - args.Flags |= uint32(flagReplace) - args.Replace = cg.current - } - - if err := RawAttachProgram(args); err != nil { - new.Close() - return fmt.Errorf("can't update cgroup: %s", err) - } - - 
cg.current.Close() - cg.current = new - return nil -} - -func (cg *progAttachCgroup) Pin(string) error { - return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) -} - -func (cg *progAttachCgroup) Unpin() error { - return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) -} - -type linkCgroup struct { - RawLink -} - -var _ Link = (*linkCgroup)(nil) - -func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) { - link, err := AttachRawLink(RawLinkOptions{ - Target: int(cgroup.Fd()), - Program: prog, - Attach: attach, - }) - if err != nil { - return nil, err - } - - return &linkCgroup{*link}, err -} diff --git a/vendor/github.com/cilium/ebpf/link/doc.go b/vendor/github.com/cilium/ebpf/link/doc.go deleted file mode 100644 index 2bde35e..0000000 --- a/vendor/github.com/cilium/ebpf/link/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package link allows attaching eBPF programs to various kernel hooks. -package link diff --git a/vendor/github.com/cilium/ebpf/link/iter.go b/vendor/github.com/cilium/ebpf/link/iter.go deleted file mode 100644 index 654d34e..0000000 --- a/vendor/github.com/cilium/ebpf/link/iter.go +++ /dev/null @@ -1,100 +0,0 @@ -package link - -import ( - "fmt" - "io" - "unsafe" - - "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" -) - -type IterOptions struct { - // Program must be of type Tracing with attach type - // AttachTraceIter. The kind of iterator to attach to is - // determined at load time via the AttachTo field. - // - // AttachTo requires the kernel to include BTF of itself, - // and it to be compiled with a recent pahole (>= 1.16). - Program *ebpf.Program - - // Map specifies the target map for bpf_map_elem and sockmap iterators. - // It may be nil. - Map *ebpf.Map -} - -// AttachIter attaches a BPF seq_file iterator. 
-func AttachIter(opts IterOptions) (*Iter, error) { - if err := haveBPFLink(); err != nil { - return nil, err - } - - progFd := opts.Program.FD() - if progFd < 0 { - return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd) - } - - var info bpfIterLinkInfoMap - if opts.Map != nil { - mapFd := opts.Map.FD() - if mapFd < 0 { - return nil, fmt.Errorf("invalid map: %w", internal.ErrClosedFd) - } - info.map_fd = uint32(mapFd) - } - - attr := bpfLinkCreateIterAttr{ - prog_fd: uint32(progFd), - attach_type: ebpf.AttachTraceIter, - iter_info: internal.NewPointer(unsafe.Pointer(&info)), - iter_info_len: uint32(unsafe.Sizeof(info)), - } - - fd, err := bpfLinkCreateIter(&attr) - if err != nil { - return nil, fmt.Errorf("can't link iterator: %w", err) - } - - return &Iter{RawLink{fd, ""}}, err -} - -// LoadPinnedIter loads a pinned iterator from a bpffs. -func LoadPinnedIter(fileName string, opts *ebpf.LoadPinOptions) (*Iter, error) { - link, err := LoadPinnedRawLink(fileName, IterType, opts) - if err != nil { - return nil, err - } - - return &Iter{*link}, err -} - -// Iter represents an attached bpf_iter. -type Iter struct { - RawLink -} - -// Open creates a new instance of the iterator. -// -// Reading from the returned reader triggers the BPF program. 
-func (it *Iter) Open() (io.ReadCloser, error) { - linkFd, err := it.fd.Value() - if err != nil { - return nil, err - } - - attr := &bpfIterCreateAttr{ - linkFd: linkFd, - } - - fd, err := bpfIterCreate(attr) - if err != nil { - return nil, fmt.Errorf("can't create iterator: %w", err) - } - - return fd.File("bpf_iter"), nil -} - -// union bpf_iter_link_info.map -type bpfIterLinkInfoMap struct { - map_fd uint32 -} diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go deleted file mode 100644 index ea71d6d..0000000 --- a/vendor/github.com/cilium/ebpf/link/kprobe.go +++ /dev/null @@ -1,438 +0,0 @@ -package link - -import ( - "bytes" - "crypto/rand" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sync" - "unsafe" - - "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/unix" -) - -var ( - kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events") - - kprobeRetprobeBit = struct { - once sync.Once - value uint64 - err error - }{} -) - -type probeType uint8 - -const ( - kprobeType probeType = iota - uprobeType -) - -func (pt probeType) String() string { - if pt == kprobeType { - return "kprobe" - } - return "uprobe" -} - -func (pt probeType) EventsPath() string { - if pt == kprobeType { - return kprobeEventsPath - } - return uprobeEventsPath -} - -func (pt probeType) PerfEventType(ret bool) perfEventType { - if pt == kprobeType { - if ret { - return kretprobeEvent - } - return kprobeEvent - } - if ret { - return uretprobeEvent - } - return uprobeEvent -} - -func (pt probeType) RetprobeBit() (uint64, error) { - if pt == kprobeType { - return kretprobeBit() - } - return uretprobeBit() -} - -// Kprobe attaches the given eBPF program to a perf event that fires when the -// given kernel symbol starts executing. See /proc/kallsyms for available -// symbols. 
For example, printk(): -// -// Kprobe("printk", prog) -// -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. -func Kprobe(symbol string, prog *ebpf.Program) (Link, error) { - k, err := kprobe(symbol, prog, false) - if err != nil { - return nil, err - } - - err = k.attach(prog) - if err != nil { - k.Close() - return nil, err - } - - return k, nil -} - -// Kretprobe attaches the given eBPF program to a perf event that fires right -// before the given kernel symbol exits, with the function stack left intact. -// See /proc/kallsyms for available symbols. For example, printk(): -// -// Kretprobe("printk", prog) -// -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. -func Kretprobe(symbol string, prog *ebpf.Program) (Link, error) { - k, err := kprobe(symbol, prog, true) - if err != nil { - return nil, err - } - - err = k.attach(prog) - if err != nil { - k.Close() - return nil, err - } - - return k, nil -} - -// kprobe opens a perf event on the given symbol and attaches prog to it. -// If ret is true, create a kretprobe. -func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { - if symbol == "" { - return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput) - } - if prog == nil { - return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) - } - if !rgxTraceEvent.MatchString(symbol) { - return nil, fmt.Errorf("symbol '%s' must be alphanumeric or underscore: %w", symbol, errInvalidInput) - } - if prog.Type() != ebpf.Kprobe { - return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput) - } - - // Use kprobe PMU if the kernel has it available. 
- tp, err := pmuKprobe(platformPrefix(symbol), ret) - if errors.Is(err, os.ErrNotExist) { - tp, err = pmuKprobe(symbol, ret) - } - if err == nil { - return tp, nil - } - if err != nil && !errors.Is(err, ErrNotSupported) { - return nil, fmt.Errorf("creating perf_kprobe PMU: %w", err) - } - - // Use tracefs if kprobe PMU is missing. - tp, err = tracefsKprobe(platformPrefix(symbol), ret) - if errors.Is(err, os.ErrNotExist) { - tp, err = tracefsKprobe(symbol, ret) - } - if err != nil { - return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err) - } - - return tp, nil -} - -// pmuKprobe opens a perf event based on the kprobe PMU. -// Returns os.ErrNotExist if the given symbol does not exist in the kernel. -func pmuKprobe(symbol string, ret bool) (*perfEvent, error) { - return pmuProbe(kprobeType, symbol, "", 0, ret) -} - -// pmuProbe opens a perf event based on a Performance Monitoring Unit. -// -// Requires at least a 4.17 kernel. -// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU" -// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU" -// -// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU -func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) { - // Getting the PMU type will fail if the kernel doesn't support - // the perf_[k,u]probe PMU. - et, err := getPMUEventType(typ) - if err != nil { - return nil, err - } - - var config uint64 - if ret { - bit, err := typ.RetprobeBit() - if err != nil { - return nil, err - } - config |= 1 << bit - } - - var ( - attr unix.PerfEventAttr - sp unsafe.Pointer - ) - switch typ { - case kprobeType: - // Create a pointer to a NUL-terminated string for the kernel. 
- sp, err := unsafeStringPtr(symbol) - if err != nil { - return nil, err - } - - attr = unix.PerfEventAttr{ - Type: uint32(et), // PMU event type read from sysfs - Ext1: uint64(uintptr(sp)), // Kernel symbol to trace - Config: config, // Retprobe flag - } - case uprobeType: - sp, err := unsafeStringPtr(path) - if err != nil { - return nil, err - } - - attr = unix.PerfEventAttr{ - // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1, - // since it added the config2 (Ext2) field. The Size field controls the - // size of the internal buffer the kernel allocates for reading the - // perf_event_attr argument from userspace. - Size: unix.PERF_ATTR_SIZE_VER1, - Type: uint32(et), // PMU event type read from sysfs - Ext1: uint64(uintptr(sp)), // Uprobe path - Ext2: offset, // Uprobe offset - Config: config, // Retprobe flag - } - } - - fd, err := unix.PerfEventOpen(&attr, perfAllThreads, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) - - // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL - // when trying to create a kretprobe for a missing symbol. Make sure ENOENT - // is returned to the caller. - if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, os.ErrNotExist) - } - if err != nil { - return nil, fmt.Errorf("opening perf event: %w", err) - } - - // Ensure the string pointer is not collected before PerfEventOpen returns. - runtime.KeepAlive(sp) - - // Kernel has perf_[k,u]probe PMU available, initialize perf event. - return &perfEvent{ - fd: internal.NewFD(uint32(fd)), - pmuID: et, - name: symbol, - typ: typ.PerfEventType(ret), - }, nil -} - -// tracefsKprobe creates a Kprobe tracefs entry. -func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) { - return tracefsProbe(kprobeType, symbol, "", 0, ret) -} - -// tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events. 
-// A new trace event group name is generated on every call to support creating -// multiple trace events for the same kernel or userspace symbol. -// Path and offset are only set in the case of uprobe(s) and are used to set -// the executable/library path on the filesystem and the offset where the probe is inserted. -// A perf event is then opened on the newly-created trace event and returned to the caller. -func tracefsProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) { - // Generate a random string for each trace event we attempt to create. - // This value is used as the 'group' token in tracefs to allow creating - // multiple kprobe trace events with the same name. - group, err := randomGroup("ebpf") - if err != nil { - return nil, fmt.Errorf("randomizing group name: %w", err) - } - - // Before attempting to create a trace event through tracefs, - // check if an event with the same group and name already exists. - // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate - // entry, so we need to rely on reads for detecting uniqueness. - _, err = getTraceEventID(group, symbol) - if err == nil { - return nil, fmt.Errorf("trace event already exists: %s/%s", group, symbol) - } - if err != nil && !errors.Is(err, os.ErrNotExist) { - return nil, fmt.Errorf("checking trace event %s/%s: %w", group, symbol, err) - } - - // Create the [k,u]probe trace event using tracefs. - if err := createTraceFSProbeEvent(typ, group, symbol, path, offset, ret); err != nil { - return nil, fmt.Errorf("creating probe entry on tracefs: %w", err) - } - - // Get the newly-created trace event's id. - tid, err := getTraceEventID(group, symbol) - if err != nil { - return nil, fmt.Errorf("getting trace event id: %w", err) - } - - // Kprobes are ephemeral tracepoints and share the same perf event type. 
- fd, err := openTracepointPerfEvent(tid) - if err != nil { - return nil, err - } - - return &perfEvent{ - fd: fd, - group: group, - name: symbol, - tracefsID: tid, - typ: typ.PerfEventType(ret), - }, nil -} - -// createTraceFSProbeEvent creates a new ephemeral trace event by writing to -// /[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid -// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist -// if a probe with the same group and symbol already exists. -func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset uint64, ret bool) error { - // Open the kprobe_events file in tracefs. - f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666) - if err != nil { - return fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err) - } - defer f.Close() - - var pe string - switch typ { - case kprobeType: - // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt): - // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe - // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe - // -:[GRP/]EVENT : Clear a probe - // - // Some examples: - // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy - // p:ebpf_5678/p_my_kprobe __x64_sys_execve - // - // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the - // kernel default to NR_CPUS. This is desired in most eBPF cases since - // subsampling or rate limiting logic can be more accurately implemented in - // the eBPF program itself. - // See Documentation/kprobes.txt for more details. 
- pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, symbol) - case uprobeType: - // The uprobe_events syntax is as follows: - // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe - // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe - // -:[GRP/]EVENT : Clear a probe - // - // Some examples: - // r:ebpf_1234/readline /bin/bash:0x12345 - // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345 - // - // See Documentation/trace/uprobetracer.txt for more details. - pathOffset := uprobePathOffset(path, offset) - pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, pathOffset) - } - _, err = f.WriteString(pe) - // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL - // when trying to create a kretprobe for a missing symbol. Make sure ENOENT - // is returned to the caller. - if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - return fmt.Errorf("symbol %s not found: %w", symbol, os.ErrNotExist) - } - if err != nil { - return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) - } - - return nil -} - -// closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol -// from /[k,u]probe_events. -func closeTraceFSProbeEvent(typ probeType, group, symbol string) error { - f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666) - if err != nil { - return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err) - } - defer f.Close() - - // See [k,u]probe_events syntax above. The probe type does not need to be specified - // for removals. - pe := fmt.Sprintf("-:%s/%s", group, symbol) - if _, err = f.WriteString(pe); err != nil { - return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) - } - - return nil -} - -// randomGroup generates a pseudorandom string for use as a tracefs group name. 
-// Returns an error when the output string would exceed 63 characters (kernel -// limitation), when rand.Read() fails or when prefix contains characters not -// allowed by rgxTraceEvent. -func randomGroup(prefix string) (string, error) { - if !rgxTraceEvent.MatchString(prefix) { - return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput) - } - - b := make([]byte, 8) - if _, err := rand.Read(b); err != nil { - return "", fmt.Errorf("reading random bytes: %w", err) - } - - group := fmt.Sprintf("%s_%x", prefix, b) - if len(group) > 63 { - return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, errInvalidInput) - } - - return group, nil -} - -func probePrefix(ret bool) string { - if ret { - return "r" - } - return "p" -} - -// determineRetprobeBit reads a Performance Monitoring Unit's retprobe bit -// from /sys/bus/event_source/devices//format/retprobe. -func determineRetprobeBit(typ probeType) (uint64, error) { - p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe") - - data, err := ioutil.ReadFile(p) - if err != nil { - return 0, err - } - - var rp uint64 - n, err := fmt.Sscanf(string(bytes.TrimSpace(data)), "config:%d", &rp) - if err != nil { - return 0, fmt.Errorf("parse retprobe bit: %w", err) - } - if n != 1 { - return 0, fmt.Errorf("parse retprobe bit: expected 1 item, got %d", n) - } - - return rp, nil -} - -func kretprobeBit() (uint64, error) { - kprobeRetprobeBit.once.Do(func() { - kprobeRetprobeBit.value, kprobeRetprobeBit.err = determineRetprobeBit(kprobeType) - }) - return kprobeRetprobeBit.value, kprobeRetprobeBit.err -} diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go deleted file mode 100644 index 16cfff4..0000000 --- a/vendor/github.com/cilium/ebpf/link/link.go +++ /dev/null @@ -1,229 +0,0 @@ -package link - -import ( - "fmt" - "unsafe" - - "github.com/cilium/ebpf" - 
"github.com/cilium/ebpf/internal" -) - -var ErrNotSupported = internal.ErrNotSupported - -// Link represents a Program attached to a BPF hook. -type Link interface { - // Replace the current program with a new program. - // - // Passing a nil program is an error. May return an error wrapping ErrNotSupported. - Update(*ebpf.Program) error - - // Persist a link by pinning it into a bpffs. - // - // May return an error wrapping ErrNotSupported. - Pin(string) error - - // Undo a previous call to Pin. - // - // May return an error wrapping ErrNotSupported. - Unpin() error - - // Close frees resources. - // - // The link will be broken unless it has been pinned. A link - // may continue past the lifetime of the process if Close is - // not called. - Close() error - - // Prevent external users from implementing this interface. - isLink() -} - -// ID uniquely identifies a BPF link. -type ID uint32 - -// RawLinkOptions control the creation of a raw link. -type RawLinkOptions struct { - // File descriptor to attach to. This differs for each attach type. - Target int - // Program to attach. - Program *ebpf.Program - // Attach must match the attach type of Program. - Attach ebpf.AttachType -} - -// RawLinkInfo contains metadata on a link. -type RawLinkInfo struct { - Type Type - ID ID - Program ebpf.ProgramID -} - -// RawLink is the low-level API to bpf_link. -// -// You should consider using the higher level interfaces in this -// package instead. -type RawLink struct { - fd *internal.FD - pinnedPath string -} - -// AttachRawLink creates a raw link. 
-func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { - if err := haveBPFLink(); err != nil { - return nil, err - } - - if opts.Target < 0 { - return nil, fmt.Errorf("invalid target: %s", internal.ErrClosedFd) - } - - progFd := opts.Program.FD() - if progFd < 0 { - return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd) - } - - attr := bpfLinkCreateAttr{ - targetFd: uint32(opts.Target), - progFd: uint32(progFd), - attachType: opts.Attach, - } - fd, err := bpfLinkCreate(&attr) - if err != nil { - return nil, fmt.Errorf("can't create link: %s", err) - } - - return &RawLink{fd, ""}, nil -} - -// LoadPinnedRawLink loads a persisted link from a bpffs. -// -// Returns an error if the pinned link type doesn't match linkType. Pass -// UnspecifiedType to disable this behaviour. -func LoadPinnedRawLink(fileName string, linkType Type, opts *ebpf.LoadPinOptions) (*RawLink, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) - if err != nil { - return nil, fmt.Errorf("load pinned link: %w", err) - } - - link := &RawLink{fd, fileName} - if linkType == UnspecifiedType { - return link, nil - } - - info, err := link.Info() - if err != nil { - link.Close() - return nil, fmt.Errorf("get pinned link info: %s", err) - } - - if info.Type != linkType { - link.Close() - return nil, fmt.Errorf("link type %v doesn't match %v", info.Type, linkType) - } - - return link, nil -} - -func (l *RawLink) isLink() {} - -// FD returns the raw file descriptor. -func (l *RawLink) FD() int { - fd, err := l.fd.Value() - if err != nil { - return -1 - } - return int(fd) -} - -// Close breaks the link. -// -// Use Pin if you want to make the link persistent. -func (l *RawLink) Close() error { - return l.fd.Close() -} - -// Pin persists a link past the lifetime of the process. -// -// Calling Close on a pinned Link will not break the link -// until the pin is removed. 
-func (l *RawLink) Pin(fileName string) error { - if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil { - return err - } - l.pinnedPath = fileName - return nil -} - -// Unpin implements the Link interface. -func (l *RawLink) Unpin() error { - if err := internal.Unpin(l.pinnedPath); err != nil { - return err - } - l.pinnedPath = "" - return nil -} - -// Update implements the Link interface. -func (l *RawLink) Update(new *ebpf.Program) error { - return l.UpdateArgs(RawLinkUpdateOptions{ - New: new, - }) -} - -// RawLinkUpdateOptions control the behaviour of RawLink.UpdateArgs. -type RawLinkUpdateOptions struct { - New *ebpf.Program - Old *ebpf.Program - Flags uint32 -} - -// UpdateArgs updates a link based on args. -func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error { - newFd := opts.New.FD() - if newFd < 0 { - return fmt.Errorf("invalid program: %s", internal.ErrClosedFd) - } - - var oldFd int - if opts.Old != nil { - oldFd = opts.Old.FD() - if oldFd < 0 { - return fmt.Errorf("invalid replacement program: %s", internal.ErrClosedFd) - } - } - - linkFd, err := l.fd.Value() - if err != nil { - return fmt.Errorf("can't update link: %s", err) - } - - attr := bpfLinkUpdateAttr{ - linkFd: linkFd, - newProgFd: uint32(newFd), - oldProgFd: uint32(oldFd), - flags: opts.Flags, - } - return bpfLinkUpdate(&attr) -} - -// struct bpf_link_info -type bpfLinkInfo struct { - typ uint32 - id uint32 - prog_id uint32 -} - -// Info returns metadata about the link. 
-func (l *RawLink) Info() (*RawLinkInfo, error) { - var info bpfLinkInfo - err := internal.BPFObjGetInfoByFD(l.fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) - if err != nil { - return nil, fmt.Errorf("link info: %s", err) - } - - return &RawLinkInfo{ - Type(info.typ), - ID(info.id), - ebpf.ProgramID(info.prog_id), - }, nil -} diff --git a/vendor/github.com/cilium/ebpf/link/netns.go b/vendor/github.com/cilium/ebpf/link/netns.go deleted file mode 100644 index 37e5b84..0000000 --- a/vendor/github.com/cilium/ebpf/link/netns.go +++ /dev/null @@ -1,60 +0,0 @@ -package link - -import ( - "fmt" - - "github.com/cilium/ebpf" -) - -// NetNsInfo contains metadata about a network namespace link. -type NetNsInfo struct { - RawLinkInfo -} - -// NetNsLink is a program attached to a network namespace. -type NetNsLink struct { - *RawLink -} - -// AttachNetNs attaches a program to a network namespace. -func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) { - var attach ebpf.AttachType - switch t := prog.Type(); t { - case ebpf.FlowDissector: - attach = ebpf.AttachFlowDissector - case ebpf.SkLookup: - attach = ebpf.AttachSkLookup - default: - return nil, fmt.Errorf("can't attach %v to network namespace", t) - } - - link, err := AttachRawLink(RawLinkOptions{ - Target: ns, - Program: prog, - Attach: attach, - }) - if err != nil { - return nil, err - } - - return &NetNsLink{link}, nil -} - -// LoadPinnedNetNs loads a network namespace link from bpffs. -func LoadPinnedNetNs(fileName string, opts *ebpf.LoadPinOptions) (*NetNsLink, error) { - link, err := LoadPinnedRawLink(fileName, NetNsType, opts) - if err != nil { - return nil, err - } - - return &NetNsLink{link}, nil -} - -// Info returns information about the link. 
-func (nns *NetNsLink) Info() (*NetNsInfo, error) { - info, err := nns.RawLink.Info() - if err != nil { - return nil, err - } - return &NetNsInfo{*info}, nil -} diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go deleted file mode 100644 index 5267a47..0000000 --- a/vendor/github.com/cilium/ebpf/link/perf_event.go +++ /dev/null @@ -1,273 +0,0 @@ -package link - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "unsafe" - - "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/unix" -) - -// Getting the terminology right is usually the hardest part. For posterity and -// for staying sane during implementation: -// -// - trace event: Representation of a kernel runtime hook. Filesystem entries -// under /events. Can be tracepoints (static), kprobes or uprobes. -// Can be instantiated into perf events (see below). -// - tracepoint: A predetermined hook point in the kernel. Exposed as trace -// events in (sub)directories under /events. Cannot be closed or -// removed, they are static. -// - k(ret)probe: Ephemeral trace events based on entry or exit points of -// exported kernel symbols. kprobe-based (tracefs) trace events can be -// created system-wide by writing to the /kprobe_events file, or -// they can be scoped to the current process by creating PMU perf events. -// - u(ret)probe: Ephemeral trace events based on user provides ELF binaries -// and offsets. uprobe-based (tracefs) trace events can be -// created system-wide by writing to the /uprobe_events file, or -// they can be scoped to the current process by creating PMU perf events. -// - perf event: An object instantiated based on an existing trace event or -// kernel symbol. Referred to by fd in userspace. -// Exactly one eBPF program can be attached to a perf event. Multiple perf -// events can be created from a single trace event. 
Closing a perf event -// stops any further invocations of the attached eBPF program. - -var ( - tracefsPath = "/sys/kernel/debug/tracing" - - // Trace event groups, names and kernel symbols must adhere to this set - // of characters. Non-empty, first character must not be a number, all - // characters must be alphanumeric or underscore. - rgxTraceEvent = regexp.MustCompile("^[a-zA-Z_][0-9a-zA-Z_]*$") - - errInvalidInput = errors.New("invalid input") -) - -const ( - perfAllThreads = -1 -) - -type perfEventType uint8 - -const ( - tracepointEvent perfEventType = iota - kprobeEvent - kretprobeEvent - uprobeEvent - uretprobeEvent -) - -// A perfEvent represents a perf event kernel object. Exactly one eBPF program -// can be attached to it. It is created based on a tracefs trace event or a -// Performance Monitoring Unit (PMU). -type perfEvent struct { - - // Group and name of the tracepoint/kprobe/uprobe. - group string - name string - - // PMU event ID read from sysfs. Valid IDs are non-zero. - pmuID uint64 - // ID of the trace event read from tracefs. Valid IDs are non-zero. - tracefsID uint64 - - // The event type determines the types of programs that can be attached. - typ perfEventType - - fd *internal.FD -} - -func (pe *perfEvent) isLink() {} - -func (pe *perfEvent) Pin(string) error { - return fmt.Errorf("pin perf event: %w", ErrNotSupported) -} - -func (pe *perfEvent) Unpin() error { - return fmt.Errorf("unpin perf event: %w", ErrNotSupported) -} - -// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"), -// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array -// owned by the perf event, which means multiple programs can be attached -// simultaneously. -// -// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event -// returns EEXIST. -// -// Detaching a program from a perf event is currently not possible, so a -// program replacement mechanism cannot be implemented for perf events. 
-func (pe *perfEvent) Update(prog *ebpf.Program) error { - return fmt.Errorf("can't replace eBPF program in perf event: %w", ErrNotSupported) -} - -func (pe *perfEvent) Close() error { - if pe.fd == nil { - return nil - } - - pfd, err := pe.fd.Value() - if err != nil { - return fmt.Errorf("getting perf event fd: %w", err) - } - - err = unix.IoctlSetInt(int(pfd), unix.PERF_EVENT_IOC_DISABLE, 0) - if err != nil { - return fmt.Errorf("disabling perf event: %w", err) - } - - err = pe.fd.Close() - if err != nil { - return fmt.Errorf("closing perf event fd: %w", err) - } - - switch pe.typ { - case kprobeEvent, kretprobeEvent: - // Clean up kprobe tracefs entry. - if pe.tracefsID != 0 { - return closeTraceFSProbeEvent(kprobeType, pe.group, pe.name) - } - case uprobeEvent, uretprobeEvent: - // Clean up uprobe tracefs entry. - if pe.tracefsID != 0 { - return closeTraceFSProbeEvent(uprobeType, pe.group, pe.name) - } - case tracepointEvent: - // Tracepoint trace events don't hold any extra resources. - return nil - } - - return nil -} - -// attach the given eBPF prog to the perf event stored in pe. -// pe must contain a valid perf event fd. -// prog's type must match the program type stored in pe. -func (pe *perfEvent) attach(prog *ebpf.Program) error { - if prog == nil { - return errors.New("cannot attach a nil program") - } - if pe.fd == nil { - return errors.New("cannot attach to nil perf event") - } - if prog.FD() < 0 { - return fmt.Errorf("invalid program: %w", internal.ErrClosedFd) - } - switch pe.typ { - case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent: - if t := prog.Type(); t != ebpf.Kprobe { - return fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t) - } - case tracepointEvent: - if t := prog.Type(); t != ebpf.TracePoint { - return fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t) - } - default: - return fmt.Errorf("unknown perf event type: %d", pe.typ) - } - - // The ioctl below will fail when the fd is invalid. 
- kfd, _ := pe.fd.Value() - - // Assign the eBPF program to the perf event. - err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) - if err != nil { - return fmt.Errorf("setting perf event bpf program: %w", err) - } - - // PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values. - if err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { - return fmt.Errorf("enable perf event: %s", err) - } - - // Close the perf event when its reference is lost to avoid leaking system resources. - runtime.SetFinalizer(pe, (*perfEvent).Close) - return nil -} - -// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str. -func unsafeStringPtr(str string) (unsafe.Pointer, error) { - p, err := unix.BytePtrFromString(str) - if err != nil { - return nil, err - } - return unsafe.Pointer(p), nil -} - -// getTraceEventID reads a trace event's ID from tracefs given its group and name. -// group and name must be alphanumeric or underscore, as required by the kernel. -func getTraceEventID(group, name string) (uint64, error) { - tid, err := uint64FromFile(tracefsPath, "events", group, name, "id") - if errors.Is(err, os.ErrNotExist) { - return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist) - } - if err != nil { - return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err) - } - - return tid, nil -} - -// getPMUEventType reads a Performance Monitoring Unit's type (numeric identifier) -// from /sys/bus/event_source/devices//type. -// -// Returns ErrNotSupported if the pmu type is not supported. 
-func getPMUEventType(typ probeType) (uint64, error) { - et, err := uint64FromFile("/sys/bus/event_source/devices", typ.String(), "type") - if errors.Is(err, os.ErrNotExist) { - return 0, fmt.Errorf("pmu type %s: %w", typ, ErrNotSupported) - } - if err != nil { - return 0, fmt.Errorf("reading pmu type %s: %w", typ, err) - } - - return et, nil -} - -// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide -// [k,u]probes created by writing to /[k,u]probe_events are tracepoints -// behind the scenes, and can be attached to using these perf events. -func openTracepointPerfEvent(tid uint64) (*internal.FD, error) { - attr := unix.PerfEventAttr{ - Type: unix.PERF_TYPE_TRACEPOINT, - Config: tid, - Sample_type: unix.PERF_SAMPLE_RAW, - Sample: 1, - Wakeup: 1, - } - - fd, err := unix.PerfEventOpen(&attr, perfAllThreads, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) - if err != nil { - return nil, fmt.Errorf("opening tracepoint perf event: %w", err) - } - - return internal.NewFD(uint32(fd)), nil -} - -// uint64FromFile reads a uint64 from a file. All elements of path are sanitized -// and joined onto base. Returns error if base no longer prefixes the path after -// joining all components. -func uint64FromFile(base string, path ...string) (uint64, error) { - l := filepath.Join(path...) 
- p := filepath.Join(base, l) - if !strings.HasPrefix(p, base) { - return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput) - } - - data, err := ioutil.ReadFile(p) - if err != nil { - return 0, fmt.Errorf("reading file %s: %w", p, err) - } - - et := bytes.TrimSpace(data) - return strconv.ParseUint(string(et), 10, 64) -} diff --git a/vendor/github.com/cilium/ebpf/link/platform.go b/vendor/github.com/cilium/ebpf/link/platform.go deleted file mode 100644 index eb6f7b7..0000000 --- a/vendor/github.com/cilium/ebpf/link/platform.go +++ /dev/null @@ -1,25 +0,0 @@ -package link - -import ( - "fmt" - "runtime" -) - -func platformPrefix(symbol string) string { - - prefix := runtime.GOARCH - - // per https://github.com/golang/go/blob/master/src/go/build/syslist.go - switch prefix { - case "386": - prefix = "ia32" - case "amd64", "amd64p32": - prefix = "x64" - case "arm64", "arm64be": - prefix = "arm64" - default: - return symbol - } - - return fmt.Sprintf("__%s_%s", prefix, symbol) -} diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go deleted file mode 100644 index b90c457..0000000 --- a/vendor/github.com/cilium/ebpf/link/program.go +++ /dev/null @@ -1,76 +0,0 @@ -package link - -import ( - "fmt" - - "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" -) - -type RawAttachProgramOptions struct { - // File descriptor to attach to. This differs for each attach type. - Target int - // Program to attach. - Program *ebpf.Program - // Program to replace (cgroups). - Replace *ebpf.Program - // Attach must match the attach type of Program (and Replace). - Attach ebpf.AttachType - // Flags control the attach behaviour. This differs for each attach type. - Flags uint32 -} - -// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH. -// -// You should use one of the higher level abstractions available in this -// package if possible. 
-func RawAttachProgram(opts RawAttachProgramOptions) error { - if err := haveProgAttach(); err != nil { - return err - } - - var replaceFd uint32 - if opts.Replace != nil { - replaceFd = uint32(opts.Replace.FD()) - } - - attr := internal.BPFProgAttachAttr{ - TargetFd: uint32(opts.Target), - AttachBpfFd: uint32(opts.Program.FD()), - ReplaceBpfFd: replaceFd, - AttachType: uint32(opts.Attach), - AttachFlags: uint32(opts.Flags), - } - - if err := internal.BPFProgAttach(&attr); err != nil { - return fmt.Errorf("can't attach program: %w", err) - } - return nil -} - -type RawDetachProgramOptions struct { - Target int - Program *ebpf.Program - Attach ebpf.AttachType -} - -// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH. -// -// You should use one of the higher level abstractions available in this -// package if possible. -func RawDetachProgram(opts RawDetachProgramOptions) error { - if err := haveProgAttach(); err != nil { - return err - } - - attr := internal.BPFProgDetachAttr{ - TargetFd: uint32(opts.Target), - AttachBpfFd: uint32(opts.Program.FD()), - AttachType: uint32(opts.Attach), - } - if err := internal.BPFProgDetach(&attr); err != nil { - return fmt.Errorf("can't detach program: %w", err) - } - - return nil -} diff --git a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go deleted file mode 100644 index f4beb1e..0000000 --- a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go +++ /dev/null @@ -1,61 +0,0 @@ -package link - -import ( - "fmt" - - "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" -) - -type RawTracepointOptions struct { - // Tracepoint name. - Name string - // Program must be of type RawTracepoint* - Program *ebpf.Program -} - -// AttachRawTracepoint links a BPF program to a raw_tracepoint. -// -// Requires at least Linux 4.17. 
-func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) { - if t := opts.Program.Type(); t != ebpf.RawTracepoint && t != ebpf.RawTracepointWritable { - return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t) - } - if opts.Program.FD() < 0 { - return nil, fmt.Errorf("invalid program: %w", internal.ErrClosedFd) - } - - fd, err := bpfRawTracepointOpen(&bpfRawTracepointOpenAttr{ - name: internal.NewStringPointer(opts.Name), - fd: uint32(opts.Program.FD()), - }) - if err != nil { - return nil, err - } - - return &progAttachRawTracepoint{fd: fd}, nil -} - -type progAttachRawTracepoint struct { - fd *internal.FD -} - -var _ Link = (*progAttachRawTracepoint)(nil) - -func (rt *progAttachRawTracepoint) isLink() {} - -func (rt *progAttachRawTracepoint) Close() error { - return rt.fd.Close() -} - -func (rt *progAttachRawTracepoint) Update(_ *ebpf.Program) error { - return fmt.Errorf("can't update raw_tracepoint: %w", ErrNotSupported) -} - -func (rt *progAttachRawTracepoint) Pin(_ string) error { - return fmt.Errorf("can't pin raw_tracepoint: %w", ErrNotSupported) -} - -func (rt *progAttachRawTracepoint) Unpin() error { - return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported) -} diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go deleted file mode 100644 index 30e8a88..0000000 --- a/vendor/github.com/cilium/ebpf/link/syscalls.go +++ /dev/null @@ -1,190 +0,0 @@ -package link - -import ( - "errors" - "unsafe" - - "github.com/cilium/ebpf" - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/unix" -) - -// Type is the kind of link. -type Type uint32 - -// Valid link types. -// -// Equivalent to enum bpf_link_type. 
-const ( - UnspecifiedType Type = iota - RawTracepointType - TracingType - CgroupType - IterType - NetNsType - XDPType -) - -var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error { - prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Type: ebpf.CGroupSKB, - AttachType: ebpf.AttachCGroupInetIngress, - License: "MIT", - Instructions: asm.Instructions{ - asm.Mov.Imm(asm.R0, 0), - asm.Return(), - }, - }) - if err != nil { - return internal.ErrNotSupported - } - - // BPF_PROG_ATTACH was introduced at the same time as CGgroupSKB, - // so being able to load the program is enough to infer that we - // have the syscall. - prog.Close() - return nil -}) - -var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replacement", "5.5", func() error { - if err := haveProgAttach(); err != nil { - return err - } - - prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Type: ebpf.CGroupSKB, - AttachType: ebpf.AttachCGroupInetIngress, - License: "MIT", - Instructions: asm.Instructions{ - asm.Mov.Imm(asm.R0, 0), - asm.Return(), - }, - }) - if err != nil { - return internal.ErrNotSupported - } - defer prog.Close() - - // We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs. - // If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't - // present. - attr := internal.BPFProgAttachAttr{ - // We rely on this being checked after attachFlags. 
- TargetFd: ^uint32(0), - AttachBpfFd: uint32(prog.FD()), - AttachType: uint32(ebpf.AttachCGroupInetIngress), - AttachFlags: uint32(flagReplace), - } - - err = internal.BPFProgAttach(&attr) - if errors.Is(err, unix.EINVAL) { - return internal.ErrNotSupported - } - if errors.Is(err, unix.EBADF) { - return nil - } - return err -}) - -type bpfLinkCreateAttr struct { - progFd uint32 - targetFd uint32 - attachType ebpf.AttachType - flags uint32 -} - -func bpfLinkCreate(attr *bpfLinkCreateAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - return internal.NewFD(uint32(ptr)), nil -} - -type bpfLinkCreateIterAttr struct { - prog_fd uint32 - target_fd uint32 - attach_type ebpf.AttachType - flags uint32 - iter_info internal.Pointer - iter_info_len uint32 -} - -func bpfLinkCreateIter(attr *bpfLinkCreateIterAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - return internal.NewFD(uint32(ptr)), nil -} - -type bpfLinkUpdateAttr struct { - linkFd uint32 - newProgFd uint32 - flags uint32 - oldProgFd uint32 -} - -func bpfLinkUpdate(attr *bpfLinkUpdateAttr) error { - _, err := internal.BPF(internal.BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - -var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { - prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Type: ebpf.CGroupSKB, - AttachType: ebpf.AttachCGroupInetIngress, - License: "MIT", - Instructions: asm.Instructions{ - asm.Mov.Imm(asm.R0, 0), - asm.Return(), - }, - }) - if err != nil { - return internal.ErrNotSupported - } - defer prog.Close() - - attr := bpfLinkCreateAttr{ - // This is a hopefully invalid file descriptor, which triggers EBADF. 
- targetFd: ^uint32(0), - progFd: uint32(prog.FD()), - attachType: ebpf.AttachCGroupInetIngress, - } - _, err = bpfLinkCreate(&attr) - if errors.Is(err, unix.EINVAL) { - return internal.ErrNotSupported - } - if errors.Is(err, unix.EBADF) { - return nil - } - return err -}) - -type bpfIterCreateAttr struct { - linkFd uint32 - flags uint32 -} - -func bpfIterCreate(attr *bpfIterCreateAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err == nil { - return internal.NewFD(uint32(ptr)), nil - } - return nil, err -} - -type bpfRawTracepointOpenAttr struct { - name internal.Pointer - fd uint32 - _ uint32 -} - -func bpfRawTracepointOpen(attr *bpfRawTracepointOpenAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err == nil { - return internal.NewFD(uint32(ptr)), nil - } - return nil, err -} diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go deleted file mode 100644 index b8ae04b..0000000 --- a/vendor/github.com/cilium/ebpf/link/tracepoint.go +++ /dev/null @@ -1,56 +0,0 @@ -package link - -import ( - "fmt" - - "github.com/cilium/ebpf" -) - -// Tracepoint attaches the given eBPF program to the tracepoint with the given -// group and name. See /sys/kernel/debug/tracing/events to find available -// tracepoints. The top-level directory is the group, the event's subdirectory -// is the name. Example: -// -// Tracepoint("syscalls", "sys_enter_fork", prog) -// -// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is -// only possible as of kernel 4.14 (commit cf5f5ce). 
-func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) { - if group == "" || name == "" { - return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput) - } - if prog == nil { - return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) - } - if !rgxTraceEvent.MatchString(group) || !rgxTraceEvent.MatchString(name) { - return nil, fmt.Errorf("group and name '%s/%s' must be alphanumeric or underscore: %w", group, name, errInvalidInput) - } - if prog.Type() != ebpf.TracePoint { - return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput) - } - - tid, err := getTraceEventID(group, name) - if err != nil { - return nil, err - } - - fd, err := openTracepointPerfEvent(tid) - if err != nil { - return nil, err - } - - pe := &perfEvent{ - fd: fd, - tracefsID: tid, - group: group, - name: name, - typ: tracepointEvent, - } - - if err := pe.attach(prog); err != nil { - pe.Close() - return nil, err - } - - return pe, nil -} diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go deleted file mode 100644 index 2bc395e..0000000 --- a/vendor/github.com/cilium/ebpf/link/uprobe.go +++ /dev/null @@ -1,237 +0,0 @@ -package link - -import ( - "debug/elf" - "errors" - "fmt" - "os" - "path/filepath" - "regexp" - "sync" - - "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" -) - -var ( - uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events") - - // rgxUprobeSymbol is used to strip invalid characters from the uprobe symbol - // as they are not allowed to be used as the EVENT token in tracefs. - rgxUprobeSymbol = regexp.MustCompile("[^a-zA-Z0-9]+") - - uprobeRetprobeBit = struct { - once sync.Once - value uint64 - err error - }{} -) - -// Executable defines an executable program on the filesystem. -type Executable struct { - // Path of the executable on the filesystem. - path string - // Parsed ELF symbols and dynamic symbols. 
- symbols map[string]elf.Symbol -} - -// UprobeOptions defines additional parameters that will be used -// when loading Uprobes. -type UprobeOptions struct { - // Symbol offset. Must be provided in case of external symbols (shared libs). - // If set, overrides the offset eventually parsed from the executable. - Offset uint64 -} - -// To open a new Executable, use: -// -// OpenExecutable("/bin/bash") -// -// The returned value can then be used to open Uprobe(s). -func OpenExecutable(path string) (*Executable, error) { - if path == "" { - return nil, fmt.Errorf("path cannot be empty") - } - - f, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("open file '%s': %w", path, err) - } - defer f.Close() - - se, err := internal.NewSafeELFFile(f) - if err != nil { - return nil, fmt.Errorf("parse ELF file: %w", err) - } - - var ex = Executable{ - path: path, - symbols: make(map[string]elf.Symbol), - } - if err := ex.addSymbols(se.Symbols); err != nil { - return nil, err - } - - if err := ex.addSymbols(se.DynamicSymbols); err != nil { - return nil, err - } - - return &ex, nil -} - -func (ex *Executable) addSymbols(f func() ([]elf.Symbol, error)) error { - // elf.Symbols and elf.DynamicSymbols return ErrNoSymbols if the section is not found. - syms, err := f() - if err != nil && !errors.Is(err, elf.ErrNoSymbols) { - return err - } - for _, s := range syms { - if elf.ST_TYPE(s.Info) != elf.STT_FUNC { - // Symbol not associated with a function or other executable code. - continue - } - ex.symbols[s.Name] = s - } - return nil -} - -func (ex *Executable) symbol(symbol string) (*elf.Symbol, error) { - if s, ok := ex.symbols[symbol]; ok { - return &s, nil - } - return nil, fmt.Errorf("symbol %s not found", symbol) -} - -// Uprobe attaches the given eBPF program to a perf event that fires when the -// given symbol starts executing in the given Executable. 
-// For example, /bin/bash::main(): -// -// ex, _ = OpenExecutable("/bin/bash") -// ex.Uprobe("main", prog, nil) -// -// When using symbols which belongs to shared libraries, -// an offset must be provided via options: -// -// ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) -// -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. Functions provided by shared libraries can currently not -// be traced and will result in an ErrNotSupported. -func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { - u, err := ex.uprobe(symbol, prog, opts, false) - if err != nil { - return nil, err - } - - err = u.attach(prog) - if err != nil { - u.Close() - return nil, err - } - - return u, nil -} - -// Uretprobe attaches the given eBPF program to a perf event that fires right -// before the given symbol exits. For example, /bin/bash::main(): -// -// ex, _ = OpenExecutable("/bin/bash") -// ex.Uretprobe("main", prog, nil) -// -// When using symbols which belongs to shared libraries, -// an offset must be provided via options: -// -// ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) -// -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. Functions provided by shared libraries can currently not -// be traced and will result in an ErrNotSupported. -func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { - u, err := ex.uprobe(symbol, prog, opts, true) - if err != nil { - return nil, err - } - - err = u.attach(prog) - if err != nil { - u.Close() - return nil, err - } - - return u, nil -} - -// uprobe opens a perf event for the given binary/symbol and attaches prog to it. -// If ret is true, create a uretprobe. 
-func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions, ret bool) (*perfEvent, error) { - if prog == nil { - return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) - } - if prog.Type() != ebpf.Kprobe { - return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput) - } - - var offset uint64 - if opts != nil && opts.Offset != 0 { - offset = opts.Offset - } else { - sym, err := ex.symbol(symbol) - if err != nil { - return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, err) - } - - // Symbols with location 0 from section undef are shared library calls and - // are relocated before the binary is executed. Dynamic linking is not - // implemented by the library, so mark this as unsupported for now. - if sym.Section == elf.SHN_UNDEF && sym.Value == 0 { - return nil, fmt.Errorf("cannot resolve %s library call '%s', "+ - "consider providing the offset via options: %w", ex.path, symbol, ErrNotSupported) - } - - offset = sym.Value - } - - // Use uprobe PMU if the kernel has it available. - tp, err := pmuUprobe(symbol, ex.path, offset, ret) - if err == nil { - return tp, nil - } - if err != nil && !errors.Is(err, ErrNotSupported) { - return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err) - } - - // Use tracefs if uprobe PMU is missing. - tp, err = tracefsUprobe(uprobeSanitizedSymbol(symbol), ex.path, offset, ret) - if err != nil { - return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err) - } - - return tp, nil -} - -// pmuUprobe opens a perf event based on the uprobe PMU. -func pmuUprobe(symbol, path string, offset uint64, ret bool) (*perfEvent, error) { - return pmuProbe(uprobeType, symbol, path, offset, ret) -} - -// tracefsUprobe creates a Uprobe tracefs entry. 
-func tracefsUprobe(symbol, path string, offset uint64, ret bool) (*perfEvent, error) { - return tracefsProbe(uprobeType, symbol, path, offset, ret) -} - -// uprobeSanitizedSymbol replaces every invalid characted for the tracefs api with an underscore. -func uprobeSanitizedSymbol(symbol string) string { - return rgxUprobeSymbol.ReplaceAllString(symbol, "_") -} - -// uprobePathOffset creates the PATH:OFFSET token for the tracefs api. -func uprobePathOffset(path string, offset uint64) string { - return fmt.Sprintf("%s:%#x", path, offset) -} - -func uretprobeBit() (uint64, error) { - uprobeRetprobeBit.once.Do(func() { - uprobeRetprobeBit.value, uprobeRetprobeBit.err = determineRetprobeBit(uprobeType) - }) - return uprobeRetprobeBit.value, uprobeRetprobeBit.err -} diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go deleted file mode 100644 index 6c2efef..0000000 --- a/vendor/github.com/cilium/ebpf/linker.go +++ /dev/null @@ -1,140 +0,0 @@ -package ebpf - -import ( - "fmt" - - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal/btf" -) - -// link resolves bpf-to-bpf calls. -// -// Each library may contain multiple functions / labels, and is only linked -// if prog references one of these functions. -// -// Libraries also linked. -func link(prog *ProgramSpec, libs []*ProgramSpec) error { - var ( - linked = make(map[*ProgramSpec]bool) - pending = []asm.Instructions{prog.Instructions} - insns asm.Instructions - ) - for len(pending) > 0 { - insns, pending = pending[0], pending[1:] - for _, lib := range libs { - if linked[lib] { - continue - } - - needed, err := needSection(insns, lib.Instructions) - if err != nil { - return fmt.Errorf("linking %s: %w", lib.Name, err) - } - - if !needed { - continue - } - - linked[lib] = true - prog.Instructions = append(prog.Instructions, lib.Instructions...) 
- pending = append(pending, lib.Instructions) - - if prog.BTF != nil && lib.BTF != nil { - if err := btf.ProgramAppend(prog.BTF, lib.BTF); err != nil { - return fmt.Errorf("linking BTF of %s: %w", lib.Name, err) - } - } - } - } - - return nil -} - -func needSection(insns, section asm.Instructions) (bool, error) { - // A map of symbols to the libraries which contain them. - symbols, err := section.SymbolOffsets() - if err != nil { - return false, err - } - - for _, ins := range insns { - if ins.Reference == "" { - continue - } - - if ins.OpCode.JumpOp() != asm.Call || ins.Src != asm.PseudoCall { - continue - } - - if ins.Constant != -1 { - // This is already a valid call, no need to link again. - continue - } - - if _, ok := symbols[ins.Reference]; !ok { - // Symbol isn't available in this section - continue - } - - // At this point we know that at least one function in the - // library is called from insns, so we have to link it. - return true, nil - } - - // None of the functions in the section are called. 
- return false, nil -} - -func fixupJumpsAndCalls(insns asm.Instructions) error { - symbolOffsets := make(map[string]asm.RawInstructionOffset) - iter := insns.Iterate() - for iter.Next() { - ins := iter.Ins - - if ins.Symbol == "" { - continue - } - - if _, ok := symbolOffsets[ins.Symbol]; ok { - return fmt.Errorf("duplicate symbol %s", ins.Symbol) - } - - symbolOffsets[ins.Symbol] = iter.Offset - } - - iter = insns.Iterate() - for iter.Next() { - i := iter.Index - offset := iter.Offset - ins := iter.Ins - - if ins.Reference == "" { - continue - } - - switch { - case ins.IsFunctionCall() && ins.Constant == -1: - // Rewrite bpf to bpf call - callOffset, ok := symbolOffsets[ins.Reference] - if !ok { - return fmt.Errorf("call at %d: reference to missing symbol %q", i, ins.Reference) - } - - ins.Constant = int64(callOffset - offset - 1) - - case ins.OpCode.Class() == asm.JumpClass && ins.Offset == -1: - // Rewrite jump to label - jumpOffset, ok := symbolOffsets[ins.Reference] - if !ok { - return fmt.Errorf("jump at %d: reference to missing symbol %q", i, ins.Reference) - } - - ins.Offset = int16(jumpOffset - offset - 1) - - case ins.IsLoadFromMap() && ins.MapPtr() == -1: - return fmt.Errorf("map %s: %w", ins.Reference, errUnsatisfiedReference) - } - } - - return nil -} diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go deleted file mode 100644 index f257d88..0000000 --- a/vendor/github.com/cilium/ebpf/map.go +++ /dev/null @@ -1,1232 +0,0 @@ -package ebpf - -import ( - "errors" - "fmt" - "io" - "path/filepath" - "reflect" - "strings" - - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" - "github.com/cilium/ebpf/internal/unix" -) - -// Errors returned by Map and MapIterator methods. 
-var ( - ErrKeyNotExist = errors.New("key does not exist") - ErrKeyExist = errors.New("key already exists") - ErrIterationAborted = errors.New("iteration aborted") - ErrMapIncompatible = errors.New("map's spec is incompatible with pinned map") -) - -// MapOptions control loading a map into the kernel. -type MapOptions struct { - // The base path to pin maps in if requested via PinByName. - // Existing maps will be re-used if they are compatible, otherwise an - // error is returned. - PinPath string - LoadPinOptions LoadPinOptions -} - -// MapID represents the unique ID of an eBPF map -type MapID uint32 - -// MapSpec defines a Map. -type MapSpec struct { - // Name is passed to the kernel as a debug aid. Must only contain - // alpha numeric and '_' characters. - Name string - Type MapType - KeySize uint32 - ValueSize uint32 - MaxEntries uint32 - - // Flags is passed to the kernel and specifies additional map - // creation attributes. - Flags uint32 - - // Automatically pin and load a map from MapOptions.PinPath. - // Generates an error if an existing pinned map is incompatible with the MapSpec. - Pinning PinType - - // Specify numa node during map creation - // (effective only if unix.BPF_F_NUMA_NODE flag is set, - // which can be imported from golang.org/x/sys/unix) - NumaNode uint32 - - // The initial contents of the map. May be nil. - Contents []MapKV - - // Whether to freeze a map after setting its initial contents. - Freeze bool - - // InnerMap is used as a template for ArrayOfMaps and HashOfMaps - InnerMap *MapSpec - - // The BTF associated with this map. - BTF *btf.Map -} - -func (ms *MapSpec) String() string { - return fmt.Sprintf("%s(keySize=%d, valueSize=%d, maxEntries=%d, flags=%d)", ms.Type, ms.KeySize, ms.ValueSize, ms.MaxEntries, ms.Flags) -} - -// Copy returns a copy of the spec. -// -// MapSpec.Contents is a shallow copy. 
-func (ms *MapSpec) Copy() *MapSpec { - if ms == nil { - return nil - } - - cpy := *ms - cpy.Contents = make([]MapKV, len(ms.Contents)) - copy(cpy.Contents, ms.Contents) - cpy.InnerMap = ms.InnerMap.Copy() - return &cpy -} - -func (ms *MapSpec) clampPerfEventArraySize() error { - if ms.Type != PerfEventArray { - return nil - } - - n, err := internal.PossibleCPUs() - if err != nil { - return fmt.Errorf("perf event array: %w", err) - } - - if n := uint32(n); ms.MaxEntries > n { - ms.MaxEntries = n - } - - return nil -} - -// MapKV is used to initialize the contents of a Map. -type MapKV struct { - Key interface{} - Value interface{} -} - -func (ms *MapSpec) checkCompatibility(m *Map) error { - switch { - case m.typ != ms.Type: - return fmt.Errorf("expected type %v, got %v: %w", ms.Type, m.typ, ErrMapIncompatible) - - case m.keySize != ms.KeySize: - return fmt.Errorf("expected key size %v, got %v: %w", ms.KeySize, m.keySize, ErrMapIncompatible) - - case m.valueSize != ms.ValueSize: - return fmt.Errorf("expected value size %v, got %v: %w", ms.ValueSize, m.valueSize, ErrMapIncompatible) - - case m.maxEntries != ms.MaxEntries: - return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible) - - case m.flags != ms.Flags: - return fmt.Errorf("expected flags %v, got %v: %w", ms.Flags, m.flags, ErrMapIncompatible) - } - return nil -} - -// Map represents a Map file descriptor. -// -// It is not safe to close a map which is used by other goroutines. -// -// Methods which take interface{} arguments by default encode -// them using binary.Read/Write in the machine's native endianness. -// -// Implement encoding.BinaryMarshaler or encoding.BinaryUnmarshaler -// if you require custom encoding. 
-type Map struct { - name string - fd *internal.FD - typ MapType - keySize uint32 - valueSize uint32 - maxEntries uint32 - flags uint32 - pinnedPath string - // Per CPU maps return values larger than the size in the spec - fullValueSize int -} - -// NewMapFromFD creates a map from a raw fd. -// -// You should not use fd after calling this function. -func NewMapFromFD(fd int) (*Map, error) { - if fd < 0 { - return nil, errors.New("invalid fd") - } - - return newMapFromFD(internal.NewFD(uint32(fd))) -} - -func newMapFromFD(fd *internal.FD) (*Map, error) { - info, err := newMapInfoFromFd(fd) - if err != nil { - fd.Close() - return nil, fmt.Errorf("get map info: %s", err) - } - - return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) -} - -// NewMap creates a new Map. -// -// It's equivalent to calling NewMapWithOptions with default options. -func NewMap(spec *MapSpec) (*Map, error) { - return NewMapWithOptions(spec, MapOptions{}) -} - -// NewMapWithOptions creates a new Map. -// -// Creating a map for the first time will perform feature detection -// by creating small, temporary maps. -// -// The caller is responsible for ensuring the process' rlimit is set -// sufficiently high for locking memory during map creation. This can be done -// by calling unix.Setrlimit with unix.RLIMIT_MEMLOCK prior to calling NewMapWithOptions. -// -// May return an error wrapping ErrMapIncompatible. 
-func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) { - handles := newHandleCache() - defer handles.close() - - return newMapWithOptions(spec, opts, handles) -} - -func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ *Map, err error) { - closeOnError := func(c io.Closer) { - if err != nil { - c.Close() - } - } - - switch spec.Pinning { - case PinByName: - if spec.Name == "" || opts.PinPath == "" { - return nil, fmt.Errorf("pin by name: missing Name or PinPath") - } - - path := filepath.Join(opts.PinPath, spec.Name) - m, err := LoadPinnedMap(path, &opts.LoadPinOptions) - if errors.Is(err, unix.ENOENT) { - break - } - if err != nil { - return nil, fmt.Errorf("load pinned map: %w", err) - } - defer closeOnError(m) - - if err := spec.checkCompatibility(m); err != nil { - return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err) - } - - return m, nil - - case PinNone: - // Nothing to do here - - default: - return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported) - } - - var innerFd *internal.FD - if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps { - if spec.InnerMap == nil { - return nil, fmt.Errorf("%s requires InnerMap", spec.Type) - } - - if spec.InnerMap.Pinning != PinNone { - return nil, errors.New("inner maps cannot be pinned") - } - - template, err := createMap(spec.InnerMap, nil, opts, handles) - if err != nil { - return nil, err - } - defer template.Close() - - innerFd = template.fd - } - - m, err := createMap(spec, innerFd, opts, handles) - if err != nil { - return nil, err - } - defer closeOnError(m) - - if spec.Pinning == PinByName { - path := filepath.Join(opts.PinPath, spec.Name) - if err := m.Pin(path); err != nil { - return nil, fmt.Errorf("pin map: %s", err) - } - } - - return m, nil -} - -func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) { - closeOnError := func(closer io.Closer) { - if err != nil { - closer.Close() - 
} - } - - spec = spec.Copy() - - switch spec.Type { - case ArrayOfMaps: - fallthrough - case HashOfMaps: - if err := haveNestedMaps(); err != nil { - return nil, err - } - - if spec.ValueSize != 0 && spec.ValueSize != 4 { - return nil, errors.New("ValueSize must be zero or four for map of map") - } - spec.ValueSize = 4 - - case PerfEventArray: - if spec.KeySize != 0 && spec.KeySize != 4 { - return nil, errors.New("KeySize must be zero or four for perf event array") - } - spec.KeySize = 4 - - if spec.ValueSize != 0 && spec.ValueSize != 4 { - return nil, errors.New("ValueSize must be zero or four for perf event array") - } - spec.ValueSize = 4 - - if spec.MaxEntries == 0 { - n, err := internal.PossibleCPUs() - if err != nil { - return nil, fmt.Errorf("perf event array: %w", err) - } - spec.MaxEntries = uint32(n) - } - } - - if spec.Flags&(unix.BPF_F_RDONLY_PROG|unix.BPF_F_WRONLY_PROG) > 0 || spec.Freeze { - if err := haveMapMutabilityModifiers(); err != nil { - return nil, fmt.Errorf("map create: %w", err) - } - } - if spec.Flags&unix.BPF_F_MMAPABLE > 0 { - if err := haveMmapableMaps(); err != nil { - return nil, fmt.Errorf("map create: %w", err) - } - } - if spec.Flags&unix.BPF_F_INNER_MAP > 0 { - if err := haveInnerMaps(); err != nil { - return nil, fmt.Errorf("map create: %w", err) - } - } - - attr := internal.BPFMapCreateAttr{ - MapType: uint32(spec.Type), - KeySize: spec.KeySize, - ValueSize: spec.ValueSize, - MaxEntries: spec.MaxEntries, - Flags: spec.Flags, - NumaNode: spec.NumaNode, - } - - if inner != nil { - var err error - attr.InnerMapFd, err = inner.Value() - if err != nil { - return nil, fmt.Errorf("map create: %w", err) - } - } - - if haveObjName() == nil { - attr.MapName = internal.NewBPFObjName(spec.Name) - } - - var btfDisabled bool - if spec.BTF != nil { - handle, err := handles.btfHandle(btf.MapSpec(spec.BTF)) - btfDisabled = errors.Is(err, btf.ErrNotSupported) - if err != nil && !btfDisabled { - return nil, fmt.Errorf("load BTF: %w", err) - } - - 
if handle != nil { - attr.BTFFd = uint32(handle.FD()) - attr.BTFKeyTypeID = uint32(btf.MapKey(spec.BTF).ID()) - attr.BTFValueTypeID = uint32(btf.MapValue(spec.BTF).ID()) - } - } - - fd, err := internal.BPFMapCreate(&attr) - if err != nil { - if errors.Is(err, unix.EPERM) { - return nil, fmt.Errorf("map create: RLIMIT_MEMLOCK may be too low: %w", err) - } - if btfDisabled { - return nil, fmt.Errorf("map create without BTF: %w", err) - } - return nil, fmt.Errorf("map create: %w", err) - } - defer closeOnError(fd) - - m, err := newMap(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags) - if err != nil { - return nil, fmt.Errorf("map create: %w", err) - } - - if err := m.populate(spec.Contents); err != nil { - return nil, fmt.Errorf("map create: can't set initial contents: %w", err) - } - - if spec.Freeze { - if err := m.Freeze(); err != nil { - return nil, fmt.Errorf("can't freeze map: %w", err) - } - } - - return m, nil -} - -func newMap(fd *internal.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { - m := &Map{ - name, - fd, - typ, - keySize, - valueSize, - maxEntries, - flags, - "", - int(valueSize), - } - - if !typ.hasPerCPUValue() { - return m, nil - } - - possibleCPUs, err := internal.PossibleCPUs() - if err != nil { - return nil, err - } - - m.fullValueSize = align(int(valueSize), 8) * possibleCPUs - return m, nil -} - -func (m *Map) String() string { - if m.name != "" { - return fmt.Sprintf("%s(%s)#%v", m.typ, m.name, m.fd) - } - return fmt.Sprintf("%s#%v", m.typ, m.fd) -} - -// Type returns the underlying type of the map. -func (m *Map) Type() MapType { - return m.typ -} - -// KeySize returns the size of the map key in bytes. -func (m *Map) KeySize() uint32 { - return m.keySize -} - -// ValueSize returns the size of the map value in bytes. -func (m *Map) ValueSize() uint32 { - return m.valueSize -} - -// MaxEntries returns the maximum number of elements the map can hold. 
-func (m *Map) MaxEntries() uint32 { - return m.maxEntries -} - -// Flags returns the flags of the map. -func (m *Map) Flags() uint32 { - return m.flags -} - -// Info returns metadata about the map. -func (m *Map) Info() (*MapInfo, error) { - return newMapInfoFromFd(m.fd) -} - -// Lookup retrieves a value from a Map. -// -// Calls Close() on valueOut if it is of type **Map or **Program, -// and *valueOut is not nil. -// -// Returns an error if the key doesn't exist, see ErrKeyNotExist. -func (m *Map) Lookup(key, valueOut interface{}) error { - valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) - if err := m.lookup(key, valuePtr); err != nil { - return err - } - - return m.unmarshalValue(valueOut, valueBytes) -} - -// LookupAndDelete retrieves and deletes a value from a Map. -// -// Returns ErrKeyNotExist if the key doesn't exist. -func (m *Map) LookupAndDelete(key, valueOut interface{}) error { - valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) - - keyPtr, err := m.marshalKey(key) - if err != nil { - return fmt.Errorf("can't marshal key: %w", err) - } - - if err := bpfMapLookupAndDelete(m.fd, keyPtr, valuePtr); err != nil { - return fmt.Errorf("lookup and delete failed: %w", err) - } - - return m.unmarshalValue(valueOut, valueBytes) -} - -// LookupBytes gets a value from Map. -// -// Returns a nil value if a key doesn't exist. 
-func (m *Map) LookupBytes(key interface{}) ([]byte, error) { - valueBytes := make([]byte, m.fullValueSize) - valuePtr := internal.NewSlicePointer(valueBytes) - - err := m.lookup(key, valuePtr) - if errors.Is(err, ErrKeyNotExist) { - return nil, nil - } - - return valueBytes, err -} - -func (m *Map) lookup(key interface{}, valueOut internal.Pointer) error { - keyPtr, err := m.marshalKey(key) - if err != nil { - return fmt.Errorf("can't marshal key: %w", err) - } - - if err = bpfMapLookupElem(m.fd, keyPtr, valueOut); err != nil { - return fmt.Errorf("lookup failed: %w", err) - } - return nil -} - -// MapUpdateFlags controls the behaviour of the Map.Update call. -// -// The exact semantics depend on the specific MapType. -type MapUpdateFlags uint64 - -const ( - // UpdateAny creates a new element or update an existing one. - UpdateAny MapUpdateFlags = iota - // UpdateNoExist creates a new element. - UpdateNoExist MapUpdateFlags = 1 << (iota - 1) - // UpdateExist updates an existing element. - UpdateExist -) - -// Put replaces or creates a value in map. -// -// It is equivalent to calling Update with UpdateAny. -func (m *Map) Put(key, value interface{}) error { - return m.Update(key, value, UpdateAny) -} - -// Update changes the value of a key. -func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error { - keyPtr, err := m.marshalKey(key) - if err != nil { - return fmt.Errorf("can't marshal key: %w", err) - } - - valuePtr, err := m.marshalValue(value) - if err != nil { - return fmt.Errorf("can't marshal value: %w", err) - } - - if err = bpfMapUpdateElem(m.fd, keyPtr, valuePtr, uint64(flags)); err != nil { - return fmt.Errorf("update failed: %w", err) - } - - return nil -} - -// Delete removes a value. -// -// Returns ErrKeyNotExist if the key does not exist. 
-func (m *Map) Delete(key interface{}) error { - keyPtr, err := m.marshalKey(key) - if err != nil { - return fmt.Errorf("can't marshal key: %w", err) - } - - if err = bpfMapDeleteElem(m.fd, keyPtr); err != nil { - return fmt.Errorf("delete failed: %w", err) - } - return nil -} - -// NextKey finds the key following an initial key. -// -// See NextKeyBytes for details. -// -// Returns ErrKeyNotExist if there is no next key. -func (m *Map) NextKey(key, nextKeyOut interface{}) error { - nextKeyPtr, nextKeyBytes := makeBuffer(nextKeyOut, int(m.keySize)) - - if err := m.nextKey(key, nextKeyPtr); err != nil { - return err - } - - if err := m.unmarshalKey(nextKeyOut, nextKeyBytes); err != nil { - return fmt.Errorf("can't unmarshal next key: %w", err) - } - return nil -} - -// NextKeyBytes returns the key following an initial key as a byte slice. -// -// Passing nil will return the first key. -// -// Use Iterate if you want to traverse all entries in the map. -// -// Returns nil if there are no more keys. -func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { - nextKey := make([]byte, m.keySize) - nextKeyPtr := internal.NewSlicePointer(nextKey) - - err := m.nextKey(key, nextKeyPtr) - if errors.Is(err, ErrKeyNotExist) { - return nil, nil - } - - return nextKey, err -} - -func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error { - var ( - keyPtr internal.Pointer - err error - ) - - if key != nil { - keyPtr, err = m.marshalKey(key) - if err != nil { - return fmt.Errorf("can't marshal key: %w", err) - } - } - - if err = bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut); err != nil { - return fmt.Errorf("next key failed: %w", err) - } - return nil -} - -// BatchLookup looks up many elements in a map at once. -// -// "keysOut" and "valuesOut" must be of type slice, a pointer -// to a slice or buffer will not work. -// "prevKey" is the key to start the batch lookup from, it will -// *not* be included in the results. Use nil to start at the first key. 
-// -// ErrKeyNotExist is returned when the batch lookup has reached -// the end of all possible results, even when partial results -// are returned. It should be used to evaluate when lookup is "done". -func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(internal.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) -} - -// BatchLookupAndDelete looks up many elements in a map at once, -// -// It then deletes all those elements. -// "keysOut" and "valuesOut" must be of type slice, a pointer -// to a slice or buffer will not work. -// "prevKey" is the key to start the batch lookup from, it will -// *not* be included in the results. Use nil to start at the first key. -// -// ErrKeyNotExist is returned when the batch lookup has reached -// the end of all possible results, even when partial results -// are returned. It should be used to evaluate when lookup is "done". -func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(internal.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) -} - -func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - if err := haveBatchAPI(); err != nil { - return 0, err - } - if m.typ.hasPerCPUValue() { - return 0, ErrNotSupported - } - keysValue := reflect.ValueOf(keysOut) - if keysValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("keys must be a slice") - } - valuesValue := reflect.ValueOf(valuesOut) - if valuesValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("valuesOut must be a slice") - } - count := keysValue.Len() - if count != valuesValue.Len() { - return 0, fmt.Errorf("keysOut and valuesOut must be the same length") - } - keyBuf := make([]byte, count*int(m.keySize)) - keyPtr := internal.NewSlicePointer(keyBuf) - valueBuf := 
make([]byte, count*int(m.fullValueSize)) - valuePtr := internal.NewSlicePointer(valueBuf) - - var ( - startPtr internal.Pointer - err error - retErr error - ) - if startKey != nil { - startPtr, err = marshalPtr(startKey, int(m.keySize)) - if err != nil { - return 0, err - } - } - nextPtr, nextBuf := makeBuffer(nextKeyOut, int(m.keySize)) - - ct, err := bpfMapBatch(cmd, m.fd, startPtr, nextPtr, keyPtr, valuePtr, uint32(count), opts) - if err != nil { - if !errors.Is(err, ErrKeyNotExist) { - return 0, err - } - retErr = ErrKeyNotExist - } - - err = m.unmarshalKey(nextKeyOut, nextBuf) - if err != nil { - return 0, err - } - err = unmarshalBytes(keysOut, keyBuf) - if err != nil { - return 0, err - } - err = unmarshalBytes(valuesOut, valueBuf) - if err != nil { - retErr = err - } - return int(ct), retErr -} - -// BatchUpdate updates the map with multiple keys and values -// simultaneously. -// "keys" and "values" must be of type slice, a pointer -// to a slice or buffer will not work. -func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) { - if err := haveBatchAPI(); err != nil { - return 0, err - } - if m.typ.hasPerCPUValue() { - return 0, ErrNotSupported - } - keysValue := reflect.ValueOf(keys) - if keysValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("keys must be a slice") - } - valuesValue := reflect.ValueOf(values) - if valuesValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("values must be a slice") - } - var ( - count = keysValue.Len() - valuePtr internal.Pointer - err error - ) - if count != valuesValue.Len() { - return 0, fmt.Errorf("keys and values must be the same length") - } - keyPtr, err := marshalPtr(keys, count*int(m.keySize)) - if err != nil { - return 0, err - } - valuePtr, err = marshalPtr(values, count*int(m.valueSize)) - if err != nil { - return 0, err - } - var nilPtr internal.Pointer - ct, err := bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, valuePtr, uint32(count), opts) - 
return int(ct), err -} - -// BatchDelete batch deletes entries in the map by keys. -// "keys" must be of type slice, a pointer to a slice or buffer will not work. -func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { - if err := haveBatchAPI(); err != nil { - return 0, err - } - if m.typ.hasPerCPUValue() { - return 0, ErrNotSupported - } - keysValue := reflect.ValueOf(keys) - if keysValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("keys must be a slice") - } - count := keysValue.Len() - keyPtr, err := marshalPtr(keys, count*int(m.keySize)) - if err != nil { - return 0, fmt.Errorf("cannot marshal keys: %v", err) - } - var nilPtr internal.Pointer - ct, err := bpfMapBatch(internal.BPF_MAP_DELETE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, nilPtr, uint32(count), opts) - return int(ct), err -} - -// Iterate traverses a map. -// -// It's safe to create multiple iterators at the same time. -// -// It's not possible to guarantee that all keys in a map will be -// returned if there are concurrent modifications to the map. -func (m *Map) Iterate() *MapIterator { - return newMapIterator(m) -} - -// Close removes a Map -func (m *Map) Close() error { - if m == nil { - // This makes it easier to clean up when iterating maps - // of maps / programs. - return nil - } - - return m.fd.Close() -} - -// FD gets the file descriptor of the Map. -// -// Calling this function is invalid after Close has been called. -func (m *Map) FD() int { - fd, err := m.fd.Value() - if err != nil { - // Best effort: -1 is the number most likely to be an - // invalid file descriptor. - return -1 - } - - return int(fd) -} - -// Clone creates a duplicate of the Map. -// -// Closing the duplicate does not affect the original, and vice versa. -// Changes made to the map are reflected by both instances however. -// If the original map was pinned, the cloned map will not be pinned by default. -// -// Cloning a nil Map returns nil. 
-func (m *Map) Clone() (*Map, error) { - if m == nil { - return nil, nil - } - - dup, err := m.fd.Dup() - if err != nil { - return nil, fmt.Errorf("can't clone map: %w", err) - } - - return &Map{ - m.name, - dup, - m.typ, - m.keySize, - m.valueSize, - m.maxEntries, - m.flags, - "", - m.fullValueSize, - }, nil -} - -// Pin persists the map on the BPF virtual file system past the lifetime of -// the process that created it . -// -// Calling Pin on a previously pinned map will overwrite the path, except when -// the new path already exists. Re-pinning across filesystems is not supported. -// You can Clone a map to pin it to a different path. -// -// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs -func (m *Map) Pin(fileName string) error { - if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil { - return err - } - m.pinnedPath = fileName - return nil -} - -// Unpin removes the persisted state for the map from the BPF virtual filesystem. -// -// Failed calls to Unpin will not alter the state returned by IsPinned. -// -// Unpinning an unpinned Map returns nil. -func (m *Map) Unpin() error { - if err := internal.Unpin(m.pinnedPath); err != nil { - return err - } - m.pinnedPath = "" - return nil -} - -// IsPinned returns true if the map has a non-empty pinned path. -func (m *Map) IsPinned() bool { - return m.pinnedPath != "" -} - -// Freeze prevents a map to be modified from user space. -// -// It makes no changes to kernel-side restrictions. 
-func (m *Map) Freeze() error { - if err := haveMapMutabilityModifiers(); err != nil { - return fmt.Errorf("can't freeze map: %w", err) - } - - if err := bpfMapFreeze(m.fd); err != nil { - return fmt.Errorf("can't freeze map: %w", err) - } - return nil -} - -func (m *Map) populate(contents []MapKV) error { - for _, kv := range contents { - if err := m.Put(kv.Key, kv.Value); err != nil { - return fmt.Errorf("key %v: %w", kv.Key, err) - } - } - return nil -} - -func (m *Map) marshalKey(data interface{}) (internal.Pointer, error) { - if data == nil { - if m.keySize == 0 { - // Queues have a key length of zero, so passing nil here is valid. - return internal.NewPointer(nil), nil - } - return internal.Pointer{}, errors.New("can't use nil as key of map") - } - - return marshalPtr(data, int(m.keySize)) -} - -func (m *Map) unmarshalKey(data interface{}, buf []byte) error { - if buf == nil { - // This is from a makeBuffer call, nothing do do here. - return nil - } - - return unmarshalBytes(data, buf) -} - -func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) { - if m.typ.hasPerCPUValue() { - return marshalPerCPUValue(data, int(m.valueSize)) - } - - var ( - buf []byte - err error - ) - - switch value := data.(type) { - case *Map: - if !m.typ.canStoreMap() { - return internal.Pointer{}, fmt.Errorf("can't store map in %s", m.typ) - } - buf, err = marshalMap(value, int(m.valueSize)) - - case *Program: - if !m.typ.canStoreProgram() { - return internal.Pointer{}, fmt.Errorf("can't store program in %s", m.typ) - } - buf, err = marshalProgram(value, int(m.valueSize)) - - default: - return marshalPtr(data, int(m.valueSize)) - } - - if err != nil { - return internal.Pointer{}, err - } - - return internal.NewSlicePointer(buf), nil -} - -func (m *Map) unmarshalValue(value interface{}, buf []byte) error { - if buf == nil { - // This is from a makeBuffer call, nothing do do here. 
- return nil - } - - if m.typ.hasPerCPUValue() { - return unmarshalPerCPUValue(value, int(m.valueSize), buf) - } - - switch value := value.(type) { - case **Map: - if !m.typ.canStoreMap() { - return fmt.Errorf("can't read a map from %s", m.typ) - } - - other, err := unmarshalMap(buf) - if err != nil { - return err - } - - // The caller might close the map externally, so ignore errors. - _ = (*value).Close() - - *value = other - return nil - - case *Map: - if !m.typ.canStoreMap() { - return fmt.Errorf("can't read a map from %s", m.typ) - } - return errors.New("require pointer to *Map") - - case **Program: - if !m.typ.canStoreProgram() { - return fmt.Errorf("can't read a program from %s", m.typ) - } - - other, err := unmarshalProgram(buf) - if err != nil { - return err - } - - // The caller might close the program externally, so ignore errors. - _ = (*value).Close() - - *value = other - return nil - - case *Program: - if !m.typ.canStoreProgram() { - return fmt.Errorf("can't read a program from %s", m.typ) - } - return errors.New("require pointer to *Program") - } - - return unmarshalBytes(value, buf) -} - -// LoadPinnedMap loads a Map from a BPF file. -func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) - if err != nil { - return nil, err - } - - m, err := newMapFromFD(fd) - if err == nil { - m.pinnedPath = fileName - } - - return m, err -} - -// unmarshalMap creates a map from a map ID encoded in host endianness. -func unmarshalMap(buf []byte) (*Map, error) { - if len(buf) != 4 { - return nil, errors.New("map id requires 4 byte value") - } - - id := internal.NativeEndian.Uint32(buf) - return NewMapFromID(MapID(id)) -} - -// marshalMap marshals the fd of a map into a buffer in host endianness. 
-func marshalMap(m *Map, length int) ([]byte, error) { - if length != 4 { - return nil, fmt.Errorf("can't marshal map to %d bytes", length) - } - - fd, err := m.fd.Value() - if err != nil { - return nil, err - } - - buf := make([]byte, 4) - internal.NativeEndian.PutUint32(buf, fd) - return buf, nil -} - -func patchValue(value []byte, typ btf.Type, replacements map[string]interface{}) error { - replaced := make(map[string]bool) - replace := func(name string, offset, size int, replacement interface{}) error { - if offset+size > len(value) { - return fmt.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size) - } - - buf, err := marshalBytes(replacement, size) - if err != nil { - return fmt.Errorf("marshal %s: %w", name, err) - } - - copy(value[offset:offset+size], buf) - replaced[name] = true - return nil - } - - switch parent := typ.(type) { - case *btf.Datasec: - for _, secinfo := range parent.Vars { - name := string(secinfo.Type.(*btf.Var).Name) - replacement, ok := replacements[name] - if !ok { - continue - } - - err := replace(name, int(secinfo.Offset), int(secinfo.Size), replacement) - if err != nil { - return err - } - } - - default: - return fmt.Errorf("patching %T is not supported", typ) - } - - if len(replaced) == len(replacements) { - return nil - } - - var missing []string - for name := range replacements { - if !replaced[name] { - missing = append(missing, name) - } - } - - if len(missing) == 1 { - return fmt.Errorf("unknown field: %s", missing[0]) - } - - return fmt.Errorf("unknown fields: %s", strings.Join(missing, ",")) -} - -// MapIterator iterates a Map. -// -// See Map.Iterate. -type MapIterator struct { - target *Map - prevKey interface{} - prevBytes []byte - count, maxEntries uint32 - done bool - err error -} - -func newMapIterator(target *Map) *MapIterator { - return &MapIterator{ - target: target, - maxEntries: target.maxEntries, - prevBytes: make([]byte, target.keySize), - } -} - -// Next decodes the next key and value. 
-// -// Iterating a hash map from which keys are being deleted is not -// safe. You may see the same key multiple times. Iteration may -// also abort with an error, see IsIterationAborted. -// -// Returns false if there are no more entries. You must check -// the result of Err afterwards. -// -// See Map.Get for further caveats around valueOut. -func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool { - if mi.err != nil || mi.done { - return false - } - - // For array-like maps NextKeyBytes returns nil only on after maxEntries - // iterations. - for mi.count <= mi.maxEntries { - var nextBytes []byte - nextBytes, mi.err = mi.target.NextKeyBytes(mi.prevKey) - if mi.err != nil { - return false - } - - if nextBytes == nil { - mi.done = true - return false - } - - // The user can get access to nextBytes since unmarshalBytes - // does not copy when unmarshaling into a []byte. - // Make a copy to prevent accidental corruption of - // iterator state. - copy(mi.prevBytes, nextBytes) - mi.prevKey = mi.prevBytes - - mi.count++ - mi.err = mi.target.Lookup(nextBytes, valueOut) - if errors.Is(mi.err, ErrKeyNotExist) { - // Even though the key should be valid, we couldn't look up - // its value. If we're iterating a hash map this is probably - // because a concurrent delete removed the value before we - // could get it. This means that the next call to NextKeyBytes - // is very likely to restart iteration. - // If we're iterating one of the fd maps like - // ProgramArray it means that a given slot doesn't have - // a valid fd associated. It's OK to continue to the next slot. - continue - } - if mi.err != nil { - return false - } - - mi.err = mi.target.unmarshalKey(keyOut, nextBytes) - return mi.err == nil - } - - mi.err = fmt.Errorf("%w", ErrIterationAborted) - return false -} - -// Err returns any encountered error. -// -// The method must be called after Next returns nil. -// -// Returns ErrIterationAborted if it wasn't possible to do a full iteration. 
-func (mi *MapIterator) Err() error { - return mi.err -} - -// MapGetNextID returns the ID of the next eBPF map. -// -// Returns ErrNotExist, if there is no next eBPF map. -func MapGetNextID(startID MapID) (MapID, error) { - id, err := objGetNextID(internal.BPF_MAP_GET_NEXT_ID, uint32(startID)) - return MapID(id), err -} - -// NewMapFromID returns the map for a given id. -// -// Returns ErrNotExist, if there is no eBPF map with the given id. -func NewMapFromID(id MapID) (*Map, error) { - fd, err := bpfObjGetFDByID(internal.BPF_MAP_GET_FD_BY_ID, uint32(id)) - if err != nil { - return nil, err - } - - return newMapFromFD(fd) -} - -// ID returns the systemwide unique ID of the map. -// -// Deprecated: use MapInfo.ID() instead. -func (m *Map) ID() (MapID, error) { - info, err := bpfGetMapInfoByFD(m.fd) - if err != nil { - return MapID(0), err - } - return MapID(info.id), nil -} diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go deleted file mode 100644 index f2610ef..0000000 --- a/vendor/github.com/cilium/ebpf/marshalers.go +++ /dev/null @@ -1,218 +0,0 @@ -package ebpf - -import ( - "bytes" - "encoding" - "encoding/binary" - "errors" - "fmt" - "reflect" - "runtime" - "unsafe" - - "github.com/cilium/ebpf/internal" -) - -// marshalPtr converts an arbitrary value into a pointer suitable -// to be passed to the kernel. -// -// As an optimization, it returns the original value if it is an -// unsafe.Pointer. -func marshalPtr(data interface{}, length int) (internal.Pointer, error) { - if ptr, ok := data.(unsafe.Pointer); ok { - return internal.NewPointer(ptr), nil - } - - buf, err := marshalBytes(data, length) - if err != nil { - return internal.Pointer{}, err - } - - return internal.NewSlicePointer(buf), nil -} - -// marshalBytes converts an arbitrary value into a byte buffer. -// -// Prefer using Map.marshalKey and Map.marshalValue if possible, since -// those have special cases that allow more types to be encoded. 
-// -// Returns an error if the given value isn't representable in exactly -// length bytes. -func marshalBytes(data interface{}, length int) (buf []byte, err error) { - switch value := data.(type) { - case encoding.BinaryMarshaler: - buf, err = value.MarshalBinary() - case string: - buf = []byte(value) - case []byte: - buf = value - case unsafe.Pointer: - err = errors.New("can't marshal from unsafe.Pointer") - case Map, *Map, Program, *Program: - err = fmt.Errorf("can't marshal %T", value) - default: - var wr bytes.Buffer - err = binary.Write(&wr, internal.NativeEndian, value) - if err != nil { - err = fmt.Errorf("encoding %T: %v", value, err) - } - buf = wr.Bytes() - } - if err != nil { - return nil, err - } - - if len(buf) != length { - return nil, fmt.Errorf("%T doesn't marshal to %d bytes", data, length) - } - return buf, nil -} - -func makeBuffer(dst interface{}, length int) (internal.Pointer, []byte) { - if ptr, ok := dst.(unsafe.Pointer); ok { - return internal.NewPointer(ptr), nil - } - - buf := make([]byte, length) - return internal.NewSlicePointer(buf), buf -} - -// unmarshalBytes converts a byte buffer into an arbitrary value. -// -// Prefer using Map.unmarshalKey and Map.unmarshalValue if possible, since -// those have special cases that allow more types to be encoded. -func unmarshalBytes(data interface{}, buf []byte) error { - switch value := data.(type) { - case unsafe.Pointer: - // This could be solved in Go 1.17 by unsafe.Slice instead. (https://github.com/golang/go/issues/19367) - // We could opt for removing unsafe.Pointer support in the lib as well. 
- sh := &reflect.SliceHeader{ //nolint:govet - Data: uintptr(value), - Len: len(buf), - Cap: len(buf), - } - - dst := *(*[]byte)(unsafe.Pointer(sh)) - copy(dst, buf) - runtime.KeepAlive(value) - return nil - case Map, *Map, Program, *Program: - return fmt.Errorf("can't unmarshal into %T", value) - case encoding.BinaryUnmarshaler: - return value.UnmarshalBinary(buf) - case *string: - *value = string(buf) - return nil - case *[]byte: - *value = buf - return nil - case string: - return errors.New("require pointer to string") - case []byte: - return errors.New("require pointer to []byte") - default: - rd := bytes.NewReader(buf) - if err := binary.Read(rd, internal.NativeEndian, value); err != nil { - return fmt.Errorf("decoding %T: %v", value, err) - } - return nil - } -} - -// marshalPerCPUValue encodes a slice containing one value per -// possible CPU into a buffer of bytes. -// -// Values are initialized to zero if the slice has less elements than CPUs. -// -// slice must have a type like []elementType. 
-func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, error) { - sliceType := reflect.TypeOf(slice) - if sliceType.Kind() != reflect.Slice { - return internal.Pointer{}, errors.New("per-CPU value requires slice") - } - - possibleCPUs, err := internal.PossibleCPUs() - if err != nil { - return internal.Pointer{}, err - } - - sliceValue := reflect.ValueOf(slice) - sliceLen := sliceValue.Len() - if sliceLen > possibleCPUs { - return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") - } - - alignedElemLength := align(elemLength, 8) - buf := make([]byte, alignedElemLength*possibleCPUs) - - for i := 0; i < sliceLen; i++ { - elem := sliceValue.Index(i).Interface() - elemBytes, err := marshalBytes(elem, elemLength) - if err != nil { - return internal.Pointer{}, err - } - - offset := i * alignedElemLength - copy(buf[offset:offset+elemLength], elemBytes) - } - - return internal.NewSlicePointer(buf), nil -} - -// unmarshalPerCPUValue decodes a buffer into a slice containing one value per -// possible CPU. 
-// -// valueOut must have a type like *[]elementType -func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) error { - slicePtrType := reflect.TypeOf(slicePtr) - if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice { - return fmt.Errorf("per-cpu value requires pointer to slice") - } - - possibleCPUs, err := internal.PossibleCPUs() - if err != nil { - return err - } - - sliceType := slicePtrType.Elem() - slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs) - - sliceElemType := sliceType.Elem() - sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr - if sliceElemIsPointer { - sliceElemType = sliceElemType.Elem() - } - - step := len(buf) / possibleCPUs - if step < elemLength { - return fmt.Errorf("per-cpu element length is larger than available data") - } - for i := 0; i < possibleCPUs; i++ { - var elem interface{} - if sliceElemIsPointer { - newElem := reflect.New(sliceElemType) - slice.Index(i).Set(newElem) - elem = newElem.Interface() - } else { - elem = slice.Index(i).Addr().Interface() - } - - // Make a copy, since unmarshal can hold on to itemBytes - elemBytes := make([]byte, elemLength) - copy(elemBytes, buf[:elemLength]) - - err := unmarshalBytes(elem, elemBytes) - if err != nil { - return fmt.Errorf("cpu %d: %w", i, err) - } - - buf = buf[step:] - } - - reflect.ValueOf(slicePtr).Elem().Set(slice) - return nil -} - -func align(n, alignment int) int { - return (int(n) + alignment - 1) / alignment * alignment -} diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go deleted file mode 100644 index 13bdb6d..0000000 --- a/vendor/github.com/cilium/ebpf/prog.go +++ /dev/null @@ -1,728 +0,0 @@ -package ebpf - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "path/filepath" - "strings" - "time" - - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" - 
"github.com/cilium/ebpf/internal/unix" -) - -// ErrNotSupported is returned whenever the kernel doesn't support a feature. -var ErrNotSupported = internal.ErrNotSupported - -var errUnsatisfiedReference = errors.New("unsatisfied reference") - -// ProgramID represents the unique ID of an eBPF program. -type ProgramID uint32 - -const ( - // Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN. - // This is currently the maximum of spare space allocated for SKB - // and XDP programs, and equal to XDP_PACKET_HEADROOM + NET_IP_ALIGN. - outputPad = 256 + 2 -) - -// DefaultVerifierLogSize is the default number of bytes allocated for the -// verifier log. -const DefaultVerifierLogSize = 64 * 1024 - -// ProgramOptions control loading a program into the kernel. -type ProgramOptions struct { - // Controls the detail emitted by the kernel verifier. Set to non-zero - // to enable logging. - LogLevel uint32 - // Controls the output buffer size for the verifier. Defaults to - // DefaultVerifierLogSize. - LogSize int - // An ELF containing the target BTF for this program. It is used both to - // find the correct function to trace and to apply CO-RE relocations. - // This is useful in environments where the kernel BTF is not available - // (containers) or where it is in a non-standard location. Defaults to - // use the kernel BTF from a well-known location. - TargetBTF io.ReaderAt -} - -// ProgramSpec defines a Program. -type ProgramSpec struct { - // Name is passed to the kernel as a debug aid. Must only contain - // alpha numeric and '_' characters. - Name string - // Type determines at which hook in the kernel a program will run. - Type ProgramType - AttachType AttachType - // Name of a kernel data structure to attach to. It's interpretation - // depends on Type and AttachType. - AttachTo string - Instructions asm.Instructions - // Flags is passed to the kernel and specifies additional program - // load attributes. - Flags uint32 - // License of the program. 
Some helpers are only available if - // the license is deemed compatible with the GPL. - // - // See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1 - License string - - // Version used by Kprobe programs. - // - // Deprecated on kernels 5.0 and later. Leave empty to let the library - // detect this value automatically. - KernelVersion uint32 - - // The BTF associated with this program. Changing Instructions - // will most likely invalidate the contained data, and may - // result in errors when attempting to load it into the kernel. - BTF *btf.Program - - // The byte order this program was compiled for, may be nil. - ByteOrder binary.ByteOrder -} - -// Copy returns a copy of the spec. -func (ps *ProgramSpec) Copy() *ProgramSpec { - if ps == nil { - return nil - } - - cpy := *ps - cpy.Instructions = make(asm.Instructions, len(ps.Instructions)) - copy(cpy.Instructions, ps.Instructions) - return &cpy -} - -// Tag calculates the kernel tag for a series of instructions. -// -// Use asm.Instructions.Tag if you need to calculate for non-native endianness. -func (ps *ProgramSpec) Tag() (string, error) { - return ps.Instructions.Tag(internal.NativeEndian) -} - -// Program represents BPF program loaded into the kernel. -// -// It is not safe to close a Program which is used by other goroutines. -type Program struct { - // Contains the output of the kernel verifier if enabled, - // otherwise it is empty. - VerifierLog string - - fd *internal.FD - name string - pinnedPath string - typ ProgramType -} - -// NewProgram creates a new Program. -// -// Loading a program for the first time will perform -// feature detection by loading small, temporary programs. -func NewProgram(spec *ProgramSpec) (*Program, error) { - return NewProgramWithOptions(spec, ProgramOptions{}) -} - -// NewProgramWithOptions creates a new Program. -// -// Loading a program for the first time will perform -// feature detection by loading small, temporary programs. 
-func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { - handles := newHandleCache() - defer handles.close() - - prog, err := newProgramWithOptions(spec, opts, handles) - if errors.Is(err, errUnsatisfiedReference) { - return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err) - } - return prog, err -} - -func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *handleCache) (*Program, error) { - if len(spec.Instructions) == 0 { - return nil, errors.New("Instructions cannot be empty") - } - - if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian { - return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian) - } - - // Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load") - // require the version field to be set to the value of the KERNEL_VERSION - // macro for kprobe-type programs. - // Overwrite Kprobe program version if set to zero or the magic version constant. 
- kv := spec.KernelVersion - if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) { - v, err := internal.KernelVersion() - if err != nil { - return nil, fmt.Errorf("detecting kernel version: %w", err) - } - kv = v.Kernel() - } - - attr := &bpfProgLoadAttr{ - progType: spec.Type, - progFlags: spec.Flags, - expectedAttachType: spec.AttachType, - license: internal.NewStringPointer(spec.License), - kernelVersion: kv, - } - - if haveObjName() == nil { - attr.progName = internal.NewBPFObjName(spec.Name) - } - - var err error - var targetBTF *btf.Spec - if opts.TargetBTF != nil { - targetBTF, err = handles.btfSpec(opts.TargetBTF) - if err != nil { - return nil, fmt.Errorf("load target BTF: %w", err) - } - } - - var btfDisabled bool - var core btf.COREFixups - if spec.BTF != nil { - core, err = btf.ProgramFixups(spec.BTF, targetBTF) - if err != nil { - return nil, fmt.Errorf("CO-RE relocations: %w", err) - } - - handle, err := handles.btfHandle(btf.ProgramSpec(spec.BTF)) - btfDisabled = errors.Is(err, btf.ErrNotSupported) - if err != nil && !btfDisabled { - return nil, fmt.Errorf("load BTF: %w", err) - } - - if handle != nil { - attr.progBTFFd = uint32(handle.FD()) - - recSize, bytes, err := btf.ProgramLineInfos(spec.BTF) - if err != nil { - return nil, fmt.Errorf("get BTF line infos: %w", err) - } - attr.lineInfoRecSize = recSize - attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) - attr.lineInfo = internal.NewSlicePointer(bytes) - - recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF) - if err != nil { - return nil, fmt.Errorf("get BTF function infos: %w", err) - } - attr.funcInfoRecSize = recSize - attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) - attr.funcInfo = internal.NewSlicePointer(bytes) - } - } - - insns, err := core.Apply(spec.Instructions) - if err != nil { - return nil, fmt.Errorf("CO-RE fixup: %w", err) - } - - if err := fixupJumpsAndCalls(insns); err != nil { - return nil, err - } - - buf := 
bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize)) - err = insns.Marshal(buf, internal.NativeEndian) - if err != nil { - return nil, err - } - - bytecode := buf.Bytes() - attr.instructions = internal.NewSlicePointer(bytecode) - attr.insCount = uint32(len(bytecode) / asm.InstructionSize) - - if spec.AttachTo != "" { - target, err := resolveBTFType(targetBTF, spec.AttachTo, spec.Type, spec.AttachType) - if err != nil { - return nil, err - } - if target != nil { - attr.attachBTFID = target.ID() - } - } - - logSize := DefaultVerifierLogSize - if opts.LogSize > 0 { - logSize = opts.LogSize - } - - var logBuf []byte - if opts.LogLevel > 0 { - logBuf = make([]byte, logSize) - attr.logLevel = opts.LogLevel - attr.logSize = uint32(len(logBuf)) - attr.logBuf = internal.NewSlicePointer(logBuf) - } - - fd, err := bpfProgLoad(attr) - if err == nil { - return &Program{internal.CString(logBuf), fd, spec.Name, "", spec.Type}, nil - } - - logErr := err - if opts.LogLevel == 0 && opts.LogSize >= 0 { - // Re-run with the verifier enabled to get better error messages. - logBuf = make([]byte, logSize) - attr.logLevel = 1 - attr.logSize = uint32(len(logBuf)) - attr.logBuf = internal.NewSlicePointer(logBuf) - - _, logErr = bpfProgLoad(attr) - } - - if errors.Is(logErr, unix.EPERM) && logBuf[0] == 0 { - // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can - // check that the log is empty to reduce false positives. - return nil, fmt.Errorf("load program: RLIMIT_MEMLOCK may be too low: %w", logErr) - } - - err = internal.ErrorWithLog(err, logBuf, logErr) - if btfDisabled { - return nil, fmt.Errorf("load program without BTF: %w", err) - } - return nil, fmt.Errorf("load program: %w", err) -} - -// NewProgramFromFD creates a program from a raw fd. -// -// You should not use fd after calling this function. -// -// Requires at least Linux 4.10. 
-func NewProgramFromFD(fd int) (*Program, error) { - if fd < 0 { - return nil, errors.New("invalid fd") - } - - return newProgramFromFD(internal.NewFD(uint32(fd))) -} - -// NewProgramFromID returns the program for a given id. -// -// Returns ErrNotExist, if there is no eBPF program with the given id. -func NewProgramFromID(id ProgramID) (*Program, error) { - fd, err := bpfObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id)) - if err != nil { - return nil, fmt.Errorf("get program by id: %w", err) - } - - return newProgramFromFD(fd) -} - -func newProgramFromFD(fd *internal.FD) (*Program, error) { - info, err := newProgramInfoFromFd(fd) - if err != nil { - fd.Close() - return nil, fmt.Errorf("discover program type: %w", err) - } - - return &Program{"", fd, "", "", info.Type}, nil -} - -func (p *Program) String() string { - if p.name != "" { - return fmt.Sprintf("%s(%s)#%v", p.typ, p.name, p.fd) - } - return fmt.Sprintf("%s(%v)", p.typ, p.fd) -} - -// Type returns the underlying type of the program. -func (p *Program) Type() ProgramType { - return p.typ -} - -// Info returns metadata about the program. -// -// Requires at least 4.10. -func (p *Program) Info() (*ProgramInfo, error) { - return newProgramInfoFromFd(p.fd) -} - -// FD gets the file descriptor of the Program. -// -// It is invalid to call this function after Close has been called. -func (p *Program) FD() int { - fd, err := p.fd.Value() - if err != nil { - // Best effort: -1 is the number most likely to be an - // invalid file descriptor. - return -1 - } - - return int(fd) -} - -// Clone creates a duplicate of the Program. -// -// Closing the duplicate does not affect the original, and vice versa. -// -// Cloning a nil Program returns nil. 
-func (p *Program) Clone() (*Program, error) { - if p == nil { - return nil, nil - } - - dup, err := p.fd.Dup() - if err != nil { - return nil, fmt.Errorf("can't clone program: %w", err) - } - - return &Program{p.VerifierLog, dup, p.name, "", p.typ}, nil -} - -// Pin persists the Program on the BPF virtual file system past the lifetime of -// the process that created it -// -// Calling Pin on a previously pinned program will overwrite the path, except when -// the new path already exists. Re-pinning across filesystems is not supported. -// -// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs -func (p *Program) Pin(fileName string) error { - if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil { - return err - } - p.pinnedPath = fileName - return nil -} - -// Unpin removes the persisted state for the Program from the BPF virtual filesystem. -// -// Failed calls to Unpin will not alter the state returned by IsPinned. -// -// Unpinning an unpinned Program returns nil. -func (p *Program) Unpin() error { - if err := internal.Unpin(p.pinnedPath); err != nil { - return err - } - p.pinnedPath = "" - return nil -} - -// IsPinned returns true if the Program has a non-empty pinned path. -func (p *Program) IsPinned() bool { - return p.pinnedPath != "" -} - -// Close unloads the program from the kernel. -func (p *Program) Close() error { - if p == nil { - return nil - } - - return p.fd.Close() -} - -// Test runs the Program in the kernel with the given input and returns the -// value returned by the eBPF program. outLen may be zero. -// -// Note: the kernel expects at least 14 bytes input for an ethernet header for -// XDP and SKB programs. -// -// This function requires at least Linux 4.12. 
-func (p *Program) Test(in []byte) (uint32, []byte, error) { - ret, out, _, err := p.testRun(in, 1, nil) - if err != nil { - return ret, nil, fmt.Errorf("can't test program: %w", err) - } - return ret, out, nil -} - -// Benchmark runs the Program with the given input for a number of times -// and returns the time taken per iteration. -// -// Returns the result of the last execution of the program and the time per -// run or an error. reset is called whenever the benchmark syscall is -// interrupted, and should be set to testing.B.ResetTimer or similar. -// -// Note: profiling a call to this function will skew it's results, see -// https://github.com/cilium/ebpf/issues/24 -// -// This function requires at least Linux 4.12. -func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) { - ret, _, total, err := p.testRun(in, repeat, reset) - if err != nil { - return ret, total, fmt.Errorf("can't benchmark program: %w", err) - } - return ret, total, nil -} - -var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error { - prog, err := NewProgram(&ProgramSpec{ - Type: SocketFilter, - Instructions: asm.Instructions{ - asm.LoadImm(asm.R0, 0, asm.DWord), - asm.Return(), - }, - License: "MIT", - }) - if err != nil { - // This may be because we lack sufficient permissions, etc. - return err - } - defer prog.Close() - - // Programs require at least 14 bytes input - in := make([]byte, 14) - attr := bpfProgTestRunAttr{ - fd: uint32(prog.FD()), - dataSizeIn: uint32(len(in)), - dataIn: internal.NewSlicePointer(in), - } - - err = bpfProgTestRun(&attr) - if errors.Is(err, unix.EINVAL) { - // Check for EINVAL specifically, rather than err != nil since we - // otherwise misdetect due to insufficient permissions. - return internal.ErrNotSupported - } - if errors.Is(err, unix.EINTR) { - // We know that PROG_TEST_RUN is supported if we get EINTR. 
- return nil - } - return err -}) - -func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, time.Duration, error) { - if uint(repeat) > math.MaxUint32 { - return 0, nil, 0, fmt.Errorf("repeat is too high") - } - - if len(in) == 0 { - return 0, nil, 0, fmt.Errorf("missing input") - } - - if uint(len(in)) > math.MaxUint32 { - return 0, nil, 0, fmt.Errorf("input is too long") - } - - if err := haveProgTestRun(); err != nil { - return 0, nil, 0, err - } - - // Older kernels ignore the dataSizeOut argument when copying to user space. - // Combined with things like bpf_xdp_adjust_head() we don't really know what the final - // size will be. Hence we allocate an output buffer which we hope will always be large - // enough, and panic if the kernel wrote past the end of the allocation. - // See https://patchwork.ozlabs.org/cover/1006822/ - out := make([]byte, len(in)+outputPad) - - fd, err := p.fd.Value() - if err != nil { - return 0, nil, 0, err - } - - attr := bpfProgTestRunAttr{ - fd: fd, - dataSizeIn: uint32(len(in)), - dataSizeOut: uint32(len(out)), - dataIn: internal.NewSlicePointer(in), - dataOut: internal.NewSlicePointer(out), - repeat: uint32(repeat), - } - - for { - err = bpfProgTestRun(&attr) - if err == nil { - break - } - - if errors.Is(err, unix.EINTR) { - if reset != nil { - reset() - } - continue - } - - return 0, nil, 0, fmt.Errorf("can't run test: %w", err) - } - - if int(attr.dataSizeOut) > cap(out) { - // Houston, we have a problem. The program created more data than we allocated, - // and the kernel wrote past the end of our buffer. 
- panic("kernel wrote past end of output buffer") - } - out = out[:int(attr.dataSizeOut)] - - total := time.Duration(attr.duration) * time.Nanosecond - return attr.retval, out, total, nil -} - -func unmarshalProgram(buf []byte) (*Program, error) { - if len(buf) != 4 { - return nil, errors.New("program id requires 4 byte value") - } - - // Looking up an entry in a nested map or prog array returns an id, - // not an fd. - id := internal.NativeEndian.Uint32(buf) - return NewProgramFromID(ProgramID(id)) -} - -func marshalProgram(p *Program, length int) ([]byte, error) { - if length != 4 { - return nil, fmt.Errorf("can't marshal program to %d bytes", length) - } - - value, err := p.fd.Value() - if err != nil { - return nil, err - } - - buf := make([]byte, 4) - internal.NativeEndian.PutUint32(buf, value) - return buf, nil -} - -// Attach a Program. -// -// Deprecated: use link.RawAttachProgram instead. -func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error { - if fd < 0 { - return errors.New("invalid fd") - } - - pfd, err := p.fd.Value() - if err != nil { - return err - } - - attr := internal.BPFProgAttachAttr{ - TargetFd: uint32(fd), - AttachBpfFd: pfd, - AttachType: uint32(typ), - AttachFlags: uint32(flags), - } - - return internal.BPFProgAttach(&attr) -} - -// Detach a Program. -// -// Deprecated: use link.RawDetachProgram instead. -func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error { - if fd < 0 { - return errors.New("invalid fd") - } - - if flags != 0 { - return errors.New("flags must be zero") - } - - pfd, err := p.fd.Value() - if err != nil { - return err - } - - attr := internal.BPFProgDetachAttr{ - TargetFd: uint32(fd), - AttachBpfFd: pfd, - AttachType: uint32(typ), - } - - return internal.BPFProgDetach(&attr) -} - -// LoadPinnedProgram loads a Program from a BPF file. -// -// Requires at least Linux 4.11. 
-func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) - if err != nil { - return nil, err - } - - info, err := newProgramInfoFromFd(fd) - if err != nil { - _ = fd.Close() - return nil, fmt.Errorf("info for %s: %w", fileName, err) - } - - return &Program{"", fd, filepath.Base(fileName), fileName, info.Type}, nil -} - -// SanitizeName replaces all invalid characters in name with replacement. -// Passing a negative value for replacement will delete characters instead -// of replacing them. Use this to automatically generate valid names for maps -// and programs at runtime. -// -// The set of allowed characters depends on the running kernel version. -// Dots are only allowed as of kernel 5.2. -func SanitizeName(name string, replacement rune) string { - return strings.Map(func(char rune) rune { - if invalidBPFObjNameChar(char) { - return replacement - } - return char - }, name) -} - -// ProgramGetNextID returns the ID of the next eBPF program. -// -// Returns ErrNotExist, if there is no next eBPF program. -func ProgramGetNextID(startID ProgramID) (ProgramID, error) { - id, err := objGetNextID(internal.BPF_PROG_GET_NEXT_ID, uint32(startID)) - return ProgramID(id), err -} - -// ID returns the systemwide unique ID of the program. -// -// Deprecated: use ProgramInfo.ID() instead. 
-func (p *Program) ID() (ProgramID, error) { - info, err := bpfGetProgInfoByFD(p.fd) - if err != nil { - return ProgramID(0), err - } - return ProgramID(info.id), nil -} - -func resolveBTFType(kernel *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.Type, error) { - type match struct { - p ProgramType - a AttachType - } - - var target btf.Type - var typeName, featureName string - switch (match{progType, attachType}) { - case match{LSM, AttachLSMMac}: - target = new(btf.Func) - typeName = "bpf_lsm_" + name - featureName = name + " LSM hook" - - case match{Tracing, AttachTraceIter}: - target = new(btf.Func) - typeName = "bpf_iter_" + name - featureName = name + " iterator" - - default: - return nil, nil - } - - if kernel == nil { - var err error - kernel, err = btf.LoadKernelSpec() - if err != nil { - return nil, fmt.Errorf("load kernel spec: %w", err) - } - } - - err := kernel.FindType(typeName, target) - if errors.Is(err, btf.ErrNotFound) { - return nil, &internal.UnsupportedFeatureError{ - Name: featureName, - } - } - if err != nil { - return nil, fmt.Errorf("resolve BTF for %s: %w", featureName, err) - } - return target, nil -} diff --git a/vendor/github.com/cilium/ebpf/run-tests.sh b/vendor/github.com/cilium/ebpf/run-tests.sh deleted file mode 100644 index e2437be..0000000 --- a/vendor/github.com/cilium/ebpf/run-tests.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Test the current package under a different kernel. -# Requires virtme and qemu to be installed. -# Examples: -# Run all tests on a 5.4 kernel -# $ ./run-tests.sh 5.4 -# Run a subset of tests: -# $ ./run-tests.sh 5.4 go test ./link - -set -euo pipefail - -script="$(realpath "$0")" -readonly script - -# This script is a bit like a Matryoshka doll since it keeps re-executing itself -# in various different contexts: -# -# 1. invoked by the user like run-tests.sh 5.4 -# 2. invoked by go test like run-tests.sh --exec-vm -# 3. 
invoked by init in the vm like run-tests.sh --exec-test -# -# This allows us to use all available CPU on the host machine to compile our -# code, and then only use the VM to execute the test. This is because the VM -# is usually slower at compiling than the host. -if [[ "${1:-}" = "--exec-vm" ]]; then - shift - - input="$1" - shift - - # Use sudo if /dev/kvm isn't accessible by the current user. - sudo="" - if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then - sudo="sudo" - fi - readonly sudo - - testdir="$(dirname "$1")" - output="$(mktemp -d)" - printf -v cmd "%q " "$@" - - if [[ "$(stat -c '%t:%T' -L /proc/$$/fd/0)" == "1:3" ]]; then - # stdin is /dev/null, which doesn't play well with qemu. Use a fifo as a - # blocking substitute. - mkfifo "${output}/fake-stdin" - # Open for reading and writing to avoid blocking. - exec 0<> "${output}/fake-stdin" - rm "${output}/fake-stdin" - fi - - $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \ - --rwdir="${testdir}=${testdir}" \ - --rodir=/run/input="${input}" \ - --rwdir=/run/output="${output}" \ - --script-sh "PATH=\"$PATH\" \"$script\" --exec-test $cmd" \ - --qemu-opts -smp 2 # need at least two CPUs for some tests - - if [[ ! -e "${output}/success" ]]; then - exit 1 - fi - - $sudo rm -r "$output" - exit 0 -elif [[ "${1:-}" = "--exec-test" ]]; then - shift - - mount -t bpf bpf /sys/fs/bpf - mount -t tracefs tracefs /sys/kernel/debug/tracing - - if [[ -d "/run/input/bpf" ]]; then - export KERNEL_SELFTESTS="/run/input/bpf" - fi - - dmesg -C - if ! 
"$@"; then - dmesg - exit 1 - fi - touch "/run/output/success" - exit 0 -fi - -readonly kernel_version="${1:-}" -if [[ -z "${kernel_version}" ]]; then - echo "Expecting kernel version as first argument" - exit 1 -fi -shift - -readonly kernel="linux-${kernel_version}.bz" -readonly selftests="linux-${kernel_version}-selftests-bpf.bz" -readonly input="$(mktemp -d)" -readonly tmp_dir="${TMPDIR:-/tmp}" -readonly branch="${BRANCH:-master}" - -fetch() { - echo Fetching "${1}" - wget -nv -N -P "${tmp_dir}" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}" -} - -fetch "${kernel}" -cp "${tmp_dir}/${kernel}" "${input}/bzImage" - -if fetch "${selftests}"; then - mkdir "${input}/bpf" - tar --strip-components=4 -xjf "${tmp_dir}/${selftests}" -C "${input}/bpf" -else - echo "No selftests found, disabling" -fi - -args=(-v -short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...) -if (( $# > 0 )); then - args=("$@") -fi - -export GOFLAGS=-mod=readonly -export CGO_ENABLED=0 - -echo Testing on "${kernel_version}" -go test -exec "$script --exec-vm $input" "${args[@]}" -echo "Test successful on ${kernel_version}" - -rm -r "${input}" diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go deleted file mode 100644 index f5a3854..0000000 --- a/vendor/github.com/cilium/ebpf/syscalls.go +++ /dev/null @@ -1,480 +0,0 @@ -package ebpf - -import ( - "errors" - "fmt" - "os" - "unsafe" - - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" - "github.com/cilium/ebpf/internal/unix" -) - -// ErrNotExist is returned when loading a non-existing map or program. -// -// Deprecated: use os.ErrNotExist instead. -var ErrNotExist = os.ErrNotExist - -// invalidBPFObjNameChar returns true if char may not appear in -// a BPF object name. 
-func invalidBPFObjNameChar(char rune) bool { - dotAllowed := objNameAllowsDot() == nil - - switch { - case char >= 'A' && char <= 'Z': - return false - case char >= 'a' && char <= 'z': - return false - case char >= '0' && char <= '9': - return false - case dotAllowed && char == '.': - return false - case char == '_': - return false - default: - return true - } -} - -type bpfMapOpAttr struct { - mapFd uint32 - padding uint32 - key internal.Pointer - value internal.Pointer - flags uint64 -} - -type bpfBatchMapOpAttr struct { - inBatch internal.Pointer - outBatch internal.Pointer - keys internal.Pointer - values internal.Pointer - count uint32 - mapFd uint32 - elemFlags uint64 - flags uint64 -} - -type bpfMapInfo struct { - map_type uint32 // since 4.12 1e2709769086 - id uint32 - key_size uint32 - value_size uint32 - max_entries uint32 - map_flags uint32 - name internal.BPFObjName // since 4.15 ad5b177bd73f - ifindex uint32 // since 4.16 52775b33bb50 - btf_vmlinux_value_type_id uint32 // since 5.6 85d33df357b6 - netns_dev uint64 // since 4.16 52775b33bb50 - netns_ino uint64 - btf_id uint32 // since 4.18 78958fca7ead - btf_key_type_id uint32 // since 4.18 9b2cf328b2ec - btf_value_type_id uint32 -} - -type bpfProgLoadAttr struct { - progType ProgramType - insCount uint32 - instructions internal.Pointer - license internal.Pointer - logLevel uint32 - logSize uint32 - logBuf internal.Pointer - kernelVersion uint32 // since 4.1 2541517c32be - progFlags uint32 // since 4.11 e07b98d9bffe - progName internal.BPFObjName // since 4.15 067cae47771c - progIfIndex uint32 // since 4.15 1f6f4cb7ba21 - expectedAttachType AttachType // since 4.17 5e43f899b03a - progBTFFd uint32 - funcInfoRecSize uint32 - funcInfo internal.Pointer - funcInfoCnt uint32 - lineInfoRecSize uint32 - lineInfo internal.Pointer - lineInfoCnt uint32 - attachBTFID btf.TypeID - attachProgFd uint32 -} - -type bpfProgInfo struct { - prog_type uint32 - id uint32 - tag [unix.BPF_TAG_SIZE]byte - jited_prog_len uint32 
- xlated_prog_len uint32 - jited_prog_insns internal.Pointer - xlated_prog_insns internal.Pointer - load_time uint64 // since 4.15 cb4d2b3f03d8 - created_by_uid uint32 - nr_map_ids uint32 - map_ids internal.Pointer - name internal.BPFObjName // since 4.15 067cae47771c - ifindex uint32 - gpl_compatible uint32 - netns_dev uint64 - netns_ino uint64 - nr_jited_ksyms uint32 - nr_jited_func_lens uint32 - jited_ksyms internal.Pointer - jited_func_lens internal.Pointer - btf_id uint32 - func_info_rec_size uint32 - func_info internal.Pointer - nr_func_info uint32 - nr_line_info uint32 - line_info internal.Pointer - jited_line_info internal.Pointer - nr_jited_line_info uint32 - line_info_rec_size uint32 - jited_line_info_rec_size uint32 - nr_prog_tags uint32 - prog_tags internal.Pointer - run_time_ns uint64 - run_cnt uint64 -} - -type bpfProgTestRunAttr struct { - fd uint32 - retval uint32 - dataSizeIn uint32 - dataSizeOut uint32 - dataIn internal.Pointer - dataOut internal.Pointer - repeat uint32 - duration uint32 -} - -type bpfGetFDByIDAttr struct { - id uint32 - next uint32 -} - -type bpfMapFreezeAttr struct { - mapFd uint32 -} - -type bpfObjGetNextIDAttr struct { - startID uint32 - nextID uint32 - openFlags uint32 -} - -func bpfProgLoad(attr *bpfProgLoadAttr) (*internal.FD, error) { - for { - fd, err := internal.BPF(internal.BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - // As of ~4.20 the verifier can be interrupted by a signal, - // and returns EAGAIN in that case. 
- if errors.Is(err, unix.EAGAIN) { - continue - } - - if err != nil { - return nil, err - } - - return internal.NewFD(uint32(fd)), nil - } -} - -func bpfProgTestRun(attr *bpfProgTestRunAttr) error { - _, err := internal.BPF(internal.BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - -var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error { - _, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(ArrayOfMaps), - KeySize: 4, - ValueSize: 4, - MaxEntries: 1, - // Invalid file descriptor. - InnerMapFd: ^uint32(0), - }) - if errors.Is(err, unix.EINVAL) { - return internal.ErrNotSupported - } - if errors.Is(err, unix.EBADF) { - return nil - } - return err -}) - -var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error { - // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since - // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check. - m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), - KeySize: 4, - ValueSize: 4, - MaxEntries: 1, - Flags: unix.BPF_F_RDONLY_PROG, - }) - if err != nil { - return internal.ErrNotSupported - } - _ = m.Close() - return nil -}) - -var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error { - // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps. - m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), - KeySize: 4, - ValueSize: 4, - MaxEntries: 1, - Flags: unix.BPF_F_MMAPABLE, - }) - if err != nil { - return internal.ErrNotSupported - } - _ = m.Close() - return nil -}) - -var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error { - // This checks BPF_F_INNER_MAP, which appeared in 5.10. 
- m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), - KeySize: 4, - ValueSize: 4, - MaxEntries: 1, - Flags: unix.BPF_F_INNER_MAP, - }) - if err != nil { - return internal.ErrNotSupported - } - _ = m.Close() - return nil -}) - -func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - } - _, err = internal.BPF(internal.BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapLookupAndDelete(m *internal.FD, key, valueOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - } - _, err = internal.BPF(internal.BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapUpdateElem(m *internal.FD, key, valueOut internal.Pointer, flags uint64) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - flags: flags, - } - _, err = internal.BPF(internal.BPF_MAP_UPDATE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapDeleteElem(m *internal.FD, key internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - } - _, err = internal.BPF(internal.BPF_MAP_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapGetNextKey(m *internal.FD, key, nextKeyOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: nextKeyOut, - } - _, err = internal.BPF(internal.BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func 
objGetNextID(cmd internal.BPFCmd, start uint32) (uint32, error) { - attr := bpfObjGetNextIDAttr{ - startID: start, - } - _, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return attr.nextID, err -} - -func bpfMapBatch(cmd internal.BPFCmd, m *internal.FD, inBatch, outBatch, keys, values internal.Pointer, count uint32, opts *BatchOptions) (uint32, error) { - fd, err := m.Value() - if err != nil { - return 0, err - } - - attr := bpfBatchMapOpAttr{ - inBatch: inBatch, - outBatch: outBatch, - keys: keys, - values: values, - count: count, - mapFd: fd, - } - if opts != nil { - attr.elemFlags = opts.ElemFlags - attr.flags = opts.Flags - } - _, err = internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - // always return count even on an error, as things like update might partially be fulfilled. - return attr.count, wrapMapError(err) -} - -func wrapMapError(err error) error { - if err == nil { - return nil - } - - if errors.Is(err, unix.ENOENT) { - return internal.SyscallError(ErrKeyNotExist, unix.ENOENT) - } - - if errors.Is(err, unix.EEXIST) { - return internal.SyscallError(ErrKeyExist, unix.EEXIST) - } - - if errors.Is(err, unix.ENOTSUPP) { - return internal.SyscallError(ErrNotSupported, unix.ENOTSUPP) - } - - return err -} - -func bpfMapFreeze(m *internal.FD) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapFreezeAttr{ - mapFd: fd, - } - _, err = internal.BPF(internal.BPF_MAP_FREEZE, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return err -} - -func bpfGetProgInfoByFD(fd *internal.FD) (*bpfProgInfo, error) { - var info bpfProgInfo - if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil { - return nil, fmt.Errorf("can't get program info: %w", err) - } - return &info, nil -} - -func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) { - var info bpfMapInfo - err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) - if err != nil { 
- return nil, fmt.Errorf("can't get map info: %w", err) - } - return &info, nil -} - -var haveObjName = internal.FeatureTest("object names", "4.15", func() error { - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Array), - KeySize: 4, - ValueSize: 4, - MaxEntries: 1, - MapName: internal.NewBPFObjName("feature_test"), - } - - fd, err := internal.BPFMapCreate(&attr) - if err != nil { - return internal.ErrNotSupported - } - - _ = fd.Close() - return nil -}) - -var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() error { - if err := haveObjName(); err != nil { - return err - } - - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Array), - KeySize: 4, - ValueSize: 4, - MaxEntries: 1, - MapName: internal.NewBPFObjName(".test"), - } - - fd, err := internal.BPFMapCreate(&attr) - if err != nil { - return internal.ErrNotSupported - } - - _ = fd.Close() - return nil -}) - -var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error { - var maxEntries uint32 = 2 - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Hash), - KeySize: 4, - ValueSize: 4, - MaxEntries: maxEntries, - } - - fd, err := internal.BPFMapCreate(&attr) - if err != nil { - return internal.ErrNotSupported - } - defer fd.Close() - keys := []uint32{1, 2} - values := []uint32{3, 4} - kp, _ := marshalPtr(keys, 8) - vp, _ := marshalPtr(values, 8) - nilPtr := internal.NewPointer(nil) - _, err = bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, fd, nilPtr, nilPtr, kp, vp, maxEntries, nil) - if err != nil { - return internal.ErrNotSupported - } - return nil -}) - -func bpfObjGetFDByID(cmd internal.BPFCmd, id uint32) (*internal.FD, error) { - attr := bpfGetFDByIDAttr{ - id: id, - } - ptr, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return internal.NewFD(uint32(ptr)), err -} diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go deleted file mode 100644 index 441a82f..0000000 --- 
a/vendor/github.com/cilium/ebpf/types.go +++ /dev/null @@ -1,248 +0,0 @@ -package ebpf - -import ( - "github.com/cilium/ebpf/internal/unix" -) - -//go:generate stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType - -// MapType indicates the type map structure -// that will be initialized in the kernel. -type MapType uint32 - -// All the various map types that can be created -const ( - UnspecifiedMap MapType = iota - // Hash is a hash map - Hash - // Array is an array map - Array - // ProgramArray - A program array map is a special kind of array map whose map - // values contain only file descriptors referring to other eBPF - // programs. Thus, both the key_size and value_size must be - // exactly four bytes. This map is used in conjunction with the - // TailCall helper. - ProgramArray - // PerfEventArray - A perf event array is used in conjunction with PerfEventRead - // and PerfEventOutput calls, to read the raw bpf_perf_data from the registers. - PerfEventArray - // PerCPUHash - This data structure is useful for people who have high performance - // network needs and can reconcile adds at the end of some cycle, so that - // hashes can be lock free without the use of XAdd, which can be costly. - PerCPUHash - // PerCPUArray - This data structure is useful for people who have high performance - // network needs and can reconcile adds at the end of some cycle, so that - // hashes can be lock free without the use of XAdd, which can be costly. - // Each CPU gets a copy of this hash, the contents of all of which can be reconciled - // later. 
- PerCPUArray - // StackTrace - This holds whole user and kernel stack traces, it can be retrieved with - // GetStackID - StackTrace - // CGroupArray - This is a very niche structure used to help SKBInCGroup determine - // if an skb is from a socket belonging to a specific cgroup - CGroupArray - // LRUHash - This allows you to create a small hash structure that will purge the - // least recently used items rather than thow an error when you run out of memory - LRUHash - // LRUCPUHash - This is NOT like PerCPUHash, this structure is shared among the CPUs, - // it has more to do with including the CPU id with the LRU calculation so that if a - // particular CPU is using a value over-and-over again, then it will be saved, but if - // a value is being retrieved a lot but sparsely across CPUs it is not as important, basically - // giving weight to CPU locality over overall usage. - LRUCPUHash - // LPMTrie - This is an implementation of Longest-Prefix-Match Trie structure. It is useful, - // for storing things like IP addresses which can be bit masked allowing for keys of differing - // values to refer to the same reference based on their masks. See wikipedia for more details. - LPMTrie - // ArrayOfMaps - Each item in the array is another map. The inner map mustn't be a map of maps - // itself. - ArrayOfMaps - // HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps - // itself. - HashOfMaps - // DevMap - Specialized map to store references to network devices. - DevMap - // SockMap - Specialized map to store references to sockets. - SockMap - // CPUMap - Specialized map to store references to CPUs. - CPUMap - // XSKMap - Specialized map for XDP programs to store references to open sockets. - XSKMap - // SockHash - Specialized hash to store references to sockets. - SockHash - // CGroupStorage - Special map for CGroups. - CGroupStorage - // ReusePortSockArray - Specialized map to store references to sockets that can be reused. 
- ReusePortSockArray - // PerCPUCGroupStorage - Special per CPU map for CGroups. - PerCPUCGroupStorage - // Queue - FIFO storage for BPF programs. - Queue - // Stack - LIFO storage for BPF programs. - Stack - // SkStorage - Specialized map for local storage at SK for BPF programs. - SkStorage - // DevMapHash - Hash-based indexing scheme for references to network devices. - DevMapHash - StructOpts - RingBuf - InodeStorage - TaskStorage -) - -// hasPerCPUValue returns true if the Map stores a value per CPU. -func (mt MapType) hasPerCPUValue() bool { - return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash -} - -// canStoreMap returns true if the map type accepts a map fd -// for update and returns a map id for lookup. -func (mt MapType) canStoreMap() bool { - return mt == ArrayOfMaps || mt == HashOfMaps -} - -// canStoreProgram returns true if the map type accepts a program fd -// for update and returns a program id for lookup. -func (mt MapType) canStoreProgram() bool { - return mt == ProgramArray -} - -// ProgramType of the eBPF program -type ProgramType uint32 - -// eBPF program types -const ( - UnspecifiedProgram ProgramType = iota - SocketFilter - Kprobe - SchedCLS - SchedACT - TracePoint - XDP - PerfEvent - CGroupSKB - CGroupSock - LWTIn - LWTOut - LWTXmit - SockOps - SkSKB - CGroupDevice - SkMsg - RawTracepoint - CGroupSockAddr - LWTSeg6Local - LircMode2 - SkReuseport - FlowDissector - CGroupSysctl - RawTracepointWritable - CGroupSockopt - Tracing - StructOps - Extension - LSM - SkLookup -) - -// AttachType of the eBPF program, needed to differentiate allowed context accesses in -// some newer program types like CGroupSockAddr. Should be set to AttachNone if not required. -// Will cause invalid argument (EINVAL) at program load time if set incorrectly. -type AttachType uint32 - -// AttachNone is an alias for AttachCGroupInetIngress for readability reasons. 
-const AttachNone AttachType = 0 - -const ( - AttachCGroupInetIngress AttachType = iota - AttachCGroupInetEgress - AttachCGroupInetSockCreate - AttachCGroupSockOps - AttachSkSKBStreamParser - AttachSkSKBStreamVerdict - AttachCGroupDevice - AttachSkMsgVerdict - AttachCGroupInet4Bind - AttachCGroupInet6Bind - AttachCGroupInet4Connect - AttachCGroupInet6Connect - AttachCGroupInet4PostBind - AttachCGroupInet6PostBind - AttachCGroupUDP4Sendmsg - AttachCGroupUDP6Sendmsg - AttachLircMode2 - AttachFlowDissector - AttachCGroupSysctl - AttachCGroupUDP4Recvmsg - AttachCGroupUDP6Recvmsg - AttachCGroupGetsockopt - AttachCGroupSetsockopt - AttachTraceRawTp - AttachTraceFEntry - AttachTraceFExit - AttachModifyReturn - AttachLSMMac - AttachTraceIter - AttachCgroupInet4GetPeername - AttachCgroupInet6GetPeername - AttachCgroupInet4GetSockname - AttachCgroupInet6GetSockname - AttachXDPDevMap - AttachCgroupInetSockRelease - AttachXDPCPUMap - AttachSkLookup - AttachXDP -) - -// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command -type AttachFlags uint32 - -// PinType determines whether a map is pinned into a BPFFS. -type PinType int - -// Valid pin types. -// -// Mirrors enum libbpf_pin_type. -const ( - PinNone PinType = iota - // Pin an object by using its name as the filename. - PinByName -) - -// LoadPinOptions control how a pinned object is loaded. -type LoadPinOptions struct { - // Request a read-only or write-only object. The default is a read-write - // object. Only one of the flags may be set. - ReadOnly bool - WriteOnly bool - - // Raw flags for the syscall. Other fields of this struct take precedence. - Flags uint32 -} - -// Marshal returns a value suitable for BPF_OBJ_GET syscall file_flags parameter. 
-func (lpo *LoadPinOptions) Marshal() uint32 { - if lpo == nil { - return 0 - } - - flags := lpo.Flags - if lpo.ReadOnly { - flags |= unix.BPF_F_RDONLY - } - if lpo.WriteOnly { - flags |= unix.BPF_F_WRONLY - } - return flags -} - -// BatchOptions batch map operations options -// -// Mirrors libbpf struct bpf_map_batch_opts -// Currently BPF_F_FLAG is the only supported -// flag (for ElemFlags). -type BatchOptions struct { - ElemFlags uint64 - Flags uint64 -} diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go deleted file mode 100644 index c25f765..0000000 --- a/vendor/github.com/cilium/ebpf/types_string.go +++ /dev/null @@ -1,172 +0,0 @@ -// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType"; DO NOT EDIT. - -package ebpf - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[UnspecifiedMap-0] - _ = x[Hash-1] - _ = x[Array-2] - _ = x[ProgramArray-3] - _ = x[PerfEventArray-4] - _ = x[PerCPUHash-5] - _ = x[PerCPUArray-6] - _ = x[StackTrace-7] - _ = x[CGroupArray-8] - _ = x[LRUHash-9] - _ = x[LRUCPUHash-10] - _ = x[LPMTrie-11] - _ = x[ArrayOfMaps-12] - _ = x[HashOfMaps-13] - _ = x[DevMap-14] - _ = x[SockMap-15] - _ = x[CPUMap-16] - _ = x[XSKMap-17] - _ = x[SockHash-18] - _ = x[CGroupStorage-19] - _ = x[ReusePortSockArray-20] - _ = x[PerCPUCGroupStorage-21] - _ = x[Queue-22] - _ = x[Stack-23] - _ = x[SkStorage-24] - _ = x[DevMapHash-25] - _ = x[StructOpts-26] - _ = x[RingBuf-27] - _ = x[InodeStorage-28] - _ = x[TaskStorage-29] -} - -const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOptsRingBufInodeStorageTaskStorage" - -var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 258, 265, 277, 288} - -func (i MapType) String() string { - if i >= MapType(len(_MapType_index)-1) { - return "MapType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _MapType_name[_MapType_index[i]:_MapType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[UnspecifiedProgram-0] - _ = x[SocketFilter-1] - _ = x[Kprobe-2] - _ = x[SchedCLS-3] - _ = x[SchedACT-4] - _ = x[TracePoint-5] - _ = x[XDP-6] - _ = x[PerfEvent-7] - _ = x[CGroupSKB-8] - _ = x[CGroupSock-9] - _ = x[LWTIn-10] - _ = x[LWTOut-11] - _ = x[LWTXmit-12] - _ = x[SockOps-13] - _ = x[SkSKB-14] - _ = x[CGroupDevice-15] - _ = x[SkMsg-16] - _ = x[RawTracepoint-17] - _ = x[CGroupSockAddr-18] - _ = x[LWTSeg6Local-19] - _ = x[LircMode2-20] - _ = x[SkReuseport-21] - _ = x[FlowDissector-22] - _ = x[CGroupSysctl-23] - _ = x[RawTracepointWritable-24] - _ = x[CGroupSockopt-25] - _ = x[Tracing-26] - _ = x[StructOps-27] - _ = x[Extension-28] - _ = x[LSM-29] - _ = x[SkLookup-30] -} - -const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookup" - -var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294} - -func (i ProgramType) String() string { - if i >= ProgramType(len(_ProgramType_index)-1) { - return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[AttachNone-0] - _ = x[AttachCGroupInetIngress-0] - _ = x[AttachCGroupInetEgress-1] - _ = x[AttachCGroupInetSockCreate-2] - _ = x[AttachCGroupSockOps-3] - _ = x[AttachSkSKBStreamParser-4] - _ = x[AttachSkSKBStreamVerdict-5] - _ = x[AttachCGroupDevice-6] - _ = x[AttachSkMsgVerdict-7] - _ = x[AttachCGroupInet4Bind-8] - _ = x[AttachCGroupInet6Bind-9] - _ = x[AttachCGroupInet4Connect-10] - _ = x[AttachCGroupInet6Connect-11] - _ = x[AttachCGroupInet4PostBind-12] - _ = x[AttachCGroupInet6PostBind-13] - _ = x[AttachCGroupUDP4Sendmsg-14] - _ = x[AttachCGroupUDP6Sendmsg-15] - _ = x[AttachLircMode2-16] - _ = x[AttachFlowDissector-17] - _ = x[AttachCGroupSysctl-18] - _ = x[AttachCGroupUDP4Recvmsg-19] - _ = x[AttachCGroupUDP6Recvmsg-20] - _ = x[AttachCGroupGetsockopt-21] - _ = x[AttachCGroupSetsockopt-22] - _ = x[AttachTraceRawTp-23] - _ = x[AttachTraceFEntry-24] - _ = x[AttachTraceFExit-25] - _ = x[AttachModifyReturn-26] - _ = x[AttachLSMMac-27] - _ = x[AttachTraceIter-28] - _ = x[AttachCgroupInet4GetPeername-29] - _ = x[AttachCgroupInet6GetPeername-30] - _ = x[AttachCgroupInet4GetSockname-31] - _ = x[AttachCgroupInet6GetSockname-32] - _ = x[AttachXDPDevMap-33] - _ = x[AttachCgroupInetSockRelease-34] - _ = x[AttachXDPCPUMap-35] - _ = x[AttachSkLookup-36] - _ = x[AttachXDP-37] -} - -const _AttachType_name = 
"AttachNoneAttachCGroupInetEgressAttachCGroupInetSockCreateAttachCGroupSockOpsAttachSkSKBStreamParserAttachSkSKBStreamVerdictAttachCGroupDeviceAttachSkMsgVerdictAttachCGroupInet4BindAttachCGroupInet6BindAttachCGroupInet4ConnectAttachCGroupInet6ConnectAttachCGroupInet4PostBindAttachCGroupInet6PostBindAttachCGroupUDP4SendmsgAttachCGroupUDP6SendmsgAttachLircMode2AttachFlowDissectorAttachCGroupSysctlAttachCGroupUDP4RecvmsgAttachCGroupUDP6RecvmsgAttachCGroupGetsockoptAttachCGroupSetsockoptAttachTraceRawTpAttachTraceFEntryAttachTraceFExitAttachModifyReturnAttachLSMMacAttachTraceIterAttachCgroupInet4GetPeernameAttachCgroupInet6GetPeernameAttachCgroupInet4GetSocknameAttachCgroupInet6GetSocknameAttachXDPDevMapAttachCgroupInetSockReleaseAttachXDPCPUMapAttachSkLookupAttachXDP" - -var _AttachType_index = [...]uint16{0, 10, 32, 58, 77, 100, 124, 142, 160, 181, 202, 226, 250, 275, 300, 323, 346, 361, 380, 398, 421, 444, 466, 488, 504, 521, 537, 555, 567, 582, 610, 638, 666, 694, 709, 736, 751, 765, 774} - -func (i AttachType) String() string { - if i >= AttachType(len(_AttachType_index)-1) { - return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[PinNone-0] - _ = x[PinByName-1] -} - -const _PinType_name = "PinNonePinByName" - -var _PinType_index = [...]uint8{0, 7, 16} - -func (i PinType) String() string { - if i < 0 || i >= PinType(len(_PinType_index)-1) { - return "PinType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _PinType_name[_PinType_index[i]:_PinType_index[i+1]] -} diff --git a/vendor/github.com/containerd/btrfs/.gitignore b/vendor/github.com/containerd/btrfs/.gitignore deleted file mode 100644 index 9b781b5..0000000 --- a/vendor/github.com/containerd/btrfs/.gitignore +++ /dev/null @@ -1,28 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so -bin/ - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# Support running go modules in vendor mode for local development -/vendor/ diff --git a/vendor/github.com/containerd/btrfs/LICENSE b/vendor/github.com/containerd/btrfs/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/vendor/github.com/containerd/btrfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/btrfs/README.md b/vendor/github.com/containerd/btrfs/README.md deleted file mode 100644 index 505f39b..0000000 --- a/vendor/github.com/containerd/btrfs/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# go-btrfs - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/btrfs)](https://pkg.go.dev/github.com/containerd/btrfs) -[![Build Status](https://github.com/containerd/btrfs/workflows/CI/badge.svg)](https://github.com/containerd/btrfs/actions?query=workflow%3ACI) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/btrfs)](https://goreportcard.com/report/github.com/containerd/btrfs) - -Native Go bindings for btrfs. - -# Status - -These are in the early stages. We will try to maintain stability, but please -vendor if you are relying on these directly. - -# Contribute - -This package may not cover all the use cases for btrfs. If something you need -is missing, please don't hesitate to submit a PR. - -Note that due to struct alignment issues, this isn't yet fully native. -Preferably, this could be resolved, so contributions in this direction are -greatly appreciated. - -## Applying License Header to New Files - -If you submit a contribution that adds a new file, please add the license -header. 
You can do so manually or use the `ltag` tool: - - -```console -$ go get github.com/kunalkushwaha/ltag -$ ltag -t ./license-templates -``` - -The above will add the appropriate licenses to Go files. New templates will -need to be added if other kinds of files are added. Please consult the -documentation at https://github.com/kunalkushwaha/ltag - -## Project details - -btrfs is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/btrfs/btrfs.c b/vendor/github.com/containerd/btrfs/btrfs.c deleted file mode 100644 index f0da012..0000000 --- a/vendor/github.com/containerd/btrfs/btrfs.c +++ /dev/null @@ -1,33 +0,0 @@ -/* - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -#include -#include -#include -#include - -#include "btrfs.h" - -void unpack_root_item(struct gosafe_btrfs_root_item* dst, struct btrfs_root_item* src) { - memcpy(dst->uuid, src->uuid, BTRFS_UUID_SIZE); - memcpy(dst->parent_uuid, src->parent_uuid, BTRFS_UUID_SIZE); - memcpy(dst->received_uuid, src->received_uuid, BTRFS_UUID_SIZE); - dst->gen = btrfs_root_generation(src); - dst->ogen = btrfs_root_otransid(src); - dst->flags = btrfs_root_flags(src); -} - -/* unpack_root_ref(struct gosafe_btrfs_root_ref* dst, struct btrfs_root_ref* src) { */ diff --git a/vendor/github.com/containerd/btrfs/btrfs.go b/vendor/github.com/containerd/btrfs/btrfs.go deleted file mode 100644 index f9c30b3..0000000 --- a/vendor/github.com/containerd/btrfs/btrfs.go +++ /dev/null @@ -1,412 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package btrfs - -/* -#include -#include -#include "btrfs.h" - -static char* get_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct) { - return btrfs_struct->name; -} -*/ -import "C" - -import ( - "os" - "path/filepath" - "sort" - "syscall" - "unsafe" - - "github.com/pkg/errors" -) - -// maxByteSliceSize is the smallest size that Go supports on various platforms. -// On mipsle, 1<<31-1 overflows the address space. -const maxByteSliceSize = 1 << 30 - -// IsSubvolume returns nil if the path is a valid subvolume. 
An error is -// returned if the path does not exist or the path is not a valid subvolume. -func IsSubvolume(path string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - if err := isFileInfoSubvol(fi); err != nil { - return err - } - - var statfs syscall.Statfs_t - if err := syscall.Statfs(path, &statfs); err != nil { - return err - } - - return isStatfsSubvol(&statfs) -} - -// SubvolID returns the subvolume ID for the provided path -func SubvolID(path string) (uint64, error) { - fp, err := openSubvolDir(path) - if err != nil { - return 0, err - } - defer fp.Close() - - return subvolID(fp.Fd()) -} - -// SubvolInfo returns information about the subvolume at the provided path. -func SubvolInfo(path string) (info Info, err error) { - path, err = filepath.EvalSymlinks(path) - if err != nil { - return info, err - } - - fp, err := openSubvolDir(path) - if err != nil { - return info, err - } - defer fp.Close() - - id, err := subvolID(fp.Fd()) - if err != nil { - return info, err - } - - subvolsByID, err := subvolMap(path) - if err != nil { - return info, err - } - - if info, ok := subvolsByID[id]; ok { - return *info, nil - } - - return info, errors.Errorf("%q not found", path) -} - -func subvolMap(path string) (map[uint64]*Info, error) { - fp, err := openSubvolDir(path) - if err != nil { - return nil, err - } - defer fp.Close() - - var args C.struct_btrfs_ioctl_search_args - - args.key.tree_id = C.BTRFS_ROOT_TREE_OBJECTID - args.key.min_type = C.BTRFS_ROOT_ITEM_KEY - args.key.max_type = C.BTRFS_ROOT_BACKREF_KEY - args.key.min_objectid = C.BTRFS_FS_TREE_OBJECTID - args.key.max_objectid = C.BTRFS_LAST_FREE_OBJECTID - args.key.max_offset = ^C.__u64(0) - args.key.max_transid = ^C.__u64(0) - - subvolsByID := make(map[uint64]*Info) - - for { - args.key.nr_items = 4096 - if err := ioctl(fp.Fd(), C.BTRFS_IOC_TREE_SEARCH, uintptr(unsafe.Pointer(&args))); err != nil { - return nil, err - } - - if args.key.nr_items == 0 { - break - } - - var ( - sh 
C.struct_btrfs_ioctl_search_header - shSize = unsafe.Sizeof(sh) - buf = (*[maxByteSliceSize]byte)(unsafe.Pointer(&args.buf[0]))[:C.BTRFS_SEARCH_ARGS_BUFSIZE] - ) - - for i := 0; i < int(args.key.nr_items); i++ { - sh = (*(*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&buf[0]))) - buf = buf[shSize:] - - info := subvolsByID[uint64(sh.objectid)] - if info == nil { - info = &Info{} - } - info.ID = uint64(sh.objectid) - - if sh._type == C.BTRFS_ROOT_BACKREF_KEY { - rr := (*(*C.struct_btrfs_root_ref)(unsafe.Pointer(&buf[0]))) - - // This branch processes the backrefs from the root object. We - // get an entry of the objectid, with name, but the parent is - // the offset. - - nname := C.btrfs_stack_root_ref_name_len(&rr) - name := string(buf[C.sizeof_struct_btrfs_root_ref : C.sizeof_struct_btrfs_root_ref+uintptr(nname)]) - - info.ID = uint64(sh.objectid) - info.ParentID = uint64(sh.offset) - info.Name = name - info.DirID = uint64(C.btrfs_stack_root_ref_dirid(&rr)) - - subvolsByID[uint64(sh.objectid)] = info - } else if sh._type == C.BTRFS_ROOT_ITEM_KEY && - (sh.objectid >= C.BTRFS_ROOT_ITEM_KEY || - sh.objectid == C.BTRFS_FS_TREE_OBJECTID) { - - var ( - ri = (*C.struct_btrfs_root_item)(unsafe.Pointer(&buf[0])) - gri C.struct_gosafe_btrfs_root_item - ) - - C.unpack_root_item(&gri, ri) - - if gri.flags&C.BTRFS_ROOT_SUBVOL_RDONLY != 0 { - info.Readonly = true - } - - // in this case, the offset is the actual offset. - info.Offset = uint64(sh.offset) - - info.UUID = uuidString(&gri.uuid) - info.ParentUUID = uuidString(&gri.parent_uuid) - info.ReceivedUUID = uuidString(&gri.received_uuid) - - info.Generation = uint64(gri.gen) - info.OriginalGeneration = uint64(gri.ogen) - - subvolsByID[uint64(sh.objectid)] = info - } - - args.key.min_objectid = sh.objectid - args.key.min_offset = sh.offset - args.key.min_type = sh._type // this is very questionable. 
- - buf = buf[sh.len:] - } - - args.key.min_offset++ - if args.key.min_offset == 0 { - args.key.min_type++ - } else { - continue - } - - if args.key.min_type > C.BTRFS_ROOT_BACKREF_KEY { - args.key.min_type = C.BTRFS_ROOT_ITEM_KEY - args.key.min_objectid++ - } else { - continue - } - - if args.key.min_objectid > args.key.max_objectid { - break - } - } - - mnt, err := findMountPoint(path) - if err != nil { - return nil, err - } - - for _, sv := range subvolsByID { - path := sv.Name - parentID := sv.ParentID - - for parentID != 0 { - parent, ok := subvolsByID[parentID] - if !ok { - break - } - - parentID = parent.ParentID - path = filepath.Join(parent.Name, path) - } - - sv.Path = filepath.Join(mnt, path) - } - return subvolsByID, nil -} - -// SubvolList will return the information for all subvolumes corresponding to -// the provided path. -func SubvolList(path string) ([]Info, error) { - subvolsByID, err := subvolMap(path) - if err != nil { - return nil, err - } - - subvols := make([]Info, 0, len(subvolsByID)) - for _, sv := range subvolsByID { - subvols = append(subvols, *sv) - } - - sort.Sort(infosByID(subvols)) - - return subvols, nil -} - -// SubvolCreate creates a subvolume at the provided path. -func SubvolCreate(path string) error { - dir, name := filepath.Split(path) - - fp, err := os.Open(dir) - if err != nil { - return err - } - defer fp.Close() - - var args C.struct_btrfs_ioctl_vol_args - args.fd = C.__s64(fp.Fd()) - - if len(name) > C.BTRFS_PATH_NAME_MAX { - return errors.Errorf("%q too long for subvolume", name) - } - nameptr := (*[maxByteSliceSize]byte)(unsafe.Pointer(&args.name[0]))[:C.BTRFS_PATH_NAME_MAX:C.BTRFS_PATH_NAME_MAX] - copy(nameptr[:C.BTRFS_PATH_NAME_MAX], []byte(name)) - - if err := ioctl(fp.Fd(), C.BTRFS_IOC_SUBVOL_CREATE, uintptr(unsafe.Pointer(&args))); err != nil { - return errors.Wrap(err, "btrfs subvolume create failed") - } - - return nil -} - -// SubvolSnapshot creates a snapshot in dst from src. 
If readonly is true, the -// snapshot will be readonly. -func SubvolSnapshot(dst, src string, readonly bool) error { - dstdir, dstname := filepath.Split(dst) - - dstfp, err := openSubvolDir(dstdir) - if err != nil { - return errors.Wrapf(err, "opening snapshot destination subvolume failed") - } - defer dstfp.Close() - - srcfp, err := openSubvolDir(src) - if err != nil { - return errors.Wrapf(err, "opening snapshot source subvolume failed") - } - defer srcfp.Close() - - // dstdir is the ioctl arg, wile srcdir gets set on the args - var args C.struct_btrfs_ioctl_vol_args_v2 - args.fd = C.__s64(srcfp.Fd()) - name := C.get_name_btrfs_ioctl_vol_args_v2(&args) - - if len(dstname) > C.BTRFS_SUBVOL_NAME_MAX { - return errors.Errorf("%q too long for subvolume", dstname) - } - - nameptr := (*[maxByteSliceSize]byte)(unsafe.Pointer(name))[:C.BTRFS_SUBVOL_NAME_MAX:C.BTRFS_SUBVOL_NAME_MAX] - copy(nameptr[:C.BTRFS_SUBVOL_NAME_MAX], []byte(dstname)) - - if readonly { - args.flags |= C.BTRFS_SUBVOL_RDONLY - } - - if err := ioctl(dstfp.Fd(), C.BTRFS_IOC_SNAP_CREATE_V2, uintptr(unsafe.Pointer(&args))); err != nil { - return errors.Wrapf(err, "snapshot create failed") - } - - return nil -} - -// SubvolDelete deletes the subvolumes under the given path. -func SubvolDelete(path string) error { - dir, name := filepath.Split(path) - fp, err := openSubvolDir(dir) - if err != nil { - return errors.Wrapf(err, "failed opening %v", path) - } - defer fp.Close() - - // remove child subvolumes - if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) || p == path { - return nil - } - - return errors.Wrapf(err, "failed walking subvolume %v", p) - } - - if !fi.IsDir() { - return nil // just ignore it! 
- } - - if p == path { - return nil - } - - if err := isFileInfoSubvol(fi); err != nil { - return nil - } - - if err := SubvolDelete(p); err != nil { - return errors.Wrapf(err, "recursive delete of %v failed", p) - } - - return filepath.SkipDir // children get walked by call above. - }); err != nil { - return err - } - - var args C.struct_btrfs_ioctl_vol_args - if len(name) > C.BTRFS_SUBVOL_NAME_MAX { - return errors.Errorf("%q too long for subvolume", name) - } - - nameptr := (*[maxByteSliceSize]byte)(unsafe.Pointer(&args.name[0]))[:C.BTRFS_SUBVOL_NAME_MAX:C.BTRFS_SUBVOL_NAME_MAX] - copy(nameptr[:C.BTRFS_SUBVOL_NAME_MAX], []byte(name)) - - if err := ioctl(fp.Fd(), C.BTRFS_IOC_SNAP_DESTROY, uintptr(unsafe.Pointer(&args))); err != nil { - return errors.Wrapf(err, "failed removing subvolume %v", path) - } - - return nil -} - -func openSubvolDir(path string) (*os.File, error) { - fp, err := os.Open(path) - if err != nil { - return nil, errors.Wrapf(err, "opening %v as subvolume failed", path) - } - - return fp, nil -} - -func isStatfsSubvol(statfs *syscall.Statfs_t) error { - if int64(statfs.Type) != int64(C.BTRFS_SUPER_MAGIC) { - return errors.Errorf("not a btrfs filesystem") - } - - return nil -} - -func isFileInfoSubvol(fi os.FileInfo) error { - if !fi.IsDir() { - errors.Errorf("must be a directory") - } - - stat := fi.Sys().(*syscall.Stat_t) - - if stat.Ino != C.BTRFS_FIRST_FREE_OBJECTID { - return errors.Errorf("incorrect inode type") - } - - return nil -} diff --git a/vendor/github.com/containerd/btrfs/btrfs.h b/vendor/github.com/containerd/btrfs/btrfs.h deleted file mode 100644 index 1ec451e..0000000 --- a/vendor/github.com/containerd/btrfs/btrfs.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -#include -#include -#include -#include - -// unfortunately, we need to define "alignment safe" C structs to populate for -// packed structs that aren't handled by cgo. Fields will be added here, as -// needed. - -struct gosafe_btrfs_root_item { - u8 uuid[BTRFS_UUID_SIZE]; - u8 parent_uuid[BTRFS_UUID_SIZE]; - u8 received_uuid[BTRFS_UUID_SIZE]; - - u64 gen; - u64 ogen; - u64 flags; -}; - -void unpack_root_item(struct gosafe_btrfs_root_item* dst, struct btrfs_root_item* src); -/* void unpack_root_ref(struct gosafe_btrfs_root_ref* dst, struct btrfs_root_ref* src); */ diff --git a/vendor/github.com/containerd/btrfs/helpers.go b/vendor/github.com/containerd/btrfs/helpers.go deleted file mode 100644 index 475f1c6..0000000 --- a/vendor/github.com/containerd/btrfs/helpers.go +++ /dev/null @@ -1,102 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package btrfs - -/* -#include -#include -#include -*/ -import "C" - -import ( - "bufio" - "bytes" - "fmt" - "os" - "strings" - "unsafe" - - "github.com/pkg/errors" -) - -func subvolID(fd uintptr) (uint64, error) { - var args C.struct_btrfs_ioctl_ino_lookup_args - args.objectid = C.BTRFS_FIRST_FREE_OBJECTID - - if err := ioctl(fd, C.BTRFS_IOC_INO_LOOKUP, uintptr(unsafe.Pointer(&args))); err != nil { - return 0, err - } - - return uint64(args.treeid), nil -} - -var ( - zeroArray = [16]byte{} - zeros = zeroArray[:] -) - -func uuidString(uuid *[C.BTRFS_UUID_SIZE]C.u8) string { - b := (*[maxByteSliceSize]byte)(unsafe.Pointer(uuid))[:C.BTRFS_UUID_SIZE] - - if bytes.Equal(b, zeros) { - return "" - } - - return fmt.Sprintf("%x-%x-%x-%x-%x", b[:4], b[4:4+2], b[6:6+2], b[8:8+2], b[10:16]) -} - -func findMountPoint(path string) (string, error) { - fp, err := os.Open("/proc/self/mounts") - if err != nil { - return "", err - } - defer fp.Close() - - const ( - deviceIdx = 0 - pathIdx = 1 - typeIdx = 2 - options = 3 - ) - - var ( - mount string - scanner = bufio.NewScanner(fp) - ) - - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - if fields[typeIdx] != "btrfs" { - continue // skip non-btrfs - } - - if strings.HasPrefix(path, fields[pathIdx]) { - mount = fields[pathIdx] - } - } - - if scanner.Err() != nil { - return "", scanner.Err() - } - - if mount == "" { - return "", errors.Errorf("mount point of %v not found", path) - } - - return mount, nil -} diff --git a/vendor/github.com/containerd/btrfs/info.go b/vendor/github.com/containerd/btrfs/info.go deleted file mode 100644 index 0f96be6..0000000 --- a/vendor/github.com/containerd/btrfs/info.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package btrfs - -// Info describes metadata about a btrfs subvolume. -type Info struct { - ID uint64 // subvolume id - ParentID uint64 // aka ref_tree - TopLevelID uint64 // not actually clear what this is, not set for now. - Offset uint64 // key offset for root - DirID uint64 - - Generation uint64 - OriginalGeneration uint64 - - UUID string - ParentUUID string - ReceivedUUID string - - Name string - Path string // absolute path of subvolume - Root string // path of root mount point - - Readonly bool // true if the snaps hot is readonly, extracted from flags -} - -type infosByID []Info - -func (b infosByID) Len() int { return len(b) } -func (b infosByID) Less(i, j int) bool { return b[i].ID < b[j].ID } -func (b infosByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/github.com/containerd/cgroups/.gitignore b/vendor/github.com/containerd/cgroups/.gitignore deleted file mode 100644 index 3465c14..0000000 --- a/vendor/github.com/containerd/cgroups/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -example/example -cmd/cgctl/cgctl diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/containerd/cgroups/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/containerd/cgroups/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/cgroups/Makefile b/vendor/github.com/containerd/cgroups/Makefile deleted file mode 100644 index 19e6607..0000000 --- a/vendor/github.com/containerd/cgroups/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright The containerd Authors. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -PACKAGES=$(shell go list ./... | grep -v /vendor/) - -all: cgutil - go build -v - -cgutil: - cd cmd/cgctl && go build -v - -proto: - protobuild --quiet ${PACKAGES} diff --git a/vendor/github.com/containerd/cgroups/Protobuild.toml b/vendor/github.com/containerd/cgroups/Protobuild.toml deleted file mode 100644 index 1c4c802..0000000 --- a/vendor/github.com/containerd/cgroups/Protobuild.toml +++ /dev/null @@ -1,46 +0,0 @@ -version = "unstable" -generator = "gogoctrd" -plugins = ["grpc"] - -# Control protoc include paths. Below are usually some good defaults, but feel -# free to try it without them if it works for your project. -[includes] - # Include paths that will be added before all others. Typically, you want to - # treat the root of the project as an include, but this may not be necessary. - # before = ["."] - - # Paths that should be treated as include roots in relation to the vendor - # directory. These will be calculated with the vendor directory nearest the - # target package. - # vendored = ["github.com/gogo/protobuf"] - packages = ["github.com/gogo/protobuf"] - - # Paths that will be added untouched to the end of the includes. We use - # `/usr/local/include` to pickup the common install location of protobuf. - # This is the default. - after = ["/usr/local/include", "/usr/include"] - -# This section maps protobuf imports to Go packages. These will become -# `-M` directives in the call to the go protobuf generator. -[packages] - "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto" - "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types" - -# Aggregrate the API descriptors to lock down API changes. 
-[[descriptors]] -prefix = "github.com/containerd/cgroups/stats/v1" -target = "stats/v1/metrics.pb.txt" -ignore_files = [ - "google/protobuf/descriptor.proto", - "gogoproto/gogo.proto" -] -[[descriptors]] -prefix = "github.com/containerd/cgroups/v2/stats" -target = "v2/stats/metrics.pb.txt" -ignore_files = [ - "google/protobuf/descriptor.proto", - "gogoproto/gogo.proto" -] diff --git a/vendor/github.com/containerd/cgroups/README.md b/vendor/github.com/containerd/cgroups/README.md deleted file mode 100644 index d4b09f3..0000000 --- a/vendor/github.com/containerd/cgroups/README.md +++ /dev/null @@ -1,149 +0,0 @@ -# cgroups - -[![Build Status](https://github.com/containerd/cgroups/workflows/CI/badge.svg)](https://github.com/containerd/cgroups/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/cgroups/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/cgroups) -[![GoDoc](https://godoc.org/github.com/containerd/cgroups?status.svg)](https://godoc.org/github.com/containerd/cgroups) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/cgroups)](https://goreportcard.com/report/github.com/containerd/cgroups) - -Go package for creating, managing, inspecting, and destroying cgroups. -The resources format for settings on the cgroup uses the OCI runtime-spec found -[here](https://github.com/opencontainers/runtime-spec). - -## Examples - -### Create a new cgroup - -This creates a new cgroup using a static path for all subsystems under `/test`. - -* /sys/fs/cgroup/cpu/test -* /sys/fs/cgroup/memory/test -* etc.... - -It uses a single hierarchy and specifies cpu shares as a resource constraint and -uses the v1 implementation of cgroups. 
- - -```go -shares := uint64(100) -control, err := cgroups.New(cgroups.V1, cgroups.StaticPath("/test"), &specs.LinuxResources{ - CPU: &specs.CPU{ - Shares: &shares, - }, -}) -defer control.Delete() -``` - -### Create with systemd slice support - - -```go -control, err := cgroups.New(cgroups.Systemd, cgroups.Slice("system.slice", "runc-test"), &specs.LinuxResources{ - CPU: &specs.CPU{ - Shares: &shares, - }, -}) - -``` - -### Load an existing cgroup - -```go -control, err = cgroups.Load(cgroups.V1, cgroups.StaticPath("/test")) -``` - -### Add a process to the cgroup - -```go -if err := control.Add(cgroups.Process{Pid:1234}); err != nil { -} -``` - -### Update the cgroup - -To update the resources applied in the cgroup - -```go -shares = uint64(200) -if err := control.Update(&specs.LinuxResources{ - CPU: &specs.LinuxCPU{ - Shares: &shares, - }, -}); err != nil { -} -``` - -### Freeze and Thaw the cgroup - -```go -if err := control.Freeze(); err != nil { -} -if err := control.Thaw(); err != nil { -} -``` - -### List all processes in the cgroup or recursively - -```go -processes, err := control.Processes(cgroups.Devices, recursive) -``` - -### Get Stats on the cgroup - -```go -stats, err := control.Stat() -``` - -By adding `cgroups.IgnoreNotExist` all non-existent files will be ignored, e.g. swap memory stats without swap enabled -```go -stats, err := control.Stat(cgroups.IgnoreNotExist) -``` - -### Move process across cgroups - -This allows you to take processes from one cgroup and move them to another. - -```go -err := control.MoveTo(destination) -``` - -### Create subcgroup - -```go -subCgroup, err := control.New("child", resources) -``` - -### Registering for memory events - -This allows you to get notified by an eventfd for v1 memory cgroups events. 
- -```go -event := cgroups.MemoryThresholdEvent(50 * 1024 * 1024, false) -efd, err := control.RegisterMemoryEvent(event) -``` - -```go -event := cgroups.MemoryPressureEvent(cgroups.MediumPressure, cgroups.DefaultMode) -efd, err := control.RegisterMemoryEvent(event) -``` - -```go -efd, err := control.OOMEventFD() -// or by using RegisterMemoryEvent -event := cgroups.OOMEvent() -efd, err := control.RegisterMemoryEvent(event) -``` - -### Attention - -All static path should not include `/sys/fs/cgroup/` prefix, it should start with your own cgroups name - -## Project details - -Cgroups is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. 
diff --git a/vendor/github.com/containerd/cgroups/Vagrantfile b/vendor/github.com/containerd/cgroups/Vagrantfile deleted file mode 100644 index 4596ad8..0000000 --- a/vendor/github.com/containerd/cgroups/Vagrantfile +++ /dev/null @@ -1,46 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -Vagrant.configure("2") do |config| -# Fedora box is used for testing cgroup v2 support - config.vm.box = "fedora/32-cloud-base" - config.vm.provider :virtualbox do |v| - v.memory = 2048 - v.cpus = 2 - end - config.vm.provider :libvirt do |v| - v.memory = 2048 - v.cpus = 2 - end - config.vm.provision "shell", inline: <<-SHELL - set -eux -o pipefail - # configuration - GO_VERSION="1.15" - - # install gcc and Golang - dnf -y install gcc - curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local - - # setup env vars - cat >> /etc/profile.d/sh.local < /test.sh < 0 { - return nil - } - - // Even the kernel is compiled with the CFQ scheduler, the cgroup may not use - // block devices with the CFQ scheduler. If so, we should fallback to throttle.* files. 
- settings = []blkioStatSettings{ - { - name: "throttle.io_serviced", - entry: &stats.Blkio.IoServicedRecursive, - }, - { - name: "throttle.io_service_bytes", - entry: &stats.Blkio.IoServiceBytesRecursive, - }, - } - for _, t := range settings { - if err := b.readEntry(devices, path, t.name, t.entry); err != nil { - return err - } - } - return nil -} - -func (b *blkioController) readEntry(devices map[deviceKey]string, path, name string, entry *[]*v1.BlkIOEntry) error { - f, err := os.Open(filepath.Join(b.Path(path), "blkio."+name)) - if err != nil { - return err - } - defer f.Close() - sc := bufio.NewScanner(f) - for sc.Scan() { - // format: dev type amount - fields := strings.FieldsFunc(sc.Text(), splitBlkIOStatLine) - if len(fields) < 3 { - if len(fields) == 2 && fields[0] == "Total" { - // skip total line - continue - } else { - return fmt.Errorf("invalid line found while parsing %s: %s", path, sc.Text()) - } - } - major, err := strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return err - } - minor, err := strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return err - } - op := "" - valueField := 2 - if len(fields) == 4 { - op = fields[2] - valueField = 3 - } - v, err := strconv.ParseUint(fields[valueField], 10, 64) - if err != nil { - return err - } - *entry = append(*entry, &v1.BlkIOEntry{ - Device: devices[deviceKey{major, minor}], - Major: major, - Minor: minor, - Op: op, - Value: v, - }) - } - return sc.Err() -} - -func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings { - settings := []blkioSettings{} - - if blkio.Weight != nil { - settings = append(settings, - blkioSettings{ - name: "weight", - value: blkio.Weight, - format: uintf, - }) - } - if blkio.LeafWeight != nil { - settings = append(settings, - blkioSettings{ - name: "leaf_weight", - value: blkio.LeafWeight, - format: uintf, - }) - } - for _, wd := range blkio.WeightDevice { - if wd.Weight != nil { - settings = append(settings, - blkioSettings{ - name: "weight_device", 
- value: wd, - format: weightdev, - }) - } - if wd.LeafWeight != nil { - settings = append(settings, - blkioSettings{ - name: "leaf_weight_device", - value: wd, - format: weightleafdev, - }) - } - } - for _, t := range []struct { - name string - list []specs.LinuxThrottleDevice - }{ - { - name: "throttle.read_bps_device", - list: blkio.ThrottleReadBpsDevice, - }, - { - name: "throttle.read_iops_device", - list: blkio.ThrottleReadIOPSDevice, - }, - { - name: "throttle.write_bps_device", - list: blkio.ThrottleWriteBpsDevice, - }, - { - name: "throttle.write_iops_device", - list: blkio.ThrottleWriteIOPSDevice, - }, - } { - for _, td := range t.list { - settings = append(settings, blkioSettings{ - name: t.name, - value: td, - format: throttleddev, - }) - } - } - return settings -} - -type blkioSettings struct { - name string - value interface{} - format func(v interface{}) []byte -} - -type blkioStatSettings struct { - name string - entry *[]*v1.BlkIOEntry -} - -func uintf(v interface{}) []byte { - return []byte(strconv.FormatUint(uint64(*v.(*uint16)), 10)) -} - -func weightdev(v interface{}) []byte { - wd := v.(specs.LinuxWeightDevice) - return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.Weight)) -} - -func weightleafdev(v interface{}) []byte { - wd := v.(specs.LinuxWeightDevice) - return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.LeafWeight)) -} - -func throttleddev(v interface{}) []byte { - td := v.(specs.LinuxThrottleDevice) - return []byte(fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)) -} - -func splitBlkIOStatLine(r rune) bool { - return r == ' ' || r == ':' -} - -type deviceKey struct { - major, minor uint64 -} - -// getDevices makes a best effort attempt to read all the devices into a map -// keyed by major and minor number. Since devices may be mapped multiple times, -// we err on taking the first occurrence. 
-func getDevices(r io.Reader) (map[deviceKey]string, error) { - - var ( - s = bufio.NewScanner(r) - devices = make(map[deviceKey]string) - ) - for s.Scan() { - fields := strings.Fields(s.Text()) - major, err := strconv.Atoi(fields[0]) - if err != nil { - return nil, err - } - minor, err := strconv.Atoi(fields[1]) - if err != nil { - return nil, err - } - key := deviceKey{ - major: uint64(major), - minor: uint64(minor), - } - if _, ok := devices[key]; ok { - continue - } - devices[key] = filepath.Join("/dev", fields[2]) - } - return devices, s.Err() -} diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go deleted file mode 100644 index e0e014b..0000000 --- a/vendor/github.com/containerd/cgroups/cgroup.go +++ /dev/null @@ -1,552 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - - v1 "github.com/containerd/cgroups/stats/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -// New returns a new control via the cgroup cgroups interface -func New(hierarchy Hierarchy, path Path, resources *specs.LinuxResources, opts ...InitOpts) (Cgroup, error) { - config := newInitConfig() - for _, o := range opts { - if err := o(config); err != nil { - return nil, err - } - } - subsystems, err := hierarchy() - if err != nil { - return nil, err - } - var active []Subsystem - for _, s := range subsystems { - // check if subsystem exists - if err := initializeSubsystem(s, path, resources); err != nil { - if err == ErrControllerNotActive { - if config.InitCheck != nil { - if skerr := config.InitCheck(s, path, err); skerr != nil { - if skerr != ErrIgnoreSubsystem { - return nil, skerr - } - } - } - continue - } - return nil, err - } - active = append(active, s) - } - return &cgroup{ - path: path, - subsystems: active, - }, nil -} - -// Load will load an existing cgroup and allow it to be controlled -// All static path should not include `/sys/fs/cgroup/` prefix, it should start with your own cgroups name -func Load(hierarchy Hierarchy, path Path, opts ...InitOpts) (Cgroup, error) { - config := newInitConfig() - for _, o := range opts { - if err := o(config); err != nil { - return nil, err - } - } - var activeSubsystems []Subsystem - subsystems, err := hierarchy() - if err != nil { - return nil, err - } - // check that the subsystems still exist, and keep only those that actually exist - for _, s := range pathers(subsystems) { - p, err := path(s.Name()) - if err != nil { - if os.IsNotExist(errors.Cause(err)) { - return nil, ErrCgroupDeleted - } - if err == ErrControllerNotActive { - if config.InitCheck != nil { - if skerr := config.InitCheck(s, path, err); skerr != nil { - if skerr != ErrIgnoreSubsystem { - return nil, 
skerr - } - } - } - continue - } - return nil, err - } - if _, err := os.Lstat(s.Path(p)); err != nil { - if os.IsNotExist(err) { - continue - } - return nil, err - } - activeSubsystems = append(activeSubsystems, s) - } - // if we do not have any active systems then the cgroup is deleted - if len(activeSubsystems) == 0 { - return nil, ErrCgroupDeleted - } - return &cgroup{ - path: path, - subsystems: activeSubsystems, - }, nil -} - -type cgroup struct { - path Path - - subsystems []Subsystem - mu sync.Mutex - err error -} - -// New returns a new sub cgroup -func (c *cgroup) New(name string, resources *specs.LinuxResources) (Cgroup, error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return nil, c.err - } - path := subPath(c.path, name) - for _, s := range c.subsystems { - if err := initializeSubsystem(s, path, resources); err != nil { - return nil, err - } - } - return &cgroup{ - path: path, - subsystems: c.subsystems, - }, nil -} - -// Subsystems returns all the subsystems that are currently being -// consumed by the group -func (c *cgroup) Subsystems() []Subsystem { - return c.subsystems -} - -// Add moves the provided process into the new cgroup -func (c *cgroup) Add(process Process) error { - if process.Pid <= 0 { - return ErrInvalidPid - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - return c.add(process) -} - -func (c *cgroup) add(process Process) error { - for _, s := range pathers(c.subsystems) { - p, err := c.path(s.Name()) - if err != nil { - return err - } - if err := retryingWriteFile( - filepath.Join(s.Path(p), cgroupProcs), - []byte(strconv.Itoa(process.Pid)), - defaultFilePerm, - ); err != nil { - return err - } - } - return nil -} - -// AddTask moves the provided tasks (threads) into the new cgroup -func (c *cgroup) AddTask(process Process) error { - if process.Pid <= 0 { - return ErrInvalidPid - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - return c.addTask(process) -} 
- -func (c *cgroup) addTask(process Process) error { - for _, s := range pathers(c.subsystems) { - p, err := c.path(s.Name()) - if err != nil { - return err - } - if err := retryingWriteFile( - filepath.Join(s.Path(p), cgroupTasks), - []byte(strconv.Itoa(process.Pid)), - defaultFilePerm, - ); err != nil { - return err - } - } - return nil -} - -// Delete will remove the control group from each of the subsystems registered -func (c *cgroup) Delete() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - var errs []string - for _, s := range c.subsystems { - if d, ok := s.(deleter); ok { - sp, err := c.path(s.Name()) - if err != nil { - return err - } - if err := d.Delete(sp); err != nil { - errs = append(errs, string(s.Name())) - } - continue - } - if p, ok := s.(pather); ok { - sp, err := c.path(s.Name()) - if err != nil { - return err - } - path := p.Path(sp) - if err := remove(path); err != nil { - errs = append(errs, path) - } - } - } - if len(errs) > 0 { - return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errs, ", ")) - } - c.err = ErrCgroupDeleted - return nil -} - -// Stat returns the current metrics for the cgroup -func (c *cgroup) Stat(handlers ...ErrorHandler) (*v1.Metrics, error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return nil, c.err - } - if len(handlers) == 0 { - handlers = append(handlers, errPassthrough) - } - var ( - stats = &v1.Metrics{ - CPU: &v1.CPUStat{ - Throttling: &v1.Throttle{}, - Usage: &v1.CPUUsage{}, - }, - } - wg = &sync.WaitGroup{} - errs = make(chan error, len(c.subsystems)) - ) - for _, s := range c.subsystems { - if ss, ok := s.(stater); ok { - sp, err := c.path(s.Name()) - if err != nil { - return nil, err - } - wg.Add(1) - go func() { - defer wg.Done() - if err := ss.Stat(sp, stats); err != nil { - for _, eh := range handlers { - if herr := eh(err); herr != nil { - errs <- herr - } - } - } - }() - } - } - wg.Wait() - close(errs) - for err := range errs { - 
return nil, err - } - return stats, nil -} - -// Update updates the cgroup with the new resource values provided -// -// Be prepared to handle EBUSY when trying to update a cgroup with -// live processes and other operations like Stats being performed at the -// same time -func (c *cgroup) Update(resources *specs.LinuxResources) error { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - for _, s := range c.subsystems { - if u, ok := s.(updater); ok { - sp, err := c.path(s.Name()) - if err != nil { - return err - } - if err := u.Update(sp, resources); err != nil { - return err - } - } - } - return nil -} - -// Processes returns the processes running inside the cgroup along -// with the subsystem used, pid, and path -func (c *cgroup) Processes(subsystem Name, recursive bool) ([]Process, error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return nil, c.err - } - return c.processes(subsystem, recursive) -} - -func (c *cgroup) processes(subsystem Name, recursive bool) ([]Process, error) { - s := c.getSubsystem(subsystem) - sp, err := c.path(subsystem) - if err != nil { - return nil, err - } - path := s.(pather).Path(sp) - var processes []Process - err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !recursive && info.IsDir() { - if p == path { - return nil - } - return filepath.SkipDir - } - dir, name := filepath.Split(p) - if name != cgroupProcs { - return nil - } - procs, err := readPids(dir, subsystem) - if err != nil { - return err - } - processes = append(processes, procs...) 
- return nil - }) - return processes, err -} - -// Tasks returns the tasks running inside the cgroup along -// with the subsystem used, pid, and path -func (c *cgroup) Tasks(subsystem Name, recursive bool) ([]Task, error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return nil, c.err - } - return c.tasks(subsystem, recursive) -} - -func (c *cgroup) tasks(subsystem Name, recursive bool) ([]Task, error) { - s := c.getSubsystem(subsystem) - sp, err := c.path(subsystem) - if err != nil { - return nil, err - } - path := s.(pather).Path(sp) - var tasks []Task - err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !recursive && info.IsDir() { - if p == path { - return nil - } - return filepath.SkipDir - } - dir, name := filepath.Split(p) - if name != cgroupTasks { - return nil - } - procs, err := readTasksPids(dir, subsystem) - if err != nil { - return err - } - tasks = append(tasks, procs...) - return nil - }) - return tasks, err -} - -// Freeze freezes the entire cgroup and all the processes inside it -func (c *cgroup) Freeze() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - s := c.getSubsystem(Freezer) - if s == nil { - return ErrFreezerNotSupported - } - sp, err := c.path(Freezer) - if err != nil { - return err - } - return s.(*freezerController).Freeze(sp) -} - -// Thaw thaws out the cgroup and all the processes inside it -func (c *cgroup) Thaw() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - s := c.getSubsystem(Freezer) - if s == nil { - return ErrFreezerNotSupported - } - sp, err := c.path(Freezer) - if err != nil { - return err - } - return s.(*freezerController).Thaw(sp) -} - -// OOMEventFD returns the memory cgroup's out of memory event fd that triggers -// when processes inside the cgroup receive an oom event. Returns -// ErrMemoryNotSupported if memory cgroups is not supported. 
-func (c *cgroup) OOMEventFD() (uintptr, error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return 0, c.err - } - s := c.getSubsystem(Memory) - if s == nil { - return 0, ErrMemoryNotSupported - } - sp, err := c.path(Memory) - if err != nil { - return 0, err - } - return s.(*memoryController).memoryEvent(sp, OOMEvent()) -} - -// RegisterMemoryEvent allows the ability to register for all v1 memory cgroups -// notifications. -func (c *cgroup) RegisterMemoryEvent(event MemoryEvent) (uintptr, error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return 0, c.err - } - s := c.getSubsystem(Memory) - if s == nil { - return 0, ErrMemoryNotSupported - } - sp, err := c.path(Memory) - if err != nil { - return 0, err - } - return s.(*memoryController).memoryEvent(sp, event) -} - -// State returns the state of the cgroup and its processes -func (c *cgroup) State() State { - c.mu.Lock() - defer c.mu.Unlock() - c.checkExists() - if c.err != nil && c.err == ErrCgroupDeleted { - return Deleted - } - s := c.getSubsystem(Freezer) - if s == nil { - return Thawed - } - sp, err := c.path(Freezer) - if err != nil { - return Unknown - } - state, err := s.(*freezerController).state(sp) - if err != nil { - return Unknown - } - return state -} - -// MoveTo does a recursive move subsystem by subsystem of all the processes -// inside the group -func (c *cgroup) MoveTo(destination Cgroup) error { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - for _, s := range c.subsystems { - processes, err := c.processes(s.Name(), true) - if err != nil { - return err - } - for _, p := range processes { - if err := destination.Add(p); err != nil { - if strings.Contains(err.Error(), "no such process") { - continue - } - return err - } - } - } - return nil -} - -func (c *cgroup) getSubsystem(n Name) Subsystem { - for _, s := range c.subsystems { - if s.Name() == n { - return s - } - } - return nil -} - -func (c *cgroup) checkExists() { - for _, s := range 
pathers(c.subsystems) { - p, err := c.path(s.Name()) - if err != nil { - return - } - if _, err := os.Lstat(s.Path(p)); err != nil { - if os.IsNotExist(err) { - c.err = ErrCgroupDeleted - return - } - } - } -} diff --git a/vendor/github.com/containerd/cgroups/control.go b/vendor/github.com/containerd/cgroups/control.go deleted file mode 100644 index a4cb9b8..0000000 --- a/vendor/github.com/containerd/cgroups/control.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "os" - - v1 "github.com/containerd/cgroups/stats/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -const ( - cgroupProcs = "cgroup.procs" - cgroupTasks = "tasks" - defaultDirPerm = 0755 -) - -// defaultFilePerm is a var so that the test framework can change the filemode -// of all files created when the tests are running. The difference between the -// tests and real world use is that files like "cgroup.procs" will exist when writing -// to a read cgroup filesystem and do not exist prior when running in the tests. 
-// this is set to a non 0 value in the test code -var defaultFilePerm = os.FileMode(0) - -type Process struct { - // Subsystem is the name of the subsystem that the process is in - Subsystem Name - // Pid is the process id of the process - Pid int - // Path is the full path of the subsystem and location that the process is in - Path string -} - -type Task struct { - // Subsystem is the name of the subsystem that the task is in - Subsystem Name - // Pid is the process id of the task - Pid int - // Path is the full path of the subsystem and location that the task is in - Path string -} - -// Cgroup handles interactions with the individual groups to perform -// actions on them as them main interface to this cgroup package -type Cgroup interface { - // New creates a new cgroup under the calling cgroup - New(string, *specs.LinuxResources) (Cgroup, error) - // Add adds a process to the cgroup (cgroup.procs) - Add(Process) error - // AddTask adds a process to the cgroup (tasks) - AddTask(Process) error - // Delete removes the cgroup as a whole - Delete() error - // MoveTo moves all the processes under the calling cgroup to the provided one - // subsystems are moved one at a time - MoveTo(Cgroup) error - // Stat returns the stats for all subsystems in the cgroup - Stat(...ErrorHandler) (*v1.Metrics, error) - // Update updates all the subsystems with the provided resource changes - Update(resources *specs.LinuxResources) error - // Processes returns all the processes in a select subsystem for the cgroup - Processes(Name, bool) ([]Process, error) - // Tasks returns all the tasks in a select subsystem for the cgroup - Tasks(Name, bool) ([]Task, error) - // Freeze freezes or pauses all processes inside the cgroup - Freeze() error - // Thaw thaw or resumes all processes inside the cgroup - Thaw() error - // OOMEventFD returns the memory subsystem's event fd for OOM events - OOMEventFD() (uintptr, error) - // RegisterMemoryEvent returns the memory subsystems event fd for 
whatever memory event was - // registered for. Can alternatively register for the oom event with this method. - RegisterMemoryEvent(MemoryEvent) (uintptr, error) - // State returns the cgroups current state - State() State - // Subsystems returns all the subsystems in the cgroup - Subsystems() []Subsystem -} diff --git a/vendor/github.com/containerd/cgroups/cpu.go b/vendor/github.com/containerd/cgroups/cpu.go deleted file mode 100644 index 27024f1..0000000 --- a/vendor/github.com/containerd/cgroups/cpu.go +++ /dev/null @@ -1,125 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "bufio" - "os" - "path/filepath" - "strconv" - - v1 "github.com/containerd/cgroups/stats/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewCpu(root string) *cpuController { - return &cpuController{ - root: filepath.Join(root, string(Cpu)), - } -} - -type cpuController struct { - root string -} - -func (c *cpuController) Name() Name { - return Cpu -} - -func (c *cpuController) Path(path string) string { - return filepath.Join(c.root, path) -} - -func (c *cpuController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil { - return err - } - if cpu := resources.CPU; cpu != nil { - for _, t := range []struct { - name string - ivalue *int64 - uvalue *uint64 - }{ - { - name: "rt_period_us", - uvalue: cpu.RealtimePeriod, - }, - { - name: "rt_runtime_us", - ivalue: cpu.RealtimeRuntime, - }, - { - name: "shares", - uvalue: cpu.Shares, - }, - { - name: "cfs_period_us", - uvalue: cpu.Period, - }, - { - name: "cfs_quota_us", - ivalue: cpu.Quota, - }, - } { - var value []byte - if t.uvalue != nil { - value = []byte(strconv.FormatUint(*t.uvalue, 10)) - } else if t.ivalue != nil { - value = []byte(strconv.FormatInt(*t.ivalue, 10)) - } - if value != nil { - if err := retryingWriteFile( - filepath.Join(c.Path(path), "cpu."+t.name), - value, - defaultFilePerm, - ); err != nil { - return err - } - } - } - } - return nil -} - -func (c *cpuController) Update(path string, resources *specs.LinuxResources) error { - return c.Create(path, resources) -} - -func (c *cpuController) Stat(path string, stats *v1.Metrics) error { - f, err := os.Open(filepath.Join(c.Path(path), "cpu.stat")) - if err != nil { - return err - } - defer f.Close() - // get or create the cpu field because cpuacct can also set values on this struct - sc := bufio.NewScanner(f) - for sc.Scan() { - key, v, err := parseKV(sc.Text()) - if err != nil { - return err - } - switch key { - case 
"nr_periods": - stats.CPU.Throttling.Periods = v - case "nr_throttled": - stats.CPU.Throttling.ThrottledPeriods = v - case "throttled_time": - stats.CPU.Throttling.ThrottledTime = v - } - } - return sc.Err() -} diff --git a/vendor/github.com/containerd/cgroups/cpuacct.go b/vendor/github.com/containerd/cgroups/cpuacct.go deleted file mode 100644 index e5fc864..0000000 --- a/vendor/github.com/containerd/cgroups/cpuacct.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "strconv" - "strings" - - v1 "github.com/containerd/cgroups/stats/v1" -) - -const nanosecondsInSecond = 1000000000 - -var clockTicks = getClockTicks() - -func NewCpuacct(root string) *cpuacctController { - return &cpuacctController{ - root: filepath.Join(root, string(Cpuacct)), - } -} - -type cpuacctController struct { - root string -} - -func (c *cpuacctController) Name() Name { - return Cpuacct -} - -func (c *cpuacctController) Path(path string) string { - return filepath.Join(c.root, path) -} - -func (c *cpuacctController) Stat(path string, stats *v1.Metrics) error { - user, kernel, err := c.getUsage(path) - if err != nil { - return err - } - total, err := readUint(filepath.Join(c.Path(path), "cpuacct.usage")) - if err != nil { - return err - } - percpu, err := c.percpuUsage(path) - if err != nil { - return err - } - stats.CPU.Usage.Total = total - stats.CPU.Usage.User = user - stats.CPU.Usage.Kernel = kernel - stats.CPU.Usage.PerCPU = percpu - return nil -} - -func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) { - var usage []uint64 - data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu")) - if err != nil { - return nil, err - } - for _, v := range strings.Fields(string(data)) { - u, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return nil, err - } - usage = append(usage, u) - } - return usage, nil -} - -func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) { - statPath := filepath.Join(c.Path(path), "cpuacct.stat") - data, err := ioutil.ReadFile(statPath) - if err != nil { - return 0, 0, err - } - fields := strings.Fields(string(data)) - if len(fields) != 4 { - return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath) - } - for _, t := range []struct { - index int - name string - value *uint64 - }{ - { - index: 0, - name: "user", - value: &user, - }, - { - index: 2, - name: "system", 
- value: &kernel, - }, - } { - if fields[t.index] != t.name { - return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath) - } - v, err := strconv.ParseUint(fields[t.index+1], 10, 64) - if err != nil { - return 0, 0, err - } - *t.value = v - } - return (user * nanosecondsInSecond) / clockTicks, (kernel * nanosecondsInSecond) / clockTicks, nil -} diff --git a/vendor/github.com/containerd/cgroups/cpuset.go b/vendor/github.com/containerd/cgroups/cpuset.go deleted file mode 100644 index 3cae173..0000000 --- a/vendor/github.com/containerd/cgroups/cpuset.go +++ /dev/null @@ -1,159 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewCpuset(root string) *cpusetController { - return &cpusetController{ - root: filepath.Join(root, string(Cpuset)), - } -} - -type cpusetController struct { - root string -} - -func (c *cpusetController) Name() Name { - return Cpuset -} - -func (c *cpusetController) Path(path string) string { - return filepath.Join(c.root, path) -} - -func (c *cpusetController) Create(path string, resources *specs.LinuxResources) error { - if err := c.ensureParent(c.Path(path), c.root); err != nil { - return err - } - if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil { - return err - } - if err := c.copyIfNeeded(c.Path(path), filepath.Dir(c.Path(path))); err != nil { - return err - } - if resources.CPU != nil { - for _, t := range []struct { - name string - value string - }{ - { - name: "cpus", - value: resources.CPU.Cpus, - }, - { - name: "mems", - value: resources.CPU.Mems, - }, - } { - if t.value != "" { - if err := retryingWriteFile( - filepath.Join(c.Path(path), "cpuset."+t.name), - []byte(t.value), - defaultFilePerm, - ); err != nil { - return err - } - } - } - } - return nil -} - -func (c *cpusetController) Update(path string, resources *specs.LinuxResources) error { - return c.Create(path, resources) -} - -func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) { - if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) { - return - } - if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) { - return - } - return cpus, mems, nil -} - -// ensureParent makes sure that the parent directory of current is created -// and populated with the proper cpus and mems files copied from -// it's parent. 
-func (c *cpusetController) ensureParent(current, root string) error { - parent := filepath.Dir(current) - if _, err := filepath.Rel(root, parent); err != nil { - return nil - } - // Avoid infinite recursion. - if parent == current { - return fmt.Errorf("cpuset: cgroup parent path outside cgroup root") - } - if cleanPath(parent) != root { - if err := c.ensureParent(parent, root); err != nil { - return err - } - } - if err := os.MkdirAll(current, defaultDirPerm); err != nil { - return err - } - return c.copyIfNeeded(current, parent) -} - -// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent -// directory to the current directory if the file's contents are 0 -func (c *cpusetController) copyIfNeeded(current, parent string) error { - var ( - err error - currentCpus, currentMems []byte - parentCpus, parentMems []byte - ) - if currentCpus, currentMems, err = c.getValues(current); err != nil { - return err - } - if parentCpus, parentMems, err = c.getValues(parent); err != nil { - return err - } - if isEmpty(currentCpus) { - if err := retryingWriteFile( - filepath.Join(current, "cpuset.cpus"), - parentCpus, - defaultFilePerm, - ); err != nil { - return err - } - } - if isEmpty(currentMems) { - if err := retryingWriteFile( - filepath.Join(current, "cpuset.mems"), - parentMems, - defaultFilePerm, - ); err != nil { - return err - } - } - return nil -} - -func isEmpty(b []byte) bool { - return len(bytes.Trim(b, "\n")) == 0 -} diff --git a/vendor/github.com/containerd/cgroups/devices.go b/vendor/github.com/containerd/cgroups/devices.go deleted file mode 100644 index 7792566..0000000 --- a/vendor/github.com/containerd/cgroups/devices.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "fmt" - "os" - "path/filepath" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -const ( - allowDeviceFile = "devices.allow" - denyDeviceFile = "devices.deny" - wildcard = -1 -) - -func NewDevices(root string) *devicesController { - return &devicesController{ - root: filepath.Join(root, string(Devices)), - } -} - -type devicesController struct { - root string -} - -func (d *devicesController) Name() Name { - return Devices -} - -func (d *devicesController) Path(path string) string { - return filepath.Join(d.root, path) -} - -func (d *devicesController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(d.Path(path), defaultDirPerm); err != nil { - return err - } - for _, device := range resources.Devices { - file := denyDeviceFile - if device.Allow { - file = allowDeviceFile - } - if device.Type == "" { - device.Type = "a" - } - if err := retryingWriteFile( - filepath.Join(d.Path(path), file), - []byte(deviceString(device)), - defaultFilePerm, - ); err != nil { - return err - } - } - return nil -} - -func (d *devicesController) Update(path string, resources *specs.LinuxResources) error { - return d.Create(path, resources) -} - -func deviceString(device specs.LinuxDeviceCgroup) string { - return fmt.Sprintf("%s %s:%s %s", - device.Type, - deviceNumber(device.Major), - deviceNumber(device.Minor), - device.Access, - ) -} - -func deviceNumber(number *int64) string { - if number == nil || *number == wildcard { - return "*" - } - return fmt.Sprint(*number) -} diff --git 
a/vendor/github.com/containerd/cgroups/errors.go b/vendor/github.com/containerd/cgroups/errors.go deleted file mode 100644 index f1ad831..0000000 --- a/vendor/github.com/containerd/cgroups/errors.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "errors" - "os" -) - -var ( - ErrInvalidPid = errors.New("cgroups: pid must be greater than 0") - ErrMountPointNotExist = errors.New("cgroups: cgroup mountpoint does not exist") - ErrInvalidFormat = errors.New("cgroups: parsing file with invalid format failed") - ErrFreezerNotSupported = errors.New("cgroups: freezer cgroup not supported on this system") - ErrMemoryNotSupported = errors.New("cgroups: memory cgroup not supported on this system") - ErrCgroupDeleted = errors.New("cgroups: cgroup deleted") - ErrNoCgroupMountDestination = errors.New("cgroups: cannot find cgroup mount destination") -) - -// ErrorHandler is a function that handles and acts on errors -type ErrorHandler func(err error) error - -// IgnoreNotExist ignores any errors that are for not existing files -func IgnoreNotExist(err error) error { - if os.IsNotExist(err) { - return nil - } - return err -} - -func errPassthrough(err error) error { - return err -} diff --git a/vendor/github.com/containerd/cgroups/freezer.go b/vendor/github.com/containerd/cgroups/freezer.go deleted file mode 100644 index 59a7e71..0000000 --- a/vendor/github.com/containerd/cgroups/freezer.go 
+++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "io/ioutil" - "path/filepath" - "strings" - "time" -) - -func NewFreezer(root string) *freezerController { - return &freezerController{ - root: filepath.Join(root, string(Freezer)), - } -} - -type freezerController struct { - root string -} - -func (f *freezerController) Name() Name { - return Freezer -} - -func (f *freezerController) Path(path string) string { - return filepath.Join(f.root, path) -} - -func (f *freezerController) Freeze(path string) error { - return f.waitState(path, Frozen) -} - -func (f *freezerController) Thaw(path string) error { - return f.waitState(path, Thawed) -} - -func (f *freezerController) changeState(path string, state State) error { - return retryingWriteFile( - filepath.Join(f.root, path, "freezer.state"), - []byte(strings.ToUpper(string(state))), - defaultFilePerm, - ) -} - -func (f *freezerController) state(path string) (State, error) { - current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state")) - if err != nil { - return "", err - } - return State(strings.ToLower(strings.TrimSpace(string(current)))), nil -} - -func (f *freezerController) waitState(path string, state State) error { - for { - if err := f.changeState(path, state); err != nil { - return err - } - current, err := f.state(path) - if err != nil { - return err - } - if current == state { - return nil - } - 
time.Sleep(1 * time.Millisecond) - } -} diff --git a/vendor/github.com/containerd/cgroups/hierarchy.go b/vendor/github.com/containerd/cgroups/hierarchy.go deleted file mode 100644 index ca3f1b9..0000000 --- a/vendor/github.com/containerd/cgroups/hierarchy.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -// Hierarchy enables both unified and split hierarchy for cgroups -type Hierarchy func() ([]Subsystem, error) diff --git a/vendor/github.com/containerd/cgroups/hugetlb.go b/vendor/github.com/containerd/cgroups/hugetlb.go deleted file mode 100644 index c0eb03b..0000000 --- a/vendor/github.com/containerd/cgroups/hugetlb.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "os" - "path/filepath" - "strconv" - "strings" - - v1 "github.com/containerd/cgroups/stats/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewHugetlb(root string) (*hugetlbController, error) { - sizes, err := hugePageSizes() - if err != nil { - return nil, err - } - - return &hugetlbController{ - root: filepath.Join(root, string(Hugetlb)), - sizes: sizes, - }, nil -} - -type hugetlbController struct { - root string - sizes []string -} - -func (h *hugetlbController) Name() Name { - return Hugetlb -} - -func (h *hugetlbController) Path(path string) string { - return filepath.Join(h.root, path) -} - -func (h *hugetlbController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(h.Path(path), defaultDirPerm); err != nil { - return err - } - for _, limit := range resources.HugepageLimits { - if err := retryingWriteFile( - filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", limit.Pagesize, "limit_in_bytes"}, ".")), - []byte(strconv.FormatUint(limit.Limit, 10)), - defaultFilePerm, - ); err != nil { - return err - } - } - return nil -} - -func (h *hugetlbController) Stat(path string, stats *v1.Metrics) error { - for _, size := range h.sizes { - s, err := h.readSizeStat(path, size) - if err != nil { - return err - } - stats.Hugetlb = append(stats.Hugetlb, s) - } - return nil -} - -func (h *hugetlbController) readSizeStat(path, size string) (*v1.HugetlbStat, error) { - s := v1.HugetlbStat{ - Pagesize: size, - } - for _, t := range []struct { - name string - value *uint64 - }{ - { - name: "usage_in_bytes", - value: &s.Usage, - }, - { - name: "max_usage_in_bytes", - value: &s.Max, - }, - { - name: "failcnt", - value: &s.Failcnt, - }, - } { - v, err := readUint(filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", size, t.name}, "."))) - if err != nil { - return nil, err - } - *t.value = v - } - return &s, nil -} diff --git 
a/vendor/github.com/containerd/cgroups/memory.go b/vendor/github.com/containerd/cgroups/memory.go deleted file mode 100644 index e271866..0000000 --- a/vendor/github.com/containerd/cgroups/memory.go +++ /dev/null @@ -1,480 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "bufio" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" - - v1 "github.com/containerd/cgroups/stats/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" - "golang.org/x/sys/unix" -) - -// MemoryEvent is an interface that V1 memory Cgroup notifications implement. Arg returns the -// file name whose fd should be written to "cgroups.event_control". EventFile returns the name of -// the file that supports the notification api e.g. "memory.usage_in_bytes". -type MemoryEvent interface { - Arg() string - EventFile() string -} - -type memoryThresholdEvent struct { - threshold uint64 - swap bool -} - -// MemoryThresholdEvent returns a new memory threshold event to be used with RegisterMemoryEvent. 
-// If swap is true, the event will be registered using memory.memsw.usage_in_bytes -func MemoryThresholdEvent(threshold uint64, swap bool) MemoryEvent { - return &memoryThresholdEvent{ - threshold, - swap, - } -} - -func (m *memoryThresholdEvent) Arg() string { - return strconv.FormatUint(m.threshold, 10) -} - -func (m *memoryThresholdEvent) EventFile() string { - if m.swap { - return "memory.memsw.usage_in_bytes" - } - return "memory.usage_in_bytes" -} - -type oomEvent struct{} - -// OOMEvent returns a new oom event to be used with RegisterMemoryEvent. -func OOMEvent() MemoryEvent { - return &oomEvent{} -} - -func (oom *oomEvent) Arg() string { - return "" -} - -func (oom *oomEvent) EventFile() string { - return "memory.oom_control" -} - -type memoryPressureEvent struct { - pressureLevel MemoryPressureLevel - hierarchy EventNotificationMode -} - -// MemoryPressureEvent returns a new memory pressure event to be used with RegisterMemoryEvent. -func MemoryPressureEvent(pressureLevel MemoryPressureLevel, hierarchy EventNotificationMode) MemoryEvent { - return &memoryPressureEvent{ - pressureLevel, - hierarchy, - } -} - -func (m *memoryPressureEvent) Arg() string { - return string(m.pressureLevel) + "," + string(m.hierarchy) -} - -func (m *memoryPressureEvent) EventFile() string { - return "memory.pressure_level" -} - -// MemoryPressureLevel corresponds to the memory pressure levels defined -// for memory cgroups. -type MemoryPressureLevel string - -// The three memory pressure levels are as follows. -// - The "low" level means that the system is reclaiming memory for new -// allocations. Monitoring this reclaiming activity might be useful for -// maintaining cache level. Upon notification, the program (typically -// "Activity Manager") might analyze vmstat and act in advance (i.e. -// prematurely shutdown unimportant services). 
-// - The "medium" level means that the system is experiencing medium memory -// pressure, the system might be making swap, paging out active file caches, -// etc. Upon this event applications may decide to further analyze -// vmstat/zoneinfo/memcg or internal memory usage statistics and free any -// resources that can be easily reconstructed or re-read from a disk. -// - The "critical" level means that the system is actively thrashing, it is -// about to out of memory (OOM) or even the in-kernel OOM killer is on its -// way to trigger. Applications should do whatever they can to help the -// system. It might be too late to consult with vmstat or any other -// statistics, so it is advisable to take an immediate action. -// "https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt" Section 11 -const ( - LowPressure MemoryPressureLevel = "low" - MediumPressure MemoryPressureLevel = "medium" - CriticalPressure MemoryPressureLevel = "critical" -) - -// EventNotificationMode corresponds to the notification modes -// for the memory cgroups pressure level notifications. -type EventNotificationMode string - -// There are three optional modes that specify different propagation behavior: -// - "default": this is the default behavior specified above. This mode is the -// same as omitting the optional mode parameter, preserved by backwards -// compatibility. -// - "hierarchy": events always propagate up to the root, similar to the default -// behavior, except that propagation continues regardless of whether there are -// event listeners at each level, with the "hierarchy" mode. In the above -// example, groups A, B, and C will receive notification of memory pressure. -// - "local": events are pass-through, i.e. they only receive notifications when -// memory pressure is experienced in the memcg for which the notification is -// registered. 
In the above example, group C will receive notification if -// registered for "local" notification and the group experiences memory -// pressure. However, group B will never receive notification, regardless if -// there is an event listener for group C or not, if group B is registered for -// local notification. -// "https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt" Section 11 -const ( - DefaultMode EventNotificationMode = "default" - LocalMode EventNotificationMode = "local" - HierarchyMode EventNotificationMode = "hierarchy" -) - -// NewMemory returns a Memory controller given the root folder of cgroups. -// It may optionally accept other configuration options, such as IgnoreModules(...) -func NewMemory(root string, options ...func(*memoryController)) *memoryController { - mc := &memoryController{ - root: filepath.Join(root, string(Memory)), - ignored: map[string]struct{}{}, - } - for _, opt := range options { - opt(mc) - } - return mc -} - -// IgnoreModules configure the memory controller to not read memory metrics for some -// module names (e.g. 
passing "memsw" would avoid all the memory.memsw.* entries) -func IgnoreModules(names ...string) func(*memoryController) { - return func(mc *memoryController) { - for _, name := range names { - mc.ignored[name] = struct{}{} - } - } -} - -// OptionalSwap allows the memory controller to not fail if cgroups is not accounting -// Swap memory (there are no memory.memsw.* entries) -func OptionalSwap() func(*memoryController) { - return func(mc *memoryController) { - _, err := os.Stat(filepath.Join(mc.root, "memory.memsw.usage_in_bytes")) - if os.IsNotExist(err) { - mc.ignored["memsw"] = struct{}{} - } - } -} - -type memoryController struct { - root string - ignored map[string]struct{} -} - -func (m *memoryController) Name() Name { - return Memory -} - -func (m *memoryController) Path(path string) string { - return filepath.Join(m.root, path) -} - -func (m *memoryController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(m.Path(path), defaultDirPerm); err != nil { - return err - } - if resources.Memory == nil { - return nil - } - return m.set(path, getMemorySettings(resources)) -} - -func (m *memoryController) Update(path string, resources *specs.LinuxResources) error { - if resources.Memory == nil { - return nil - } - g := func(v *int64) bool { - return v != nil && *v > 0 - } - settings := getMemorySettings(resources) - if g(resources.Memory.Limit) && g(resources.Memory.Swap) { - // if the updated swap value is larger than the current memory limit set the swap changes first - // then set the memory limit as swap must always be larger than the current limit - current, err := readUint(filepath.Join(m.Path(path), "memory.limit_in_bytes")) - if err != nil { - return err - } - if current < uint64(*resources.Memory.Swap) { - settings[0], settings[1] = settings[1], settings[0] - } - } - return m.set(path, settings) -} - -func (m *memoryController) Stat(path string, stats *v1.Metrics) error { - fMemStat, err := 
os.Open(filepath.Join(m.Path(path), "memory.stat")) - if err != nil { - return err - } - defer fMemStat.Close() - stats.Memory = &v1.MemoryStat{ - Usage: &v1.MemoryEntry{}, - Swap: &v1.MemoryEntry{}, - Kernel: &v1.MemoryEntry{}, - KernelTCP: &v1.MemoryEntry{}, - } - if err := m.parseStats(fMemStat, stats.Memory); err != nil { - return err - } - - fMemOomControl, err := os.Open(filepath.Join(m.Path(path), "memory.oom_control")) - if err != nil { - return err - } - defer fMemOomControl.Close() - stats.MemoryOomControl = &v1.MemoryOomControl{} - if err := m.parseOomControlStats(fMemOomControl, stats.MemoryOomControl); err != nil { - return err - } - for _, t := range []struct { - module string - entry *v1.MemoryEntry - }{ - { - module: "", - entry: stats.Memory.Usage, - }, - { - module: "memsw", - entry: stats.Memory.Swap, - }, - { - module: "kmem", - entry: stats.Memory.Kernel, - }, - { - module: "kmem.tcp", - entry: stats.Memory.KernelTCP, - }, - } { - if _, ok := m.ignored[t.module]; ok { - continue - } - for _, tt := range []struct { - name string - value *uint64 - }{ - { - name: "usage_in_bytes", - value: &t.entry.Usage, - }, - { - name: "max_usage_in_bytes", - value: &t.entry.Max, - }, - { - name: "failcnt", - value: &t.entry.Failcnt, - }, - { - name: "limit_in_bytes", - value: &t.entry.Limit, - }, - } { - parts := []string{"memory"} - if t.module != "" { - parts = append(parts, t.module) - } - parts = append(parts, tt.name) - v, err := readUint(filepath.Join(m.Path(path), strings.Join(parts, "."))) - if err != nil { - return err - } - *tt.value = v - } - } - return nil -} - -func (m *memoryController) parseStats(r io.Reader, stat *v1.MemoryStat) error { - var ( - raw = make(map[string]uint64) - sc = bufio.NewScanner(r) - line int - ) - for sc.Scan() { - key, v, err := parseKV(sc.Text()) - if err != nil { - return fmt.Errorf("%d: %v", line, err) - } - raw[key] = v - line++ - } - if err := sc.Err(); err != nil { - return err - } - stat.Cache = raw["cache"] - 
stat.RSS = raw["rss"] - stat.RSSHuge = raw["rss_huge"] - stat.MappedFile = raw["mapped_file"] - stat.Dirty = raw["dirty"] - stat.Writeback = raw["writeback"] - stat.PgPgIn = raw["pgpgin"] - stat.PgPgOut = raw["pgpgout"] - stat.PgFault = raw["pgfault"] - stat.PgMajFault = raw["pgmajfault"] - stat.InactiveAnon = raw["inactive_anon"] - stat.ActiveAnon = raw["active_anon"] - stat.InactiveFile = raw["inactive_file"] - stat.ActiveFile = raw["active_file"] - stat.Unevictable = raw["unevictable"] - stat.HierarchicalMemoryLimit = raw["hierarchical_memory_limit"] - stat.HierarchicalSwapLimit = raw["hierarchical_memsw_limit"] - stat.TotalCache = raw["total_cache"] - stat.TotalRSS = raw["total_rss"] - stat.TotalRSSHuge = raw["total_rss_huge"] - stat.TotalMappedFile = raw["total_mapped_file"] - stat.TotalDirty = raw["total_dirty"] - stat.TotalWriteback = raw["total_writeback"] - stat.TotalPgPgIn = raw["total_pgpgin"] - stat.TotalPgPgOut = raw["total_pgpgout"] - stat.TotalPgFault = raw["total_pgfault"] - stat.TotalPgMajFault = raw["total_pgmajfault"] - stat.TotalInactiveAnon = raw["total_inactive_anon"] - stat.TotalActiveAnon = raw["total_active_anon"] - stat.TotalInactiveFile = raw["total_inactive_file"] - stat.TotalActiveFile = raw["total_active_file"] - stat.TotalUnevictable = raw["total_unevictable"] - return nil -} - -func (m *memoryController) parseOomControlStats(r io.Reader, stat *v1.MemoryOomControl) error { - var ( - raw = make(map[string]uint64) - sc = bufio.NewScanner(r) - line int - ) - for sc.Scan() { - key, v, err := parseKV(sc.Text()) - if err != nil { - return fmt.Errorf("%d: %v", line, err) - } - raw[key] = v - line++ - } - if err := sc.Err(); err != nil { - return err - } - stat.OomKillDisable = raw["oom_kill_disable"] - stat.UnderOom = raw["under_oom"] - stat.OomKill = raw["oom_kill"] - return nil -} - -func (m *memoryController) set(path string, settings []memorySettings) error { - for _, t := range settings { - if t.value != nil { - if err := 
retryingWriteFile( - filepath.Join(m.Path(path), "memory."+t.name), - []byte(strconv.FormatInt(*t.value, 10)), - defaultFilePerm, - ); err != nil { - return err - } - } - } - return nil -} - -type memorySettings struct { - name string - value *int64 -} - -func getMemorySettings(resources *specs.LinuxResources) []memorySettings { - mem := resources.Memory - var swappiness *int64 - if mem.Swappiness != nil { - v := int64(*mem.Swappiness) - swappiness = &v - } - return []memorySettings{ - { - name: "limit_in_bytes", - value: mem.Limit, - }, - { - name: "soft_limit_in_bytes", - value: mem.Reservation, - }, - { - name: "memsw.limit_in_bytes", - value: mem.Swap, - }, - { - name: "kmem.limit_in_bytes", - value: mem.Kernel, - }, - { - name: "kmem.tcp.limit_in_bytes", - value: mem.KernelTCP, - }, - { - name: "oom_control", - value: getOomControlValue(mem), - }, - { - name: "swappiness", - value: swappiness, - }, - } -} - -func getOomControlValue(mem *specs.LinuxMemory) *int64 { - if mem.DisableOOMKiller != nil && *mem.DisableOOMKiller { - i := int64(1) - return &i - } - return nil -} - -func (m *memoryController) memoryEvent(path string, event MemoryEvent) (uintptr, error) { - root := m.Path(path) - efd, err := unix.Eventfd(0, unix.EFD_CLOEXEC) - if err != nil { - return 0, err - } - evtFile, err := os.Open(filepath.Join(root, event.EventFile())) - if err != nil { - unix.Close(efd) - return 0, err - } - defer evtFile.Close() - data := fmt.Sprintf("%d %d %s", efd, evtFile.Fd(), event.Arg()) - evctlPath := filepath.Join(root, "cgroup.event_control") - if err := retryingWriteFile(evctlPath, []byte(data), 0700); err != nil { - unix.Close(efd) - return 0, err - } - return uintptr(efd), nil -} diff --git a/vendor/github.com/containerd/cgroups/named.go b/vendor/github.com/containerd/cgroups/named.go deleted file mode 100644 index 06b16c3..0000000 --- a/vendor/github.com/containerd/cgroups/named.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import "path/filepath" - -func NewNamed(root string, name Name) *namedController { - return &namedController{ - root: root, - name: name, - } -} - -type namedController struct { - root string - name Name -} - -func (n *namedController) Name() Name { - return n.name -} - -func (n *namedController) Path(path string) string { - return filepath.Join(n.root, string(n.name), path) -} diff --git a/vendor/github.com/containerd/cgroups/net_cls.go b/vendor/github.com/containerd/cgroups/net_cls.go deleted file mode 100644 index 839b06d..0000000 --- a/vendor/github.com/containerd/cgroups/net_cls.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "os" - "path/filepath" - "strconv" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewNetCls(root string) *netclsController { - return &netclsController{ - root: filepath.Join(root, string(NetCLS)), - } -} - -type netclsController struct { - root string -} - -func (n *netclsController) Name() Name { - return NetCLS -} - -func (n *netclsController) Path(path string) string { - return filepath.Join(n.root, path) -} - -func (n *netclsController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil { - return err - } - if resources.Network != nil && resources.Network.ClassID != nil && *resources.Network.ClassID > 0 { - return retryingWriteFile( - filepath.Join(n.Path(path), "net_cls.classid"), - []byte(strconv.FormatUint(uint64(*resources.Network.ClassID), 10)), - defaultFilePerm, - ) - } - return nil -} - -func (n *netclsController) Update(path string, resources *specs.LinuxResources) error { - return n.Create(path, resources) -} diff --git a/vendor/github.com/containerd/cgroups/net_prio.go b/vendor/github.com/containerd/cgroups/net_prio.go deleted file mode 100644 index 6362fd0..0000000 --- a/vendor/github.com/containerd/cgroups/net_prio.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "fmt" - "os" - "path/filepath" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewNetPrio(root string) *netprioController { - return &netprioController{ - root: filepath.Join(root, string(NetPrio)), - } -} - -type netprioController struct { - root string -} - -func (n *netprioController) Name() Name { - return NetPrio -} - -func (n *netprioController) Path(path string) string { - return filepath.Join(n.root, path) -} - -func (n *netprioController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil { - return err - } - if resources.Network != nil { - for _, prio := range resources.Network.Priorities { - if err := retryingWriteFile( - filepath.Join(n.Path(path), "net_prio.ifpriomap"), - formatPrio(prio.Name, prio.Priority), - defaultFilePerm, - ); err != nil { - return err - } - } - } - return nil -} - -func formatPrio(name string, prio uint32) []byte { - return []byte(fmt.Sprintf("%s %d", name, prio)) -} diff --git a/vendor/github.com/containerd/cgroups/opts.go b/vendor/github.com/containerd/cgroups/opts.go deleted file mode 100644 index a1449e2..0000000 --- a/vendor/github.com/containerd/cgroups/opts.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "github.com/pkg/errors" -) - -var ( - // ErrIgnoreSubsystem allows the specific subsystem to be skipped - ErrIgnoreSubsystem = errors.New("skip subsystem") - // ErrDevicesRequired is returned when the devices subsystem is required but - // does not exist or is not active - ErrDevicesRequired = errors.New("devices subsystem is required") -) - -// InitOpts allows configuration for the creation or loading of a cgroup -type InitOpts func(*InitConfig) error - -// InitConfig provides configuration options for the creation -// or loading of a cgroup and its subsystems -type InitConfig struct { - // InitCheck can be used to check initialization errors from the subsystem - InitCheck InitCheck -} - -func newInitConfig() *InitConfig { - return &InitConfig{ - InitCheck: RequireDevices, - } -} - -// InitCheck allows subsystems errors to be checked when initialized or loaded -type InitCheck func(Subsystem, Path, error) error - -// AllowAny allows any subsystem errors to be skipped -func AllowAny(_ Subsystem, _ Path, _ error) error { - return ErrIgnoreSubsystem -} - -// RequireDevices requires the device subsystem but no others -func RequireDevices(s Subsystem, _ Path, _ error) error { - if s.Name() == Devices { - return ErrDevicesRequired - } - return ErrIgnoreSubsystem -} diff --git a/vendor/github.com/containerd/cgroups/paths.go b/vendor/github.com/containerd/cgroups/paths.go deleted file mode 100644 index 27197ec..0000000 --- a/vendor/github.com/containerd/cgroups/paths.go +++ /dev/null @@ -1,107 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "fmt" - "path/filepath" - - "github.com/pkg/errors" -) - -type Path func(subsystem Name) (string, error) - -func RootPath(subsystem Name) (string, error) { - return "/", nil -} - -// StaticPath returns a static path to use for all cgroups -func StaticPath(path string) Path { - return func(_ Name) (string, error) { - return path, nil - } -} - -// NestedPath will nest the cgroups based on the calling processes cgroup -// placing its child processes inside its own path -func NestedPath(suffix string) Path { - paths, err := parseCgroupFile("/proc/self/cgroup") - if err != nil { - return errorPath(err) - } - return existingPath(paths, suffix) -} - -// PidPath will return the correct cgroup paths for an existing process running inside a cgroup -// This is commonly used for the Load function to restore an existing container -func PidPath(pid int) Path { - p := fmt.Sprintf("/proc/%d/cgroup", pid) - paths, err := parseCgroupFile(p) - if err != nil { - return errorPath(errors.Wrapf(err, "parse cgroup file %s", p)) - } - return existingPath(paths, "") -} - -// ErrControllerNotActive is returned when a controller is not supported or enabled -var ErrControllerNotActive = errors.New("controller is not supported") - -func existingPath(paths map[string]string, suffix string) Path { - // localize the paths based on the root mount dest for nested cgroups - for n, p := range paths { - dest, err := getCgroupDestination(n) - if err != nil { - return errorPath(err) - } - rel, err := filepath.Rel(dest, p) - if err != nil { - return errorPath(err) - } - 
if rel == "." { - rel = dest - } - paths[n] = filepath.Join("/", rel) - } - return func(name Name) (string, error) { - root, ok := paths[string(name)] - if !ok { - if root, ok = paths["name="+string(name)]; !ok { - return "", ErrControllerNotActive - } - } - if suffix != "" { - return filepath.Join(root, suffix), nil - } - return root, nil - } -} - -func subPath(path Path, subName string) Path { - return func(name Name) (string, error) { - p, err := path(name) - if err != nil { - return "", err - } - return filepath.Join(p, subName), nil - } -} - -func errorPath(err error) Path { - return func(_ Name) (string, error) { - return "", err - } -} diff --git a/vendor/github.com/containerd/cgroups/perf_event.go b/vendor/github.com/containerd/cgroups/perf_event.go deleted file mode 100644 index 648786d..0000000 --- a/vendor/github.com/containerd/cgroups/perf_event.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import "path/filepath" - -func NewPerfEvent(root string) *PerfEventController { - return &PerfEventController{ - root: filepath.Join(root, string(PerfEvent)), - } -} - -type PerfEventController struct { - root string -} - -func (p *PerfEventController) Name() Name { - return PerfEvent -} - -func (p *PerfEventController) Path(path string) string { - return filepath.Join(p.root, path) -} diff --git a/vendor/github.com/containerd/cgroups/pids.go b/vendor/github.com/containerd/cgroups/pids.go deleted file mode 100644 index ce78e44..0000000 --- a/vendor/github.com/containerd/cgroups/pids.go +++ /dev/null @@ -1,86 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - v1 "github.com/containerd/cgroups/stats/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewPids(root string) *pidsController { - return &pidsController{ - root: filepath.Join(root, string(Pids)), - } -} - -type pidsController struct { - root string -} - -func (p *pidsController) Name() Name { - return Pids -} - -func (p *pidsController) Path(path string) string { - return filepath.Join(p.root, path) -} - -func (p *pidsController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil { - return err - } - if resources.Pids != nil && resources.Pids.Limit > 0 { - return retryingWriteFile( - filepath.Join(p.Path(path), "pids.max"), - []byte(strconv.FormatInt(resources.Pids.Limit, 10)), - defaultFilePerm, - ) - } - return nil -} - -func (p *pidsController) Update(path string, resources *specs.LinuxResources) error { - return p.Create(path, resources) -} - -func (p *pidsController) Stat(path string, stats *v1.Metrics) error { - current, err := readUint(filepath.Join(p.Path(path), "pids.current")) - if err != nil { - return err - } - var max uint64 - maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "pids.max")) - if err != nil { - return err - } - if maxS := strings.TrimSpace(string(maxData)); maxS != "max" { - if max, err = parseUint(maxS, 10, 64); err != nil { - return err - } - } - stats.Pids = &v1.PidsStat{ - Current: current, - Limit: max, - } - return nil -} diff --git a/vendor/github.com/containerd/cgroups/rdma.go b/vendor/github.com/containerd/cgroups/rdma.go deleted file mode 100644 index b6f0d41..0000000 --- a/vendor/github.com/containerd/cgroups/rdma.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "io/ioutil" - "math" - "os" - "path/filepath" - "strconv" - "strings" - - v1 "github.com/containerd/cgroups/stats/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -type rdmaController struct { - root string -} - -func (p *rdmaController) Name() Name { - return Rdma -} - -func (p *rdmaController) Path(path string) string { - return filepath.Join(p.root, path) -} - -func NewRdma(root string) *rdmaController { - return &rdmaController{ - root: filepath.Join(root, string(Rdma)), - } -} - -func createCmdString(device string, limits *specs.LinuxRdma) string { - var cmdString string - - cmdString = device - if limits.HcaHandles != nil { - cmdString = cmdString + " " + "hca_handle=" + strconv.FormatUint(uint64(*limits.HcaHandles), 10) - } - - if limits.HcaObjects != nil { - cmdString = cmdString + " " + "hca_object=" + strconv.FormatUint(uint64(*limits.HcaObjects), 10) - } - return cmdString -} - -func (p *rdmaController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil { - return err - } - - for device, limit := range resources.Rdma { - if device != "" && (limit.HcaHandles != nil || limit.HcaObjects != nil) { - return retryingWriteFile( - filepath.Join(p.Path(path), "rdma.max"), - []byte(createCmdString(device, &limit)), - defaultFilePerm, - ) - } - } - return nil -} - -func (p *rdmaController) Update(path 
string, resources *specs.LinuxResources) error { - return p.Create(path, resources) -} - -func parseRdmaKV(raw string, entry *v1.RdmaEntry) { - var value uint64 - var err error - - parts := strings.Split(raw, "=") - switch len(parts) { - case 2: - if parts[1] == "max" { - value = math.MaxUint32 - } else { - value, err = parseUint(parts[1], 10, 32) - if err != nil { - return - } - } - if parts[0] == "hca_handle" { - entry.HcaHandles = uint32(value) - } else if parts[0] == "hca_object" { - entry.HcaObjects = uint32(value) - } - } -} - -func toRdmaEntry(strEntries []string) []*v1.RdmaEntry { - var rdmaEntries []*v1.RdmaEntry - for i := range strEntries { - parts := strings.Fields(strEntries[i]) - switch len(parts) { - case 3: - entry := new(v1.RdmaEntry) - entry.Device = parts[0] - parseRdmaKV(parts[1], entry) - parseRdmaKV(parts[2], entry) - - rdmaEntries = append(rdmaEntries, entry) - default: - continue - } - } - return rdmaEntries -} - -func (p *rdmaController) Stat(path string, stats *v1.Metrics) error { - - currentData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.current")) - if err != nil { - return err - } - currentPerDevices := strings.Split(string(currentData), "\n") - - maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.max")) - if err != nil { - return err - } - maxPerDevices := strings.Split(string(maxData), "\n") - - // If device got removed between reading two files, ignore returning - // stats. 
- if len(currentPerDevices) != len(maxPerDevices) { - return nil - } - - currentEntries := toRdmaEntry(currentPerDevices) - maxEntries := toRdmaEntry(maxPerDevices) - - stats.Rdma = &v1.RdmaStat{ - Current: currentEntries, - Limit: maxEntries, - } - return nil -} diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go deleted file mode 100644 index 6d2d417..0000000 --- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go +++ /dev/null @@ -1,6125 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/containerd/cgroups/stats/v1/metrics.proto - -package v1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Metrics struct { - Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` - Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"` - CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` - Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` - Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"` - Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"` - Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"` - CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"` - MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metrics) Reset() { *m = Metrics{} } -func (*Metrics) ProtoMessage() {} -func (*Metrics) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{0} -} -func (m *Metrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metrics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metrics.Merge(m, src) -} -func (m *Metrics) XXX_Size() int { - return m.Size() -} -func (m *Metrics) XXX_DiscardUnknown() { - xxx_messageInfo_Metrics.DiscardUnknown(m) -} - -var xxx_messageInfo_Metrics proto.InternalMessageInfo - -type HugetlbStat struct { - Usage uint64 
`protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"` - Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` - Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"` - Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HugetlbStat) Reset() { *m = HugetlbStat{} } -func (*HugetlbStat) ProtoMessage() {} -func (*HugetlbStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{1} -} -func (m *HugetlbStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HugetlbStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HugetlbStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HugetlbStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_HugetlbStat.Merge(m, src) -} -func (m *HugetlbStat) XXX_Size() int { - return m.Size() -} -func (m *HugetlbStat) XXX_DiscardUnknown() { - xxx_messageInfo_HugetlbStat.DiscardUnknown(m) -} - -var xxx_messageInfo_HugetlbStat proto.InternalMessageInfo - -type PidsStat struct { - Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` - Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PidsStat) Reset() { *m = PidsStat{} } -func (*PidsStat) ProtoMessage() {} -func (*PidsStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{2} -} -func (m *PidsStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PidsStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { - if deterministic { - return xxx_messageInfo_PidsStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PidsStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_PidsStat.Merge(m, src) -} -func (m *PidsStat) XXX_Size() int { - return m.Size() -} -func (m *PidsStat) XXX_DiscardUnknown() { - xxx_messageInfo_PidsStat.DiscardUnknown(m) -} - -var xxx_messageInfo_PidsStat proto.InternalMessageInfo - -type CPUStat struct { - Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage,proto3" json:"usage,omitempty"` - Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling,proto3" json:"throttling,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CPUStat) Reset() { *m = CPUStat{} } -func (*CPUStat) ProtoMessage() {} -func (*CPUStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{3} -} -func (m *CPUStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CPUStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CPUStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CPUStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_CPUStat.Merge(m, src) -} -func (m *CPUStat) XXX_Size() int { - return m.Size() -} -func (m *CPUStat) XXX_DiscardUnknown() { - xxx_messageInfo_CPUStat.DiscardUnknown(m) -} - -var xxx_messageInfo_CPUStat proto.InternalMessageInfo - -type CPUUsage struct { - // values in nanoseconds - Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"` - User uint64 `protobuf:"varint,3,opt,name=user,proto3" 
json:"user,omitempty"` - PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu,proto3" json:"per_cpu,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CPUUsage) Reset() { *m = CPUUsage{} } -func (*CPUUsage) ProtoMessage() {} -func (*CPUUsage) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{4} -} -func (m *CPUUsage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CPUUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CPUUsage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CPUUsage) XXX_Merge(src proto.Message) { - xxx_messageInfo_CPUUsage.Merge(m, src) -} -func (m *CPUUsage) XXX_Size() int { - return m.Size() -} -func (m *CPUUsage) XXX_DiscardUnknown() { - xxx_messageInfo_CPUUsage.DiscardUnknown(m) -} - -var xxx_messageInfo_CPUUsage proto.InternalMessageInfo - -type Throttle struct { - Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"` - ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` - ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Throttle) Reset() { *m = Throttle{} } -func (*Throttle) ProtoMessage() {} -func (*Throttle) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{5} -} -func (m *Throttle) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Throttle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_Throttle.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Throttle) XXX_Merge(src proto.Message) { - xxx_messageInfo_Throttle.Merge(m, src) -} -func (m *Throttle) XXX_Size() int { - return m.Size() -} -func (m *Throttle) XXX_DiscardUnknown() { - xxx_messageInfo_Throttle.DiscardUnknown(m) -} - -var xxx_messageInfo_Throttle proto.InternalMessageInfo - -type MemoryStat struct { - Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"` - RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"` - RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"` - MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"` - Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"` - Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"` - PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"` - PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"` - PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"` - PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"` - InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` - ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` - InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` - ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` - Unevictable uint64 
`protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` - HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"` - HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"` - TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"` - TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"` - TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"` - TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"` - TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"` - TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"` - TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"` - TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"` - TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"` - TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"` - TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"` - TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"` - TotalInactiveFile uint64 
`protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"` - TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"` - TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"` - Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage,proto3" json:"usage,omitempty"` - Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap,proto3" json:"swap,omitempty"` - Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel,proto3" json:"kernel,omitempty"` - KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemoryStat) Reset() { *m = MemoryStat{} } -func (*MemoryStat) ProtoMessage() {} -func (*MemoryStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{6} -} -func (m *MemoryStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemoryStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemoryStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemoryStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoryStat.Merge(m, src) -} -func (m *MemoryStat) XXX_Size() int { - return m.Size() -} -func (m *MemoryStat) XXX_DiscardUnknown() { - xxx_messageInfo_MemoryStat.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoryStat proto.InternalMessageInfo - -type MemoryEntry struct { - Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` - Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"` - Max uint64 
`protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` - Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemoryEntry) Reset() { *m = MemoryEntry{} } -func (*MemoryEntry) ProtoMessage() {} -func (*MemoryEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{7} -} -func (m *MemoryEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemoryEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemoryEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemoryEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoryEntry.Merge(m, src) -} -func (m *MemoryEntry) XXX_Size() int { - return m.Size() -} -func (m *MemoryEntry) XXX_DiscardUnknown() { - xxx_messageInfo_MemoryEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoryEntry proto.InternalMessageInfo - -type MemoryOomControl struct { - OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"` - UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"` - OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemoryOomControl) Reset() { *m = MemoryOomControl{} } -func (*MemoryOomControl) ProtoMessage() {} -func (*MemoryOomControl) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{8} -} -func (m *MemoryOomControl) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*MemoryOomControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemoryOomControl.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemoryOomControl) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoryOomControl.Merge(m, src) -} -func (m *MemoryOomControl) XXX_Size() int { - return m.Size() -} -func (m *MemoryOomControl) XXX_DiscardUnknown() { - xxx_messageInfo_MemoryOomControl.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoryOomControl proto.InternalMessageInfo - -type BlkIOStat struct { - IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"` - IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive,proto3" json:"io_queued_recursive,omitempty"` - IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive,proto3" json:"io_service_time_recursive,omitempty"` - IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive,proto3" json:"io_wait_time_recursive,omitempty"` - IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive,proto3" json:"io_merged_recursive,omitempty"` - IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive,proto3" json:"io_time_recursive,omitempty"` - SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive,proto3" json:"sectors_recursive,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - 
XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BlkIOStat) Reset() { *m = BlkIOStat{} } -func (*BlkIOStat) ProtoMessage() {} -func (*BlkIOStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{9} -} -func (m *BlkIOStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlkIOStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlkIOStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlkIOStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlkIOStat.Merge(m, src) -} -func (m *BlkIOStat) XXX_Size() int { - return m.Size() -} -func (m *BlkIOStat) XXX_DiscardUnknown() { - xxx_messageInfo_BlkIOStat.DiscardUnknown(m) -} - -var xxx_messageInfo_BlkIOStat proto.InternalMessageInfo - -type BlkIOEntry struct { - Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"` - Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"` - Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` - Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` - Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} } -func (*BlkIOEntry) ProtoMessage() {} -func (*BlkIOEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{10} -} -func (m *BlkIOEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlkIOEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlkIOEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlkIOEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlkIOEntry.Merge(m, src) -} -func (m *BlkIOEntry) XXX_Size() int { - return m.Size() -} -func (m *BlkIOEntry) XXX_DiscardUnknown() { - xxx_messageInfo_BlkIOEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_BlkIOEntry proto.InternalMessageInfo - -type RdmaStat struct { - Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"` - Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RdmaStat) Reset() { *m = RdmaStat{} } -func (*RdmaStat) ProtoMessage() {} -func (*RdmaStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{11} -} -func (m *RdmaStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RdmaStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RdmaStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RdmaStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_RdmaStat.Merge(m, src) -} -func (m *RdmaStat) XXX_Size() int { - return m.Size() -} -func (m *RdmaStat) XXX_DiscardUnknown() { - xxx_messageInfo_RdmaStat.DiscardUnknown(m) -} - -var xxx_messageInfo_RdmaStat proto.InternalMessageInfo - -type RdmaEntry struct { - Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` - HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"` - HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized 
[]byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } -func (*RdmaEntry) ProtoMessage() {} -func (*RdmaEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{12} -} -func (m *RdmaEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RdmaEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RdmaEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RdmaEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_RdmaEntry.Merge(m, src) -} -func (m *RdmaEntry) XXX_Size() int { - return m.Size() -} -func (m *RdmaEntry) XXX_DiscardUnknown() { - xxx_messageInfo_RdmaEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_RdmaEntry proto.InternalMessageInfo - -type NetworkStat struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` - RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"` - RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` - RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"` - TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` - TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"` - TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"` - TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - 
XXX_sizecache int32 `json:"-"` -} - -func (m *NetworkStat) Reset() { *m = NetworkStat{} } -func (*NetworkStat) ProtoMessage() {} -func (*NetworkStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{13} -} -func (m *NetworkStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NetworkStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NetworkStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkStat.Merge(m, src) -} -func (m *NetworkStat) XXX_Size() int { - return m.Size() -} -func (m *NetworkStat) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkStat.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkStat proto.InternalMessageInfo - -// CgroupStats exports per-cgroup statistics. -type CgroupStats struct { - // number of tasks sleeping - NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"` - // number of tasks running - NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"` - // number of tasks in stopped state - NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"` - // number of tasks in uninterruptible state - NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"` - // number of tasks waiting on IO - NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CgroupStats) Reset() { *m = CgroupStats{} } -func (*CgroupStats) ProtoMessage() {} 
-func (*CgroupStats) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{14} -} -func (m *CgroupStats) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CgroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CgroupStats.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CgroupStats) XXX_Merge(src proto.Message) { - xxx_messageInfo_CgroupStats.Merge(m, src) -} -func (m *CgroupStats) XXX_Size() int { - return m.Size() -} -func (m *CgroupStats) XXX_DiscardUnknown() { - xxx_messageInfo_CgroupStats.DiscardUnknown(m) -} - -var xxx_messageInfo_CgroupStats proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") - proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") - proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat") - proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat") - proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage") - proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle") - proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat") - proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry") - proto.RegisterType((*MemoryOomControl)(nil), "io.containerd.cgroups.v1.MemoryOomControl") - proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat") - proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") - proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") - proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") - proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") - proto.RegisterType((*CgroupStats)(nil), 
"io.containerd.cgroups.v1.CgroupStats") -} - -func init() { - proto.RegisterFile("github.com/containerd/cgroups/stats/v1/metrics.proto", fileDescriptor_a17b2d87c332bfaa) -} - -var fileDescriptor_a17b2d87c332bfaa = []byte{ - // 1749 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0xcd, 0x72, 0xe3, 0xc6, - 0x11, 0x36, 0x45, 0x48, 0x24, 0x9a, 0x92, 0x56, 0x9a, 0xfd, 0x83, 0xe4, 0xb5, 0x28, 0x53, 0xbb, - 0x89, 0xe2, 0xad, 0x48, 0x65, 0x27, 0xb5, 0x95, 0x75, 0xec, 0x4a, 0x59, 0x5a, 0xbb, 0x76, 0xcb, - 0x51, 0x44, 0x83, 0x52, 0xd9, 0x39, 0xa1, 0x40, 0x70, 0x16, 0x9c, 0x15, 0x80, 0x81, 0x07, 0x03, - 0x89, 0xca, 0x29, 0x87, 0x54, 0xe5, 0x94, 0x07, 0xca, 0x1b, 0xf8, 0x98, 0x4b, 0x52, 0xc9, 0x45, - 0x15, 0xf3, 0x49, 0x52, 0x33, 0x3d, 0xf8, 0xa1, 0xbc, 0x5a, 0x85, 0x37, 0x76, 0xcf, 0xd7, 0x5f, - 0xf7, 0x34, 0xbe, 0x19, 0x34, 0x08, 0xbf, 0x0e, 0x99, 0x1c, 0xe7, 0xc3, 0xbd, 0x80, 0xc7, 0xfb, - 0x01, 0x4f, 0xa4, 0xcf, 0x12, 0x2a, 0x46, 0xfb, 0x41, 0x28, 0x78, 0x9e, 0x66, 0xfb, 0x99, 0xf4, - 0x65, 0xb6, 0x7f, 0xfe, 0xf1, 0x7e, 0x4c, 0xa5, 0x60, 0x41, 0xb6, 0x97, 0x0a, 0x2e, 0x39, 0x71, - 0x18, 0xdf, 0xab, 0xd0, 0x7b, 0x06, 0xbd, 0x77, 0xfe, 0xf1, 0xe6, 0xbd, 0x90, 0x87, 0x5c, 0x83, - 0xf6, 0xd5, 0x2f, 0xc4, 0xf7, 0xfe, 0x65, 0x41, 0xeb, 0x08, 0x19, 0xc8, 0xef, 0xa0, 0x35, 0xce, - 0x43, 0x2a, 0xa3, 0xa1, 0xd3, 0xd8, 0x6e, 0xee, 0x76, 0x3e, 0x79, 0xb2, 0x77, 0x13, 0xdb, 0xde, - 0x4b, 0x04, 0x0e, 0xa4, 0x2f, 0xdd, 0x22, 0x8a, 0x3c, 0x03, 0x2b, 0x65, 0xa3, 0xcc, 0x59, 0xd8, - 0x6e, 0xec, 0x76, 0x3e, 0xe9, 0xdd, 0x1c, 0xdd, 0x67, 0xa3, 0x4c, 0x87, 0x6a, 0x3c, 0xf9, 0x0c, - 0x9a, 0x41, 0x9a, 0x3b, 0x4d, 0x1d, 0xf6, 0xe1, 0xcd, 0x61, 0x87, 0xfd, 0x53, 0x15, 0x75, 0xd0, - 0x9a, 0x5e, 0x75, 0x9b, 0x87, 0xfd, 0x53, 0x57, 0x85, 0x91, 0xcf, 0x60, 0x29, 0xa6, 0x31, 0x17, - 0x97, 0x8e, 0xa5, 0x09, 0x1e, 0xdf, 0x4c, 0x70, 0xa4, 0x71, 0x3a, 0xb3, 0x89, 0x21, 0xcf, 0x61, - 0x71, 0x18, 0x9d, 0x31, 0xee, 0x2c, 0xea, 0xe0, 0x9d, 0x9b, 
0x83, 0x0f, 0xa2, 0xb3, 0x57, 0xc7, - 0x3a, 0x16, 0x23, 0xd4, 0x76, 0xc5, 0x28, 0xf6, 0x9d, 0xa5, 0xdb, 0xb6, 0xeb, 0x8e, 0x62, 0x1f, - 0xb7, 0xab, 0xf0, 0xaa, 0xcf, 0x09, 0x95, 0x17, 0x5c, 0x9c, 0x39, 0xad, 0xdb, 0xfa, 0xfc, 0x07, - 0x04, 0x62, 0x9f, 0x4d, 0x14, 0x79, 0x09, 0xcb, 0x08, 0xf1, 0xb4, 0x0a, 0x9c, 0xb6, 0x2e, 0xe0, - 0x1d, 0x2c, 0x87, 0xfa, 0xa7, 0x22, 0xc9, 0xdc, 0x4e, 0x50, 0x19, 0xe4, 0x3b, 0x20, 0xd8, 0x07, - 0x8f, 0xf3, 0xd8, 0x53, 0xc1, 0x82, 0x47, 0x8e, 0xad, 0xf9, 0x3e, 0xba, 0xad, 0x8f, 0xc7, 0x3c, - 0x3e, 0xc4, 0x08, 0x77, 0x2d, 0xbe, 0xe6, 0xe9, 0x9d, 0x41, 0xa7, 0xa6, 0x11, 0x72, 0x0f, 0x16, - 0xf3, 0xcc, 0x0f, 0xa9, 0xd3, 0xd8, 0x6e, 0xec, 0x5a, 0x2e, 0x1a, 0x64, 0x0d, 0x9a, 0xb1, 0x3f, - 0xd1, 0x7a, 0xb1, 0x5c, 0xf5, 0x93, 0x38, 0xd0, 0x7a, 0xed, 0xb3, 0x28, 0x48, 0xa4, 0x96, 0x83, - 0xe5, 0x16, 0x26, 0xd9, 0x84, 0x76, 0xea, 0x87, 0x34, 0x63, 0x7f, 0xa2, 0xfa, 0x41, 0xdb, 0x6e, - 0x69, 0xf7, 0x3e, 0x85, 0x76, 0x21, 0x29, 0xc5, 0x10, 0xe4, 0x42, 0xd0, 0x44, 0x9a, 0x5c, 0x85, - 0xa9, 0x6a, 0x88, 0x58, 0xcc, 0xa4, 0xc9, 0x87, 0x46, 0xef, 0xaf, 0x0d, 0x68, 0x19, 0x61, 0x91, - 0xdf, 0xd4, 0xab, 0x7c, 0xe7, 0x23, 0x3d, 0xec, 0x9f, 0x9e, 0x2a, 0x64, 0xb1, 0x93, 0x03, 0x00, - 0x39, 0x16, 0x5c, 0xca, 0x88, 0x25, 0xe1, 0xed, 0x07, 0xe0, 0x04, 0xb1, 0xd4, 0xad, 0x45, 0xf5, - 0xbe, 0x87, 0x76, 0x41, 0xab, 0x6a, 0x95, 0x5c, 0xfa, 0x51, 0xd1, 0x2f, 0x6d, 0x90, 0x07, 0xb0, - 0x74, 0x46, 0x45, 0x42, 0x23, 0xb3, 0x05, 0x63, 0x11, 0x02, 0x56, 0x9e, 0x51, 0x61, 0x5a, 0xa6, - 0x7f, 0x93, 0x1d, 0x68, 0xa5, 0x54, 0x78, 0xea, 0x60, 0x59, 0xdb, 0xcd, 0x5d, 0xeb, 0x00, 0xa6, - 0x57, 0xdd, 0xa5, 0x3e, 0x15, 0xea, 0xe0, 0x2c, 0xa5, 0x54, 0x1c, 0xa6, 0x79, 0x6f, 0x02, 0xed, - 0xa2, 0x14, 0xd5, 0xb8, 0x94, 0x0a, 0xc6, 0x47, 0x59, 0xd1, 0x38, 0x63, 0x92, 0xa7, 0xb0, 0x6e, - 0xca, 0xa4, 0x23, 0xaf, 0xc0, 0x60, 0x05, 0x6b, 0xe5, 0x42, 0xdf, 0x80, 0x9f, 0xc0, 0x6a, 0x05, - 0x96, 0x2c, 0xa6, 0xa6, 0xaa, 0x95, 0xd2, 0x7b, 0xc2, 0x62, 0xda, 0xfb, 0x4f, 0x07, 0xa0, 0x3a, - 
0x8e, 0x6a, 0xbf, 0x81, 0x1f, 0x8c, 0x4b, 0x7d, 0x68, 0x83, 0x6c, 0x40, 0x53, 0x64, 0x26, 0x15, - 0x9e, 0x7a, 0x77, 0x30, 0x70, 0x95, 0x8f, 0xfc, 0x0c, 0xda, 0x22, 0xcb, 0x3c, 0x75, 0xf5, 0x60, - 0x82, 0x83, 0xce, 0xf4, 0xaa, 0xdb, 0x72, 0x07, 0x03, 0x25, 0x3b, 0xb7, 0x25, 0xb2, 0x4c, 0xfd, - 0x20, 0x5d, 0xe8, 0xc4, 0x7e, 0x9a, 0xd2, 0x91, 0xf7, 0x9a, 0x45, 0xa8, 0x1c, 0xcb, 0x05, 0x74, - 0x7d, 0xc5, 0x22, 0xdd, 0xe9, 0x11, 0x13, 0xf2, 0x52, 0x5f, 0x00, 0x96, 0x8b, 0x06, 0x79, 0x04, - 0xf6, 0x85, 0x60, 0x92, 0x0e, 0xfd, 0xe0, 0x4c, 0x1f, 0x70, 0xcb, 0xad, 0x1c, 0xc4, 0x81, 0x76, - 0x1a, 0x7a, 0x69, 0xe8, 0xb1, 0xc4, 0x69, 0xe1, 0x93, 0x48, 0xc3, 0x7e, 0xf8, 0x2a, 0x21, 0x9b, - 0x60, 0xe3, 0x0a, 0xcf, 0xa5, 0x3e, 0x97, 0xaa, 0x8d, 0x61, 0x3f, 0x3c, 0xce, 0x25, 0xd9, 0xd0, - 0x51, 0xaf, 0xfd, 0x3c, 0x92, 0xfa, 0x88, 0xe9, 0xa5, 0xaf, 0x94, 0x49, 0xb6, 0x61, 0x39, 0x0d, - 0xbd, 0xd8, 0x7f, 0x63, 0x96, 0x01, 0xcb, 0x4c, 0xc3, 0x23, 0xff, 0x0d, 0x22, 0x76, 0x60, 0x85, - 0x25, 0x7e, 0x20, 0xd9, 0x39, 0xf5, 0xfc, 0x84, 0x27, 0x4e, 0x47, 0x43, 0x96, 0x0b, 0xe7, 0x17, - 0x09, 0x4f, 0xd4, 0x66, 0xeb, 0x90, 0x65, 0x64, 0xa9, 0x01, 0xea, 0x2c, 0xba, 0x1f, 0x2b, 0xb3, - 0x2c, 0xba, 0x23, 0x15, 0x8b, 0x86, 0xac, 0xd6, 0x59, 0x34, 0x60, 0x1b, 0x3a, 0x79, 0x42, 0xcf, - 0x59, 0x20, 0xfd, 0x61, 0x44, 0x9d, 0x3b, 0x1a, 0x50, 0x77, 0x91, 0x4f, 0x61, 0x63, 0xcc, 0xa8, - 0xf0, 0x45, 0x30, 0x66, 0x81, 0x1f, 0x79, 0xe6, 0x92, 0xc1, 0xe3, 0xb7, 0xa6, 0xf1, 0x0f, 0xeb, - 0x00, 0x54, 0xc2, 0xef, 0xd5, 0x32, 0x79, 0x06, 0x33, 0x4b, 0x5e, 0x76, 0xe1, 0xa7, 0x26, 0x72, - 0x5d, 0x47, 0xde, 0xaf, 0x2f, 0x0f, 0x2e, 0xfc, 0x14, 0xe3, 0xba, 0xd0, 0xd1, 0xa7, 0xc4, 0x43, - 0x21, 0x11, 0x2c, 0x5b, 0xbb, 0x0e, 0xb5, 0x9a, 0x7e, 0x01, 0x36, 0x02, 0x94, 0xa6, 0xee, 0x6a, - 0xcd, 0x2c, 0x4f, 0xaf, 0xba, 0xed, 0x13, 0xe5, 0x54, 0xc2, 0x6a, 0xeb, 0x65, 0x37, 0xcb, 0xc8, - 0x33, 0x58, 0x2d, 0xa1, 0xa8, 0xb1, 0x7b, 0x1a, 0xbf, 0x36, 0xbd, 0xea, 0x2e, 0x17, 0x78, 0x2d, - 0xb4, 0xe5, 0x22, 0x46, 0xab, 0xed, 
0x23, 0x58, 0xc7, 0xb8, 0xba, 0xe6, 0xee, 0xeb, 0x4a, 0xee, - 0xe8, 0x85, 0xa3, 0x4a, 0x78, 0x65, 0xbd, 0x28, 0xbf, 0x07, 0xb5, 0x7a, 0x5f, 0x68, 0x0d, 0xfe, - 0x1c, 0x30, 0xc6, 0xab, 0x94, 0xf8, 0x50, 0x83, 0xb0, 0xb6, 0x6f, 0x4b, 0x39, 0xee, 0x14, 0xd5, - 0x96, 0xa2, 0x74, 0xf0, 0x91, 0x68, 0x6f, 0x1f, 0x95, 0xf9, 0xa4, 0x60, 0xab, 0xf4, 0xb9, 0x81, - 0x0f, 0xbf, 0x44, 0x29, 0x91, 0x3e, 0xae, 0x71, 0xa1, 0x16, 0x37, 0x67, 0x50, 0xa8, 0xc6, 0xa7, - 0x40, 0x4a, 0x54, 0xa5, 0xda, 0xf7, 0x6b, 0x1b, 0xed, 0x57, 0xd2, 0xdd, 0x83, 0xbb, 0x08, 0x9e, - 0x15, 0xf0, 0x23, 0x8d, 0xc6, 0x7e, 0xbd, 0xaa, 0xab, 0xb8, 0x6c, 0x62, 0x1d, 0xfd, 0x41, 0x8d, - 0xfb, 0x8b, 0x0a, 0xfb, 0x53, 0x6e, 0xdd, 0xf2, 0xad, 0xb7, 0x70, 0xeb, 0xa6, 0x5f, 0xe7, 0xd6, - 0xe8, 0xee, 0x4f, 0xb8, 0x35, 0xf6, 0x69, 0x81, 0xad, 0x8b, 0x7d, 0xdb, 0x5c, 0x7b, 0x6a, 0xe1, - 0xb4, 0xa6, 0xf8, 0xdf, 0x16, 0xaf, 0x8e, 0x0f, 0x6f, 0x7b, 0x19, 0xa3, 0xd6, 0xbf, 0x4c, 0xa4, - 0xb8, 0x2c, 0xde, 0x1e, 0xcf, 0xc1, 0x52, 0x2a, 0x77, 0x7a, 0xf3, 0xc4, 0xea, 0x10, 0xf2, 0x79, - 0xf9, 0x4a, 0xd8, 0x99, 0x27, 0xb8, 0x78, 0x73, 0x0c, 0x00, 0xf0, 0x97, 0x27, 0x83, 0xd4, 0x79, - 0x3c, 0x07, 0xc5, 0xc1, 0xca, 0xf4, 0xaa, 0x6b, 0x7f, 0xad, 0x83, 0x4f, 0x0e, 0xfb, 0xae, 0x8d, - 0x3c, 0x27, 0x41, 0xda, 0xa3, 0xd0, 0xa9, 0x01, 0xab, 0xf7, 0x6e, 0xa3, 0xf6, 0xde, 0xad, 0x26, - 0x82, 0x85, 0xb7, 0x4c, 0x04, 0xcd, 0xb7, 0x4e, 0x04, 0xd6, 0xcc, 0x44, 0xd0, 0x93, 0xb0, 0x76, - 0x7d, 0x10, 0x21, 0xbb, 0xb0, 0xa6, 0x26, 0x99, 0x33, 0x16, 0xa9, 0x73, 0x95, 0xe9, 0x47, 0x86, - 0x69, 0x57, 0x39, 0x8f, 0xbf, 0x66, 0x51, 0xf4, 0x02, 0xbd, 0xe4, 0x7d, 0xb0, 0xf3, 0x64, 0x44, - 0x85, 0x9a, 0x7c, 0x4c, 0x0d, 0x6d, 0xed, 0x38, 0xe6, 0xb1, 0xba, 0xaa, 0x0b, 0x9a, 0x62, 0x0e, - 0x31, 0xe1, 0xbd, 0x7f, 0x2e, 0x82, 0x5d, 0x8e, 0x82, 0xc4, 0x87, 0x4d, 0xc6, 0xbd, 0x8c, 0x8a, - 0x73, 0x16, 0x50, 0x6f, 0x78, 0x29, 0x69, 0xe6, 0x09, 0x1a, 0xe4, 0x22, 0x63, 0xe7, 0xd4, 0x8c, - 0xd1, 0x8f, 0x6f, 0x99, 0x29, 0xf1, 0x89, 0x3c, 0x64, 0x7c, 0x80, 0x34, 
0x07, 0x8a, 0xc5, 0x2d, - 0x48, 0xc8, 0x77, 0x70, 0xbf, 0x4a, 0x31, 0xaa, 0xb1, 0x2f, 0xcc, 0xc1, 0x7e, 0xb7, 0x64, 0x1f, - 0x55, 0xcc, 0x27, 0x70, 0x97, 0x71, 0xef, 0xfb, 0x9c, 0xe6, 0x33, 0xbc, 0xcd, 0x39, 0x78, 0xd7, - 0x19, 0xff, 0x46, 0xc7, 0x57, 0xac, 0x1e, 0x6c, 0xd4, 0x5a, 0xa2, 0x26, 0x80, 0x1a, 0xb7, 0x35, - 0x07, 0xf7, 0x83, 0xb2, 0x66, 0x35, 0x31, 0x54, 0x09, 0xfe, 0x08, 0x0f, 0x18, 0xf7, 0x2e, 0x7c, - 0x26, 0xaf, 0xb3, 0x2f, 0xce, 0xd7, 0x91, 0x6f, 0x7d, 0x26, 0x67, 0xa9, 0xb1, 0x23, 0x31, 0x15, - 0xe1, 0x4c, 0x47, 0x96, 0xe6, 0xeb, 0xc8, 0x91, 0x8e, 0xaf, 0x58, 0xfb, 0xb0, 0xce, 0xf8, 0xf5, - 0x5a, 0x5b, 0x73, 0x70, 0xde, 0x61, 0x7c, 0xb6, 0xce, 0x6f, 0x60, 0x3d, 0xa3, 0x81, 0xe4, 0xa2, - 0xae, 0xb6, 0xf6, 0x1c, 0x8c, 0x6b, 0x26, 0xbc, 0xa4, 0xec, 0x9d, 0x03, 0x54, 0xeb, 0x64, 0x15, - 0x16, 0x78, 0xaa, 0x4f, 0x8e, 0xed, 0x2e, 0xf0, 0x54, 0x4d, 0x9e, 0x23, 0x75, 0xd9, 0xe1, 0x71, - 0xb5, 0x5d, 0x63, 0xa9, 0x53, 0x1c, 0xfb, 0x6f, 0x78, 0x31, 0x7a, 0xa2, 0xa1, 0xbd, 0x2c, 0xe1, - 0xc2, 0x9c, 0x58, 0x34, 0x94, 0xf7, 0xdc, 0x8f, 0x72, 0x5a, 0x4c, 0x5a, 0xda, 0xe8, 0xfd, 0xa5, - 0x01, 0xed, 0xe2, 0x03, 0x89, 0x7c, 0x5e, 0x1f, 0xde, 0x9b, 0xef, 0xfe, 0x1e, 0x53, 0x41, 0xb8, - 0x99, 0x72, 0xc2, 0x7f, 0x5e, 0x4d, 0xf8, 0xff, 0x77, 0xb0, 0xf9, 0x0c, 0xa0, 0x60, 0x97, 0xbe, - 0xda, 0x6e, 0x1b, 0x33, 0xbb, 0xed, 0x42, 0x67, 0x1c, 0xf8, 0xde, 0xd8, 0x4f, 0x46, 0x11, 0xc5, - 0xb9, 0x74, 0xc5, 0x85, 0x71, 0xe0, 0xbf, 0x44, 0x4f, 0x01, 0xe0, 0xc3, 0x37, 0x34, 0x90, 0x99, - 0x6e, 0x0a, 0x02, 0x8e, 0xd1, 0xd3, 0xfb, 0xdb, 0x02, 0x74, 0x6a, 0xdf, 0x74, 0x6a, 0x72, 0x4f, - 0xfc, 0xb8, 0xc8, 0xa3, 0x7f, 0xab, 0xcb, 0x47, 0x4c, 0xf0, 0x2e, 0x31, 0x17, 0x53, 0x4b, 0x4c, - 0xf4, 0xa5, 0x40, 0x3e, 0x00, 0x10, 0x13, 0x2f, 0xf5, 0x83, 0x33, 0x6a, 0xe8, 0x2d, 0xd7, 0x16, - 0x93, 0x3e, 0x3a, 0xd4, 0x9d, 0x26, 0x26, 0x1e, 0x15, 0x82, 0x8b, 0xcc, 0xf4, 0xbe, 0x2d, 0x26, - 0x5f, 0x6a, 0xdb, 0xc4, 0x8e, 0x04, 0x57, 0x13, 0x88, 0x79, 0x06, 0xb6, 0x98, 0xbc, 0x40, 0x87, - 0xca, 0x2a, 
0x8b, 0xac, 0x38, 0xf0, 0xb6, 0x64, 0x95, 0x55, 0x56, 0x59, 0x71, 0xe0, 0xb5, 0x65, - 0x3d, 0xab, 0x2c, 0xb3, 0xe2, 0xcc, 0xdb, 0x96, 0xb5, 0xac, 0xb2, 0xca, 0x6a, 0x17, 0xb1, 0x26, - 0x6b, 0xef, 0xef, 0x0d, 0xe8, 0xd4, 0xbe, 0x4e, 0x55, 0x03, 0x13, 0xe1, 0x65, 0x11, 0xa5, 0xa9, - 0xfa, 0x90, 0xc2, 0xab, 0x1b, 0x12, 0x31, 0x30, 0x1e, 0xc5, 0x97, 0x08, 0x4f, 0xe4, 0x49, 0x52, - 0x7c, 0x68, 0x59, 0xae, 0x9d, 0x08, 0x17, 0x1d, 0x66, 0x39, 0x93, 0x98, 0xae, 0x59, 0x2c, 0x0f, - 0xd0, 0x41, 0x7e, 0x09, 0x24, 0x11, 0x5e, 0x9e, 0xb0, 0x44, 0x52, 0x21, 0xf2, 0x54, 0xb2, 0x61, - 0xf9, 0x51, 0xb0, 0x9e, 0x88, 0xd3, 0xd9, 0x05, 0xf2, 0x48, 0xb3, 0x99, 0xcb, 0xc6, 0xb4, 0xac, - 0x9d, 0x88, 0x57, 0xfa, 0xe6, 0x38, 0x70, 0x7e, 0xf8, 0x71, 0xeb, 0xbd, 0x7f, 0xff, 0xb8, 0xf5, - 0xde, 0x9f, 0xa7, 0x5b, 0x8d, 0x1f, 0xa6, 0x5b, 0x8d, 0x7f, 0x4c, 0xb7, 0x1a, 0xff, 0x9d, 0x6e, - 0x35, 0x86, 0x4b, 0xfa, 0xcf, 0x95, 0x5f, 0xfd, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x4e, 0x24, - 0x22, 0xc4, 0x11, 0x00, 0x00, -} - -func (m *Metrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.MemoryOomControl != nil { - { - size, err := m.MemoryOomControl.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.CgroupStats != nil { - { - size, err := m.CgroupStats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- 
- dAtA[i] = 0x42 - } - if len(m.Network) > 0 { - for iNdEx := len(m.Network) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Network[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.Rdma != nil { - { - size, err := m.Rdma.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Blkio != nil { - { - size, err := m.Blkio.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Memory != nil { - { - size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.CPU != nil { - { - size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Pids != nil { - { - size, err := m.Pids.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Hugetlb) > 0 { - for iNdEx := len(m.Hugetlb) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Hugetlb[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *HugetlbStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HugetlbStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Pagesize) > 0 { - i -= len(m.Pagesize) - copy(dAtA[i:], m.Pagesize) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) - i-- - dAtA[i] = 0x22 - } - if m.Failcnt != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) - i-- - dAtA[i] = 0x18 - } - if m.Max != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - } - if m.Usage != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *PidsStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PidsStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Limit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x10 - } - if m.Current != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CPUStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CPUStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - 
var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Throttling != nil { - { - size, err := m.Throttling.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Usage != nil { - { - size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CPUUsage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CPUUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.PerCPU) > 0 { - dAtA11 := make([]byte, len(m.PerCPU)*10) - var j10 int - for _, num := range m.PerCPU { - for num >= 1<<7 { - dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j10++ - } - dAtA11[j10] = uint8(num) - j10++ - } - i -= j10 - copy(dAtA[i:], dAtA11[:j10]) - i = encodeVarintMetrics(dAtA, i, uint64(j10)) - i-- - dAtA[i] = 0x22 - } - if m.User != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.User)) - i-- - dAtA[i] = 0x18 - } - if m.Kernel != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel)) - i-- - dAtA[i] = 0x10 - } - if m.Total != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Throttle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if 
err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Throttle) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Throttle) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ThrottledTime != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime)) - i-- - dAtA[i] = 0x18 - } - if m.ThrottledPeriods != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods)) - i-- - dAtA[i] = 0x10 - } - if m.Periods != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Periods)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemoryStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoryStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.KernelTCP != nil { - { - size, err := m.KernelTCP.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xa2 - } - if m.Kernel != nil { - { - size, err := m.Kernel.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x9a - } - if m.Swap != nil { - { - size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x92 - } - if m.Usage != nil { - { - size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x8a - } - if m.TotalUnevictable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x80 - } - if m.TotalActiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf8 - } - if m.TotalInactiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf0 - } - if m.TotalActiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe8 - } - if m.TotalInactiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe0 - } - if m.TotalPgMajFault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd8 - } - if m.TotalPgFault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd0 - } - if m.TotalPgPgOut != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc8 - } - if m.TotalPgPgIn != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc0 - } - if m.TotalWriteback != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb8 - } - if m.TotalDirty != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb0 - } - if m.TotalMappedFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa8 - } - if m.TotalRSSHuge != 0 { - i 
= encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa0 - } - if m.TotalRSS != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x98 - } - if m.TotalCache != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x90 - } - if m.HierarchicalSwapLimit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x88 - } - if m.HierarchicalMemoryLimit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.Unevictable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) - i-- - dAtA[i] = 0x78 - } - if m.ActiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) - i-- - dAtA[i] = 0x70 - } - if m.InactiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) - i-- - dAtA[i] = 0x68 - } - if m.ActiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) - i-- - dAtA[i] = 0x60 - } - if m.InactiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) - i-- - dAtA[i] = 0x58 - } - if m.PgMajFault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault)) - i-- - dAtA[i] = 0x50 - } - if m.PgFault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault)) - i-- - dAtA[i] = 0x48 - } - if m.PgPgOut != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut)) - i-- - dAtA[i] = 0x40 - } - if m.PgPgIn != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn)) - i-- - dAtA[i] = 0x38 - } - if m.Writeback != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback)) - i-- - dAtA[i] = 0x30 - } - if m.Dirty != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty)) - i-- - dAtA[i] = 0x28 - } - if m.MappedFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile)) - i-- - dAtA[i] = 0x20 - } - if m.RSSHuge != 0 { - i = 
encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge)) - i-- - dAtA[i] = 0x18 - } - if m.RSS != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RSS)) - i-- - dAtA[i] = 0x10 - } - if m.Cache != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Cache)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemoryEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoryEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Failcnt != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) - i-- - dAtA[i] = 0x20 - } - if m.Max != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x18 - } - if m.Usage != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) - i-- - dAtA[i] = 0x10 - } - if m.Limit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemoryOomControl) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryOomControl) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoryOomControl) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.OomKill != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.OomKill)) - i-- - dAtA[i] = 0x18 - } - 
if m.UnderOom != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.UnderOom)) - i-- - dAtA[i] = 0x10 - } - if m.OomKillDisable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.OomKillDisable)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *BlkIOStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlkIOStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.SectorsRecursive) > 0 { - for iNdEx := len(m.SectorsRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.SectorsRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if len(m.IoTimeRecursive) > 0 { - for iNdEx := len(m.IoTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if len(m.IoMergedRecursive) > 0 { - for iNdEx := len(m.IoMergedRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoMergedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.IoWaitTimeRecursive) > 0 { - for iNdEx := len(m.IoWaitTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoWaitTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.IoServiceTimeRecursive) > 0 { - for iNdEx := len(m.IoServiceTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoServiceTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.IoQueuedRecursive) > 0 { - for iNdEx := len(m.IoQueuedRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoQueuedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.IoServicedRecursive) > 0 { - for iNdEx := len(m.IoServicedRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoServicedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.IoServiceBytesRecursive) > 0 { - for iNdEx := len(m.IoServiceBytesRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoServiceBytesRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlkIOEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Value != 0 { - 
i = encodeVarintMetrics(dAtA, i, uint64(m.Value)) - i-- - dAtA[i] = 0x28 - } - if m.Minor != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Minor)) - i-- - dAtA[i] = 0x20 - } - if m.Major != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) - i-- - dAtA[i] = 0x18 - } - if len(m.Device) > 0 { - i -= len(m.Device) - copy(dAtA[i:], m.Device) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) - i-- - dAtA[i] = 0x12 - } - if len(m.Op) > 0 { - i -= len(m.Op) - copy(dAtA[i:], m.Op) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RdmaStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RdmaStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Limit) > 0 { - for iNdEx := len(m.Limit) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Limit[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Current) > 0 { - for iNdEx := len(m.Current) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Current[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*RdmaEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RdmaEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.HcaObjects != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects)) - i-- - dAtA[i] = 0x18 - } - if m.HcaHandles != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles)) - i-- - dAtA[i] = 0x10 - } - if len(m.Device) > 0 { - i -= len(m.Device) - copy(dAtA[i:], m.Device) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NetworkStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.TxDropped != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) - i-- - dAtA[i] = 0x48 - } - if m.TxErrors != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) - i-- - dAtA[i] = 0x40 - } - if m.TxPackets != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) - i-- - dAtA[i] = 0x38 - } - if m.TxBytes != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) - i-- - dAtA[i] = 0x30 - } - if m.RxDropped != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped)) - i-- - dAtA[i] = 0x28 - } - if m.RxErrors != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) - i-- - dAtA[i] = 0x20 - } - if m.RxPackets != 0 { - i = 
encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) - i-- - dAtA[i] = 0x18 - } - if m.RxBytes != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CgroupStats) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CgroupStats) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CgroupStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.NrIoWait != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrIoWait)) - i-- - dAtA[i] = 0x28 - } - if m.NrUninterruptible != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrUninterruptible)) - i-- - dAtA[i] = 0x20 - } - if m.NrStopped != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrStopped)) - i-- - dAtA[i] = 0x18 - } - if m.NrRunning != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrRunning)) - i-- - dAtA[i] = 0x10 - } - if m.NrSleeping != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrSleeping)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { - offset -= sovMetrics(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Metrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Hugetlb) > 0 { - for _, e := range m.Hugetlb { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.Pids != nil { - l = m.Pids.Size() 
- n += 1 + l + sovMetrics(uint64(l)) - } - if m.CPU != nil { - l = m.CPU.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Memory != nil { - l = m.Memory.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Blkio != nil { - l = m.Blkio.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Rdma != nil { - l = m.Rdma.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if len(m.Network) > 0 { - for _, e := range m.Network { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.CgroupStats != nil { - l = m.CgroupStats.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.MemoryOomControl != nil { - l = m.MemoryOomControl.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HugetlbStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Usage != 0 { - n += 1 + sovMetrics(uint64(m.Usage)) - } - if m.Max != 0 { - n += 1 + sovMetrics(uint64(m.Max)) - } - if m.Failcnt != 0 { - n += 1 + sovMetrics(uint64(m.Failcnt)) - } - l = len(m.Pagesize) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PidsStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Current != 0 { - n += 1 + sovMetrics(uint64(m.Current)) - } - if m.Limit != 0 { - n += 1 + sovMetrics(uint64(m.Limit)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CPUStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Usage != nil { - l = m.Usage.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Throttling != nil { - l = m.Throttling.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CPUUsage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Total != 0 { - n += 1 + 
sovMetrics(uint64(m.Total)) - } - if m.Kernel != 0 { - n += 1 + sovMetrics(uint64(m.Kernel)) - } - if m.User != 0 { - n += 1 + sovMetrics(uint64(m.User)) - } - if len(m.PerCPU) > 0 { - l = 0 - for _, e := range m.PerCPU { - l += sovMetrics(uint64(e)) - } - n += 1 + sovMetrics(uint64(l)) + l - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Throttle) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Periods != 0 { - n += 1 + sovMetrics(uint64(m.Periods)) - } - if m.ThrottledPeriods != 0 { - n += 1 + sovMetrics(uint64(m.ThrottledPeriods)) - } - if m.ThrottledTime != 0 { - n += 1 + sovMetrics(uint64(m.ThrottledTime)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemoryStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Cache != 0 { - n += 1 + sovMetrics(uint64(m.Cache)) - } - if m.RSS != 0 { - n += 1 + sovMetrics(uint64(m.RSS)) - } - if m.RSSHuge != 0 { - n += 1 + sovMetrics(uint64(m.RSSHuge)) - } - if m.MappedFile != 0 { - n += 1 + sovMetrics(uint64(m.MappedFile)) - } - if m.Dirty != 0 { - n += 1 + sovMetrics(uint64(m.Dirty)) - } - if m.Writeback != 0 { - n += 1 + sovMetrics(uint64(m.Writeback)) - } - if m.PgPgIn != 0 { - n += 1 + sovMetrics(uint64(m.PgPgIn)) - } - if m.PgPgOut != 0 { - n += 1 + sovMetrics(uint64(m.PgPgOut)) - } - if m.PgFault != 0 { - n += 1 + sovMetrics(uint64(m.PgFault)) - } - if m.PgMajFault != 0 { - n += 1 + sovMetrics(uint64(m.PgMajFault)) - } - if m.InactiveAnon != 0 { - n += 1 + sovMetrics(uint64(m.InactiveAnon)) - } - if m.ActiveAnon != 0 { - n += 1 + sovMetrics(uint64(m.ActiveAnon)) - } - if m.InactiveFile != 0 { - n += 1 + sovMetrics(uint64(m.InactiveFile)) - } - if m.ActiveFile != 0 { - n += 1 + sovMetrics(uint64(m.ActiveFile)) - } - if m.Unevictable != 0 { - n += 1 + sovMetrics(uint64(m.Unevictable)) - } - if m.HierarchicalMemoryLimit != 0 { - n += 2 + 
sovMetrics(uint64(m.HierarchicalMemoryLimit)) - } - if m.HierarchicalSwapLimit != 0 { - n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit)) - } - if m.TotalCache != 0 { - n += 2 + sovMetrics(uint64(m.TotalCache)) - } - if m.TotalRSS != 0 { - n += 2 + sovMetrics(uint64(m.TotalRSS)) - } - if m.TotalRSSHuge != 0 { - n += 2 + sovMetrics(uint64(m.TotalRSSHuge)) - } - if m.TotalMappedFile != 0 { - n += 2 + sovMetrics(uint64(m.TotalMappedFile)) - } - if m.TotalDirty != 0 { - n += 2 + sovMetrics(uint64(m.TotalDirty)) - } - if m.TotalWriteback != 0 { - n += 2 + sovMetrics(uint64(m.TotalWriteback)) - } - if m.TotalPgPgIn != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgPgIn)) - } - if m.TotalPgPgOut != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgPgOut)) - } - if m.TotalPgFault != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgFault)) - } - if m.TotalPgMajFault != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgMajFault)) - } - if m.TotalInactiveAnon != 0 { - n += 2 + sovMetrics(uint64(m.TotalInactiveAnon)) - } - if m.TotalActiveAnon != 0 { - n += 2 + sovMetrics(uint64(m.TotalActiveAnon)) - } - if m.TotalInactiveFile != 0 { - n += 2 + sovMetrics(uint64(m.TotalInactiveFile)) - } - if m.TotalActiveFile != 0 { - n += 2 + sovMetrics(uint64(m.TotalActiveFile)) - } - if m.TotalUnevictable != 0 { - n += 2 + sovMetrics(uint64(m.TotalUnevictable)) - } - if m.Usage != nil { - l = m.Usage.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.Swap != nil { - l = m.Swap.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.Kernel != nil { - l = m.Kernel.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.KernelTCP != nil { - l = m.KernelTCP.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemoryEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Limit != 0 { - n += 1 + sovMetrics(uint64(m.Limit)) - } - if m.Usage != 0 { - n += 1 + sovMetrics(uint64(m.Usage)) - } - if m.Max != 0 
{ - n += 1 + sovMetrics(uint64(m.Max)) - } - if m.Failcnt != 0 { - n += 1 + sovMetrics(uint64(m.Failcnt)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemoryOomControl) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.OomKillDisable != 0 { - n += 1 + sovMetrics(uint64(m.OomKillDisable)) - } - if m.UnderOom != 0 { - n += 1 + sovMetrics(uint64(m.UnderOom)) - } - if m.OomKill != 0 { - n += 1 + sovMetrics(uint64(m.OomKill)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *BlkIOStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.IoServiceBytesRecursive) > 0 { - for _, e := range m.IoServiceBytesRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoServicedRecursive) > 0 { - for _, e := range m.IoServicedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoQueuedRecursive) > 0 { - for _, e := range m.IoQueuedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoServiceTimeRecursive) > 0 { - for _, e := range m.IoServiceTimeRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoWaitTimeRecursive) > 0 { - for _, e := range m.IoWaitTimeRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoMergedRecursive) > 0 { - for _, e := range m.IoMergedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoTimeRecursive) > 0 { - for _, e := range m.IoTimeRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.SectorsRecursive) > 0 { - for _, e := range m.SectorsRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *BlkIOEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Op) - 
if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - l = len(m.Device) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Major != 0 { - n += 1 + sovMetrics(uint64(m.Major)) - } - if m.Minor != 0 { - n += 1 + sovMetrics(uint64(m.Minor)) - } - if m.Value != 0 { - n += 1 + sovMetrics(uint64(m.Value)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RdmaStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Current) > 0 { - for _, e := range m.Current { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.Limit) > 0 { - for _, e := range m.Limit { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RdmaEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Device) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.HcaHandles != 0 { - n += 1 + sovMetrics(uint64(m.HcaHandles)) - } - if m.HcaObjects != 0 { - n += 1 + sovMetrics(uint64(m.HcaObjects)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *NetworkStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.RxBytes != 0 { - n += 1 + sovMetrics(uint64(m.RxBytes)) - } - if m.RxPackets != 0 { - n += 1 + sovMetrics(uint64(m.RxPackets)) - } - if m.RxErrors != 0 { - n += 1 + sovMetrics(uint64(m.RxErrors)) - } - if m.RxDropped != 0 { - n += 1 + sovMetrics(uint64(m.RxDropped)) - } - if m.TxBytes != 0 { - n += 1 + sovMetrics(uint64(m.TxBytes)) - } - if m.TxPackets != 0 { - n += 1 + sovMetrics(uint64(m.TxPackets)) - } - if m.TxErrors != 0 { - n += 1 + sovMetrics(uint64(m.TxErrors)) - } - if m.TxDropped != 0 { - n += 1 + sovMetrics(uint64(m.TxDropped)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - 
-func (m *CgroupStats) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NrSleeping != 0 { - n += 1 + sovMetrics(uint64(m.NrSleeping)) - } - if m.NrRunning != 0 { - n += 1 + sovMetrics(uint64(m.NrRunning)) - } - if m.NrStopped != 0 { - n += 1 + sovMetrics(uint64(m.NrStopped)) - } - if m.NrUninterruptible != 0 { - n += 1 + sovMetrics(uint64(m.NrUninterruptible)) - } - if m.NrIoWait != 0 { - n += 1 + sovMetrics(uint64(m.NrIoWait)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovMetrics(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMetrics(x uint64) (n int) { - return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Metrics) String() string { - if this == nil { - return "nil" - } - repeatedStringForHugetlb := "[]*HugetlbStat{" - for _, f := range this.Hugetlb { - repeatedStringForHugetlb += strings.Replace(f.String(), "HugetlbStat", "HugetlbStat", 1) + "," - } - repeatedStringForHugetlb += "}" - repeatedStringForNetwork := "[]*NetworkStat{" - for _, f := range this.Network { - repeatedStringForNetwork += strings.Replace(f.String(), "NetworkStat", "NetworkStat", 1) + "," - } - repeatedStringForNetwork += "}" - s := strings.Join([]string{`&Metrics{`, - `Hugetlb:` + repeatedStringForHugetlb + `,`, - `Pids:` + strings.Replace(this.Pids.String(), "PidsStat", "PidsStat", 1) + `,`, - `CPU:` + strings.Replace(this.CPU.String(), "CPUStat", "CPUStat", 1) + `,`, - `Memory:` + strings.Replace(this.Memory.String(), "MemoryStat", "MemoryStat", 1) + `,`, - `Blkio:` + strings.Replace(this.Blkio.String(), "BlkIOStat", "BlkIOStat", 1) + `,`, - `Rdma:` + strings.Replace(this.Rdma.String(), "RdmaStat", "RdmaStat", 1) + `,`, - `Network:` + repeatedStringForNetwork + `,`, - `CgroupStats:` + strings.Replace(this.CgroupStats.String(), "CgroupStats", "CgroupStats", 1) + `,`, - `MemoryOomControl:` + strings.Replace(this.MemoryOomControl.String(), "MemoryOomControl", 
"MemoryOomControl", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *HugetlbStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HugetlbStat{`, - `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, - `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PidsStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PidsStat{`, - `Current:` + fmt.Sprintf("%v", this.Current) + `,`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CPUStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CPUStat{`, - `Usage:` + strings.Replace(this.Usage.String(), "CPUUsage", "CPUUsage", 1) + `,`, - `Throttling:` + strings.Replace(this.Throttling.String(), "Throttle", "Throttle", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CPUUsage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CPUUsage{`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Throttle) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Throttle{`, - `Periods:` + fmt.Sprintf("%v", this.Periods) + `,`, - `ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`, - `ThrottledTime:` + 
fmt.Sprintf("%v", this.ThrottledTime) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryStat{`, - `Cache:` + fmt.Sprintf("%v", this.Cache) + `,`, - `RSS:` + fmt.Sprintf("%v", this.RSS) + `,`, - `RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`, - `MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`, - `Dirty:` + fmt.Sprintf("%v", this.Dirty) + `,`, - `Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`, - `PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`, - `PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`, - `PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`, - `PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + `,`, - `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`, - `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`, - `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`, - `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`, - `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`, - `HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`, - `HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`, - `TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`, - `TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`, - `TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`, - `TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`, - `TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`, - `TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`, - `TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`, - `TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`, - `TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`, - `TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`, - `TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + 
`,`, - `TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`, - `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`, - `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`, - `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`, - `Usage:` + strings.Replace(this.Usage.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `Swap:` + strings.Replace(this.Swap.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `Kernel:` + strings.Replace(this.Kernel.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `KernelTCP:` + strings.Replace(this.KernelTCP.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryEntry{`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryOomControl) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryOomControl{`, - `OomKillDisable:` + fmt.Sprintf("%v", this.OomKillDisable) + `,`, - `UnderOom:` + fmt.Sprintf("%v", this.UnderOom) + `,`, - `OomKill:` + fmt.Sprintf("%v", this.OomKill) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *BlkIOStat) String() string { - if this == nil { - return "nil" - } - repeatedStringForIoServiceBytesRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoServiceBytesRecursive { - repeatedStringForIoServiceBytesRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoServiceBytesRecursive += "}" - 
repeatedStringForIoServicedRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoServicedRecursive { - repeatedStringForIoServicedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoServicedRecursive += "}" - repeatedStringForIoQueuedRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoQueuedRecursive { - repeatedStringForIoQueuedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoQueuedRecursive += "}" - repeatedStringForIoServiceTimeRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoServiceTimeRecursive { - repeatedStringForIoServiceTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoServiceTimeRecursive += "}" - repeatedStringForIoWaitTimeRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoWaitTimeRecursive { - repeatedStringForIoWaitTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoWaitTimeRecursive += "}" - repeatedStringForIoMergedRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoMergedRecursive { - repeatedStringForIoMergedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoMergedRecursive += "}" - repeatedStringForIoTimeRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoTimeRecursive { - repeatedStringForIoTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoTimeRecursive += "}" - repeatedStringForSectorsRecursive := "[]*BlkIOEntry{" - for _, f := range this.SectorsRecursive { - repeatedStringForSectorsRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForSectorsRecursive += "}" - s := strings.Join([]string{`&BlkIOStat{`, - `IoServiceBytesRecursive:` + repeatedStringForIoServiceBytesRecursive + `,`, - `IoServicedRecursive:` + repeatedStringForIoServicedRecursive + 
`,`, - `IoQueuedRecursive:` + repeatedStringForIoQueuedRecursive + `,`, - `IoServiceTimeRecursive:` + repeatedStringForIoServiceTimeRecursive + `,`, - `IoWaitTimeRecursive:` + repeatedStringForIoWaitTimeRecursive + `,`, - `IoMergedRecursive:` + repeatedStringForIoMergedRecursive + `,`, - `IoTimeRecursive:` + repeatedStringForIoTimeRecursive + `,`, - `SectorsRecursive:` + repeatedStringForSectorsRecursive + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *BlkIOEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BlkIOEntry{`, - `Op:` + fmt.Sprintf("%v", this.Op) + `,`, - `Device:` + fmt.Sprintf("%v", this.Device) + `,`, - `Major:` + fmt.Sprintf("%v", this.Major) + `,`, - `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RdmaStat) String() string { - if this == nil { - return "nil" - } - repeatedStringForCurrent := "[]*RdmaEntry{" - for _, f := range this.Current { - repeatedStringForCurrent += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," - } - repeatedStringForCurrent += "}" - repeatedStringForLimit := "[]*RdmaEntry{" - for _, f := range this.Limit { - repeatedStringForLimit += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," - } - repeatedStringForLimit += "}" - s := strings.Join([]string{`&RdmaStat{`, - `Current:` + repeatedStringForCurrent + `,`, - `Limit:` + repeatedStringForLimit + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RdmaEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RdmaEntry{`, - `Device:` + fmt.Sprintf("%v", this.Device) + `,`, - `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`, - `HcaObjects:` + fmt.Sprintf("%v", 
this.HcaObjects) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkStat{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`, - `RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`, - `RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`, - `RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`, - `TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`, - `TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`, - `TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`, - `TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CgroupStats) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CgroupStats{`, - `NrSleeping:` + fmt.Sprintf("%v", this.NrSleeping) + `,`, - `NrRunning:` + fmt.Sprintf("%v", this.NrRunning) + `,`, - `NrStopped:` + fmt.Sprintf("%v", this.NrStopped) + `,`, - `NrUninterruptible:` + fmt.Sprintf("%v", this.NrUninterruptible) + `,`, - `NrIoWait:` + fmt.Sprintf("%v", this.NrIoWait) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringMetrics(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Metrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType 
:= int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hugetlb = append(m.Hugetlb, &HugetlbStat{}) - if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pids == nil { - m.Pids = &PidsStat{} - } - if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CPU == nil { - m.CPU = &CPUStat{} - } - if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memory == nil { - m.Memory = &MemoryStat{} - } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Blkio == nil { - m.Blkio = &BlkIOStat{} - } - if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType) - } - var msglen int - for shift := uint(0); 
; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Rdma == nil { - m.Rdma = &RdmaStat{} - } - if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Network = append(m.Network, &NetworkStat{}) - if err := m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CgroupStats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CgroupStats == nil { - m.CgroupStats = &CgroupStats{} - } - if err := m.CgroupStats.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryOomControl", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MemoryOomControl == nil { - m.MemoryOomControl = &MemoryOomControl{} - } - if err := m.MemoryOomControl.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HugetlbStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - m.Usage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Usage |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) - } - m.Failcnt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Failcnt |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType) - } - var stringLen 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pagesize = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PidsStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PidsStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - m.Current = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Current |= uint64(b&0x7F) << shift - if b < 0x80 
{ - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CPUStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPUStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { 
- return io.ErrUnexpectedEOF - } - if m.Usage == nil { - m.Usage = &CPUUsage{} - } - if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Throttling == nil { - m.Throttling = &Throttle{} - } - if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CPUUsage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) - } - m.Kernel = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kernel |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - m.User = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.User |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PerCPU = append(m.PerCPU, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.PerCPU) == 0 { - m.PerCPU = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PerCPU = append(m.PerCPU, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Throttle) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Throttle: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType) - } - m.Periods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Periods |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType) - } - m.ThrottledPeriods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ThrottledPeriods |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType) - } - m.ThrottledTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ThrottledTime |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - 
if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) - } - m.Cache = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Cache |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType) - } - m.RSS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RSS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType) - } - m.RSSHuge = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RSSHuge |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType) - } - m.MappedFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MappedFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType) - } - m.Dirty = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Dirty |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Writeback", wireType) - } - m.Writeback = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Writeback |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType) - } - m.PgPgIn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgPgIn |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType) - } - m.PgPgOut = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgPgOut |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType) - } - m.PgFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType) - } - m.PgMajFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgMajFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType) - } - m.InactiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InactiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType) - } - m.ActiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ActiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType) - } - m.InactiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InactiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for 
field ActiveFile", wireType) - } - m.ActiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ActiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType) - } - m.Unevictable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Unevictable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType) - } - m.HierarchicalMemoryLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HierarchicalMemoryLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType) - } - m.HierarchicalSwapLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HierarchicalSwapLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 18: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType) - } - m.TotalCache = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalCache |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 19: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
TotalRSS", wireType) - } - m.TotalRSS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRSS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 20: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType) - } - m.TotalRSSHuge = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRSSHuge |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 21: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType) - } - m.TotalMappedFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalMappedFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 22: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType) - } - m.TotalDirty = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalDirty |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 23: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType) - } - m.TotalWriteback = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalWriteback |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 24: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType) - } - m.TotalPgPgIn = 0 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgPgIn |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 25: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType) - } - m.TotalPgPgOut = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgPgOut |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 26: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType) - } - m.TotalPgFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 27: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType) - } - m.TotalPgMajFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgMajFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 28: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType) - } - m.TotalInactiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalInactiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 29: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType) - } - m.TotalActiveAnon = 0 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalActiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 30: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType) - } - m.TotalInactiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalInactiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 31: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType) - } - m.TotalActiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalActiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 32: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType) - } - m.TotalUnevictable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalUnevictable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 33: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Usage 
== nil { - m.Usage = &MemoryEntry{} - } - if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 34: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Swap == nil { - m.Swap = &MemoryEntry{} - } - if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 35: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kernel == nil { - m.Kernel = &MemoryEntry{} - } - if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 36: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KernelTCP == nil { - m.KernelTCP = &MemoryEntry{} - } - if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - m.Usage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Usage |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) - } - m.Failcnt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Failcnt |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryOomControl) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryOomControl: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryOomControl: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OomKillDisable", wireType) - } - m.OomKillDisable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OomKillDisable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnderOom", wireType) - } - m.UnderOom = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnderOom |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OomKill", wireType) - } - m.OomKill = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OomKill |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) 
- if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlkIOStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{}) - if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType) - } - var msglen int - for shift := uint(0); 
; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{}) - if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{}) - if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{}) - if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{}) - if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{}) - if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift 
>= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{}) - if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{}) - if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlkIOEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Op = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Device = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) - } - m.Major = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Major |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) - } - m.Minor = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Minor |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RdmaStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Current = append(m.Current, &RdmaEntry{}) - if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics 
- } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Limit = append(m.Limit, &RdmaEntry{}) - if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RdmaEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Device = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - 
case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType) - } - m.HcaHandles = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HcaHandles |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType) - } - m.HcaObjects = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HcaObjects |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) - } - m.RxBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType) - } - m.RxPackets = 0 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxPackets |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType) - } - m.RxErrors = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxErrors |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType) - } - m.RxDropped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxDropped |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) - } - m.TxBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType) - } - m.TxPackets = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxPackets |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType) - } - m.TxErrors = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - m.TxErrors |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType) - } - m.TxDropped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxDropped |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CgroupStats) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CgroupStats: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CgroupStats: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrSleeping", wireType) - } - m.NrSleeping = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrSleeping |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field NrRunning", wireType) - } - m.NrRunning = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrRunning |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrStopped", wireType) - } - m.NrStopped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrStopped |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrUninterruptible", wireType) - } - m.NrUninterruptible = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrUninterruptible |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrIoWait", wireType) - } - m.NrIoWait = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrIoWait |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMetrics(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMetrics - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMetrics - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMetrics - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt deleted file mode 100644 index e476cea..0000000 --- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt +++ /dev/null @@ -1,790 +0,0 @@ -file { - 
name: "github.com/containerd/cgroups/stats/v1/metrics.proto" - package: "io.containerd.cgroups.v1" - dependency: "gogoproto/gogo.proto" - message_type { - name: "Metrics" - field { - name: "hugetlb" - number: 1 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.HugetlbStat" - json_name: "hugetlb" - } - field { - name: "pids" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.PidsStat" - json_name: "pids" - } - field { - name: "cpu" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.CPUStat" - options { - 65004: "CPU" - } - json_name: "cpu" - } - field { - name: "memory" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryStat" - json_name: "memory" - } - field { - name: "blkio" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOStat" - json_name: "blkio" - } - field { - name: "rdma" - number: 6 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.RdmaStat" - json_name: "rdma" - } - field { - name: "network" - number: 7 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.NetworkStat" - json_name: "network" - } - field { - name: "cgroup_stats" - number: 8 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.CgroupStats" - json_name: "cgroupStats" - } - field { - name: "memory_oom_control" - number: 9 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryOomControl" - json_name: "memoryOomControl" - } - } - message_type { - name: "HugetlbStat" - field { - name: "usage" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "usage" - } - field { - name: "max" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "max" - } - field { - name: "failcnt" - number: 3 - label: LABEL_OPTIONAL 
- type: TYPE_UINT64 - json_name: "failcnt" - } - field { - name: "pagesize" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: "pagesize" - } - } - message_type { - name: "PidsStat" - field { - name: "current" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "current" - } - field { - name: "limit" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "limit" - } - } - message_type { - name: "CPUStat" - field { - name: "usage" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.CPUUsage" - json_name: "usage" - } - field { - name: "throttling" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.Throttle" - json_name: "throttling" - } - } - message_type { - name: "CPUUsage" - field { - name: "total" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "total" - } - field { - name: "kernel" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "kernel" - } - field { - name: "user" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "user" - } - field { - name: "per_cpu" - number: 4 - label: LABEL_REPEATED - type: TYPE_UINT64 - options { - 65004: "PerCPU" - } - json_name: "perCpu" - } - } - message_type { - name: "Throttle" - field { - name: "periods" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "periods" - } - field { - name: "throttled_periods" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "throttledPeriods" - } - field { - name: "throttled_time" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "throttledTime" - } - } - message_type { - name: "MemoryStat" - field { - name: "cache" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "cache" - } - field { - name: "rss" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - options { - 65004: "RSS" - } - json_name: "rss" - } - field { - name: 
"rss_huge" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - options { - 65004: "RSSHuge" - } - json_name: "rssHuge" - } - field { - name: "mapped_file" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "mappedFile" - } - field { - name: "dirty" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "dirty" - } - field { - name: "writeback" - number: 6 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "writeback" - } - field { - name: "pg_pg_in" - number: 7 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgPgIn" - } - field { - name: "pg_pg_out" - number: 8 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgPgOut" - } - field { - name: "pg_fault" - number: 9 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgFault" - } - field { - name: "pg_maj_fault" - number: 10 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgMajFault" - } - field { - name: "inactive_anon" - number: 11 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "inactiveAnon" - } - field { - name: "active_anon" - number: 12 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "activeAnon" - } - field { - name: "inactive_file" - number: 13 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "inactiveFile" - } - field { - name: "active_file" - number: 14 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "activeFile" - } - field { - name: "unevictable" - number: 15 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "unevictable" - } - field { - name: "hierarchical_memory_limit" - number: 16 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "hierarchicalMemoryLimit" - } - field { - name: "hierarchical_swap_limit" - number: 17 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "hierarchicalSwapLimit" - } - field { - name: "total_cache" - number: 18 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalCache" - } - field { - name: "total_rss" - number: 19 - label: 
LABEL_OPTIONAL - type: TYPE_UINT64 - options { - 65004: "TotalRSS" - } - json_name: "totalRss" - } - field { - name: "total_rss_huge" - number: 20 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - options { - 65004: "TotalRSSHuge" - } - json_name: "totalRssHuge" - } - field { - name: "total_mapped_file" - number: 21 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalMappedFile" - } - field { - name: "total_dirty" - number: 22 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalDirty" - } - field { - name: "total_writeback" - number: 23 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalWriteback" - } - field { - name: "total_pg_pg_in" - number: 24 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalPgPgIn" - } - field { - name: "total_pg_pg_out" - number: 25 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalPgPgOut" - } - field { - name: "total_pg_fault" - number: 26 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalPgFault" - } - field { - name: "total_pg_maj_fault" - number: 27 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalPgMajFault" - } - field { - name: "total_inactive_anon" - number: 28 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalInactiveAnon" - } - field { - name: "total_active_anon" - number: 29 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalActiveAnon" - } - field { - name: "total_inactive_file" - number: 30 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalInactiveFile" - } - field { - name: "total_active_file" - number: 31 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalActiveFile" - } - field { - name: "total_unevictable" - number: 32 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalUnevictable" - } - field { - name: "usage" - number: 33 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryEntry" - json_name: "usage" - } - field { - name: "swap" - number: 
34 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryEntry" - json_name: "swap" - } - field { - name: "kernel" - number: 35 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryEntry" - json_name: "kernel" - } - field { - name: "kernel_tcp" - number: 36 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryEntry" - options { - 65004: "KernelTCP" - } - json_name: "kernelTcp" - } - } - message_type { - name: "MemoryEntry" - field { - name: "limit" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "limit" - } - field { - name: "usage" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "usage" - } - field { - name: "max" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "max" - } - field { - name: "failcnt" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "failcnt" - } - } - message_type { - name: "MemoryOomControl" - field { - name: "oom_kill_disable" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "oomKillDisable" - } - field { - name: "under_oom" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "underOom" - } - field { - name: "oom_kill" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "oomKill" - } - } - message_type { - name: "BlkIOStat" - field { - name: "io_service_bytes_recursive" - number: 1 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioServiceBytesRecursive" - } - field { - name: "io_serviced_recursive" - number: 2 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioServicedRecursive" - } - field { - name: "io_queued_recursive" - number: 3 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioQueuedRecursive" - } - field { - 
name: "io_service_time_recursive" - number: 4 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioServiceTimeRecursive" - } - field { - name: "io_wait_time_recursive" - number: 5 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioWaitTimeRecursive" - } - field { - name: "io_merged_recursive" - number: 6 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioMergedRecursive" - } - field { - name: "io_time_recursive" - number: 7 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioTimeRecursive" - } - field { - name: "sectors_recursive" - number: 8 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "sectorsRecursive" - } - } - message_type { - name: "BlkIOEntry" - field { - name: "op" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: "op" - } - field { - name: "device" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: "device" - } - field { - name: "major" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "major" - } - field { - name: "minor" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "minor" - } - field { - name: "value" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "value" - } - } - message_type { - name: "RdmaStat" - field { - name: "current" - number: 1 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.RdmaEntry" - json_name: "current" - } - field { - name: "limit" - number: 2 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.RdmaEntry" - json_name: "limit" - } - } - message_type { - name: "RdmaEntry" - field { - name: "device" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: 
"device" - } - field { - name: "hca_handles" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT32 - json_name: "hcaHandles" - } - field { - name: "hca_objects" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT32 - json_name: "hcaObjects" - } - } - message_type { - name: "NetworkStat" - field { - name: "name" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: "name" - } - field { - name: "rx_bytes" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "rxBytes" - } - field { - name: "rx_packets" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "rxPackets" - } - field { - name: "rx_errors" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "rxErrors" - } - field { - name: "rx_dropped" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "rxDropped" - } - field { - name: "tx_bytes" - number: 6 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "txBytes" - } - field { - name: "tx_packets" - number: 7 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "txPackets" - } - field { - name: "tx_errors" - number: 8 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "txErrors" - } - field { - name: "tx_dropped" - number: 9 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "txDropped" - } - } - message_type { - name: "CgroupStats" - field { - name: "nr_sleeping" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrSleeping" - } - field { - name: "nr_running" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrRunning" - } - field { - name: "nr_stopped" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrStopped" - } - field { - name: "nr_uninterruptible" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrUninterruptible" - } - field { - name: "nr_io_wait" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrIoWait" - } - } - syntax: "proto3" -} diff --git 
a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto b/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto deleted file mode 100644 index b3f6cc3..0000000 --- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto +++ /dev/null @@ -1,158 +0,0 @@ -syntax = "proto3"; - -package io.containerd.cgroups.v1; - -import "gogoproto/gogo.proto"; - -message Metrics { - repeated HugetlbStat hugetlb = 1; - PidsStat pids = 2; - CPUStat cpu = 3 [(gogoproto.customname) = "CPU"]; - MemoryStat memory = 4; - BlkIOStat blkio = 5; - RdmaStat rdma = 6; - repeated NetworkStat network = 7; - CgroupStats cgroup_stats = 8; - MemoryOomControl memory_oom_control = 9; -} - -message HugetlbStat { - uint64 usage = 1; - uint64 max = 2; - uint64 failcnt = 3; - string pagesize = 4; -} - -message PidsStat { - uint64 current = 1; - uint64 limit = 2; -} - -message CPUStat { - CPUUsage usage = 1; - Throttle throttling = 2; -} - -message CPUUsage { - // values in nanoseconds - uint64 total = 1; - uint64 kernel = 2; - uint64 user = 3; - repeated uint64 per_cpu = 4 [(gogoproto.customname) = "PerCPU"]; - -} - -message Throttle { - uint64 periods = 1; - uint64 throttled_periods = 2; - uint64 throttled_time = 3; -} - -message MemoryStat { - uint64 cache = 1; - uint64 rss = 2 [(gogoproto.customname) = "RSS"]; - uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"]; - uint64 mapped_file = 4; - uint64 dirty = 5; - uint64 writeback = 6; - uint64 pg_pg_in = 7; - uint64 pg_pg_out = 8; - uint64 pg_fault = 9; - uint64 pg_maj_fault = 10; - uint64 inactive_anon = 11; - uint64 active_anon = 12; - uint64 inactive_file = 13; - uint64 active_file = 14; - uint64 unevictable = 15; - uint64 hierarchical_memory_limit = 16; - uint64 hierarchical_swap_limit = 17; - uint64 total_cache = 18; - uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"]; - uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"]; - uint64 total_mapped_file = 21; - uint64 total_dirty = 22; - uint64 
total_writeback = 23; - uint64 total_pg_pg_in = 24; - uint64 total_pg_pg_out = 25; - uint64 total_pg_fault = 26; - uint64 total_pg_maj_fault = 27; - uint64 total_inactive_anon = 28; - uint64 total_active_anon = 29; - uint64 total_inactive_file = 30; - uint64 total_active_file = 31; - uint64 total_unevictable = 32; - MemoryEntry usage = 33; - MemoryEntry swap = 34; - MemoryEntry kernel = 35; - MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"]; - -} - -message MemoryEntry { - uint64 limit = 1; - uint64 usage = 2; - uint64 max = 3; - uint64 failcnt = 4; -} - -message MemoryOomControl { - uint64 oom_kill_disable = 1; - uint64 under_oom = 2; - uint64 oom_kill = 3; -} - -message BlkIOStat { - repeated BlkIOEntry io_service_bytes_recursive = 1; - repeated BlkIOEntry io_serviced_recursive = 2; - repeated BlkIOEntry io_queued_recursive = 3; - repeated BlkIOEntry io_service_time_recursive = 4; - repeated BlkIOEntry io_wait_time_recursive = 5; - repeated BlkIOEntry io_merged_recursive = 6; - repeated BlkIOEntry io_time_recursive = 7; - repeated BlkIOEntry sectors_recursive = 8; -} - -message BlkIOEntry { - string op = 1; - string device = 2; - uint64 major = 3; - uint64 minor = 4; - uint64 value = 5; -} - -message RdmaStat { - repeated RdmaEntry current = 1; - repeated RdmaEntry limit = 2; -} - -message RdmaEntry { - string device = 1; - uint32 hca_handles = 2; - uint32 hca_objects = 3; -} - -message NetworkStat { - string name = 1; - uint64 rx_bytes = 2; - uint64 rx_packets = 3; - uint64 rx_errors = 4; - uint64 rx_dropped = 5; - uint64 tx_bytes = 6; - uint64 tx_packets = 7; - uint64 tx_errors = 8; - uint64 tx_dropped = 9; -} - -// CgroupStats exports per-cgroup statistics. 
-message CgroupStats { - // number of tasks sleeping - uint64 nr_sleeping = 1; - // number of tasks running - uint64 nr_running = 2; - // number of tasks in stopped state - uint64 nr_stopped = 3; - // number of tasks in uninterruptible state - uint64 nr_uninterruptible = 4; - // number of tasks waiting on IO - uint64 nr_io_wait = 5; -} diff --git a/vendor/github.com/containerd/cgroups/subsystem.go b/vendor/github.com/containerd/cgroups/subsystem.go deleted file mode 100644 index b2f4185..0000000 --- a/vendor/github.com/containerd/cgroups/subsystem.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "fmt" - "os" - - v1 "github.com/containerd/cgroups/stats/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// Name is a typed name for a cgroup subsystem -type Name string - -const ( - Devices Name = "devices" - Hugetlb Name = "hugetlb" - Freezer Name = "freezer" - Pids Name = "pids" - NetCLS Name = "net_cls" - NetPrio Name = "net_prio" - PerfEvent Name = "perf_event" - Cpuset Name = "cpuset" - Cpu Name = "cpu" - Cpuacct Name = "cpuacct" - Memory Name = "memory" - Blkio Name = "blkio" - Rdma Name = "rdma" -) - -// Subsystems returns a complete list of the default cgroups -// available on most linux systems -func Subsystems() []Name { - n := []Name{ - Freezer, - Pids, - NetCLS, - NetPrio, - PerfEvent, - Cpuset, - Cpu, - Cpuacct, - Memory, - Blkio, - Rdma, - } - if !RunningInUserNS() { - n = append(n, Devices) - } - if _, err := os.Stat("/sys/kernel/mm/hugepages"); err == nil { - n = append(n, Hugetlb) - } - return n -} - -type Subsystem interface { - Name() Name -} - -type pather interface { - Subsystem - Path(path string) string -} - -type creator interface { - Subsystem - Create(path string, resources *specs.LinuxResources) error -} - -type deleter interface { - Subsystem - Delete(path string) error -} - -type stater interface { - Subsystem - Stat(path string, stats *v1.Metrics) error -} - -type updater interface { - Subsystem - Update(path string, resources *specs.LinuxResources) error -} - -// SingleSubsystem returns a single cgroup subsystem within the base Hierarchy -func SingleSubsystem(baseHierarchy Hierarchy, subsystem Name) Hierarchy { - return func() ([]Subsystem, error) { - subsystems, err := baseHierarchy() - if err != nil { - return nil, err - } - for _, s := range subsystems { - if s.Name() == subsystem { - return []Subsystem{ - s, - }, nil - } - } - return nil, fmt.Errorf("unable to find subsystem %s", subsystem) - } -} diff --git a/vendor/github.com/containerd/cgroups/systemd.go 
b/vendor/github.com/containerd/cgroups/systemd.go deleted file mode 100644 index c17f34a..0000000 --- a/vendor/github.com/containerd/cgroups/systemd.go +++ /dev/null @@ -1,155 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "path/filepath" - "strings" - "sync" - - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -const ( - SystemdDbus Name = "systemd" - defaultSlice = "system.slice" -) - -var ( - canDelegate bool - once sync.Once -) - -func Systemd() ([]Subsystem, error) { - root, err := v1MountPoint() - if err != nil { - return nil, err - } - defaultSubsystems, err := defaults(root) - if err != nil { - return nil, err - } - s, err := NewSystemd(root) - if err != nil { - return nil, err - } - // make sure the systemd controller is added first - return append([]Subsystem{s}, defaultSubsystems...), nil -} - -func Slice(slice, name string) Path { - if slice == "" { - slice = defaultSlice - } - return func(subsystem Name) (string, error) { - return filepath.Join(slice, name), nil - } -} - -func NewSystemd(root string) (*SystemdController, error) { - return &SystemdController{ - root: root, - }, nil -} - -type SystemdController struct { - mu sync.Mutex - root string -} - -func (s *SystemdController) Name() Name { - return SystemdDbus -} - -func (s *SystemdController) Create(path string, _ 
*specs.LinuxResources) error { - conn, err := systemdDbus.New() - if err != nil { - return err - } - defer conn.Close() - slice, name := splitName(path) - // We need to see if systemd can handle the delegate property - // Systemd will return an error if it cannot handle delegate regardless - // of its bool setting. - checkDelegate := func() { - canDelegate = true - dlSlice := newProperty("Delegate", true) - if _, err := conn.StartTransientUnit(slice, "testdelegate", []systemdDbus.Property{dlSlice}, nil); err != nil { - if dbusError, ok := err.(dbus.Error); ok { - // Starting with systemd v237, Delegate is not even a property of slices anymore, - // so the D-Bus call fails with "InvalidArgs" error. - if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") || strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.InvalidArgs") { - canDelegate = false - } - } - } - - conn.StopUnit(slice, "testDelegate", nil) - } - once.Do(checkDelegate) - properties := []systemdDbus.Property{ - systemdDbus.PropDescription("cgroup " + name), - systemdDbus.PropWants(slice), - newProperty("DefaultDependencies", false), - newProperty("MemoryAccounting", true), - newProperty("CPUAccounting", true), - newProperty("BlockIOAccounting", true), - } - - // If we can delegate, we add the property back in - if canDelegate { - properties = append(properties, newProperty("Delegate", true)) - } - - ch := make(chan string) - _, err = conn.StartTransientUnit(name, "replace", properties, ch) - if err != nil { - return err - } - <-ch - return nil -} - -func (s *SystemdController) Delete(path string) error { - conn, err := systemdDbus.New() - if err != nil { - return err - } - defer conn.Close() - _, name := splitName(path) - ch := make(chan string) - _, err = conn.StopUnit(name, "replace", ch) - if err != nil { - return err - } - <-ch - return nil -} - -func newProperty(name string, units interface{}) systemdDbus.Property { - return systemdDbus.Property{ - Name: name, - 
Value: dbus.MakeVariant(units), - } -} - -func splitName(path string) (slice string, unit string) { - slice, unit = filepath.Split(path) - return strings.TrimSuffix(slice, "/"), unit -} diff --git a/vendor/github.com/containerd/cgroups/ticks.go b/vendor/github.com/containerd/cgroups/ticks.go deleted file mode 100644 index 84dc38d..0000000 --- a/vendor/github.com/containerd/cgroups/ticks.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -func getClockTicks() uint64 { - // The value comes from `C.sysconf(C._SC_CLK_TCK)`, and - // on Linux it's a constant which is safe to be hard coded, - // so we can avoid using cgo here. - // See https://github.com/containerd/cgroups/pull/12 for - // more details. - return 100 -} diff --git a/vendor/github.com/containerd/cgroups/utils.go b/vendor/github.com/containerd/cgroups/utils.go deleted file mode 100644 index ed894b3..0000000 --- a/vendor/github.com/containerd/cgroups/utils.go +++ /dev/null @@ -1,404 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "bufio" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - units "github.com/docker/go-units" - specs "github.com/opencontainers/runtime-spec/specs-go" - "golang.org/x/sys/unix" -) - -var ( - nsOnce sync.Once - inUserNS bool - checkMode sync.Once - cgMode CGMode -) - -const unifiedMountpoint = "/sys/fs/cgroup" - -// CGMode is the cgroups mode of the host system -type CGMode int - -const ( - // Unavailable cgroup mountpoint - Unavailable CGMode = iota - // Legacy cgroups v1 - Legacy - // Hybrid with cgroups v1 and v2 controllers mounted - Hybrid - // Unified with only cgroups v2 mounted - Unified -) - -// Mode returns the cgroups mode running on the host -func Mode() CGMode { - checkMode.Do(func() { - var st unix.Statfs_t - if err := unix.Statfs(unifiedMountpoint, &st); err != nil { - cgMode = Unavailable - return - } - switch st.Type { - case unix.CGROUP2_SUPER_MAGIC: - cgMode = Unified - default: - cgMode = Legacy - if err := unix.Statfs(filepath.Join(unifiedMountpoint, "unified"), &st); err != nil { - return - } - if st.Type == unix.CGROUP2_SUPER_MAGIC { - cgMode = Hybrid - } - } - }) - return cgMode -} - -// RunningInUserNS detects whether we are currently running in a user namespace. 
-// Copied from github.com/lxc/lxd/shared/util.go -func RunningInUserNS() bool { - nsOnce.Do(func() { - file, err := os.Open("/proc/self/uid_map") - if err != nil { - // This kernel-provided file only exists if user namespaces are supported - return - } - defer file.Close() - - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return - } - - line := string(l) - var a, b, c int64 - fmt.Sscanf(line, "%d %d %d", &a, &b, &c) - - /* - * We assume we are in the initial user namespace if we have a full - * range - 4294967295 uids starting at uid 0. - */ - if a == 0 && b == 0 && c == 4294967295 { - return - } - inUserNS = true - }) - return inUserNS -} - -// defaults returns all known groups -func defaults(root string) ([]Subsystem, error) { - h, err := NewHugetlb(root) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - s := []Subsystem{ - NewNamed(root, "systemd"), - NewFreezer(root), - NewPids(root), - NewNetCls(root), - NewNetPrio(root), - NewPerfEvent(root), - NewCpuset(root), - NewCpu(root), - NewCpuacct(root), - NewMemory(root), - NewBlkio(root), - NewRdma(root), - } - // only add the devices cgroup if we are not in a user namespace - // because modifications are not allowed - if !RunningInUserNS() { - s = append(s, NewDevices(root)) - } - // add the hugetlb cgroup if error wasn't due to missing hugetlb - // cgroup support on the host - if err == nil { - s = append(s, h) - } - return s, nil -} - -// remove will remove a cgroup path handling EAGAIN and EBUSY errors and -// retrying the remove after a exp timeout -func remove(path string) error { - delay := 10 * time.Millisecond - for i := 0; i < 5; i++ { - if i != 0 { - time.Sleep(delay) - delay *= 2 - } - if err := os.RemoveAll(path); err == nil { - return nil - } - } - return fmt.Errorf("cgroups: unable to remove path %q", path) -} - -// readPids will read all the pids of processes in a cgroup by the provided path -func readPids(path string, subsystem Name) ([]Process, 
error) { - f, err := os.Open(filepath.Join(path, cgroupProcs)) - if err != nil { - return nil, err - } - defer f.Close() - var ( - out []Process - s = bufio.NewScanner(f) - ) - for s.Scan() { - if t := s.Text(); t != "" { - pid, err := strconv.Atoi(t) - if err != nil { - return nil, err - } - out = append(out, Process{ - Pid: pid, - Subsystem: subsystem, - Path: path, - }) - } - } - if err := s.Err(); err != nil { - // failed to read all pids? - return nil, err - } - return out, nil -} - -// readTasksPids will read all the pids of tasks in a cgroup by the provided path -func readTasksPids(path string, subsystem Name) ([]Task, error) { - f, err := os.Open(filepath.Join(path, cgroupTasks)) - if err != nil { - return nil, err - } - defer f.Close() - var ( - out []Task - s = bufio.NewScanner(f) - ) - for s.Scan() { - if t := s.Text(); t != "" { - pid, err := strconv.Atoi(t) - if err != nil { - return nil, err - } - out = append(out, Task{ - Pid: pid, - Subsystem: subsystem, - Path: path, - }) - } - } - if err := s.Err(); err != nil { - return nil, err - } - return out, nil -} - -func hugePageSizes() ([]string, error) { - var ( - pageSizes []string - sizeList = []string{"B", "KB", "MB", "GB", "TB", "PB"} - ) - files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages") - if err != nil { - return nil, err - } - for _, st := range files { - nameArray := strings.Split(st.Name(), "-") - pageSize, err := units.RAMInBytes(nameArray[1]) - if err != nil { - return nil, err - } - pageSizes = append(pageSizes, units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList)) - } - return pageSizes, nil -} - -func readUint(path string) (uint64, error) { - v, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - return parseUint(strings.TrimSpace(string(v)), 10, 64) -} - -func parseUint(s string, base, bitSize int) (uint64, error) { - v, err := strconv.ParseUint(s, base, bitSize) - if err != nil { - intValue, intErr := strconv.ParseInt(s, base, bitSize) - // 1. 
Handle negative values greater than MinInt64 (and) - // 2. Handle negative values lesser than MinInt64 - if intErr == nil && intValue < 0 { - return 0, nil - } else if intErr != nil && - intErr.(*strconv.NumError).Err == strconv.ErrRange && - intValue < 0 { - return 0, nil - } - return 0, err - } - return v, nil -} - -func parseKV(raw string) (string, uint64, error) { - parts := strings.Fields(raw) - switch len(parts) { - case 2: - v, err := parseUint(parts[1], 10, 64) - if err != nil { - return "", 0, err - } - return parts[0], v, nil - default: - return "", 0, ErrInvalidFormat - } -} - -func parseCgroupFile(path string) (map[string]string, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - return parseCgroupFromReader(f) -} - -func parseCgroupFromReader(r io.Reader) (map[string]string, error) { - var ( - cgroups = make(map[string]string) - s = bufio.NewScanner(r) - ) - for s.Scan() { - var ( - text = s.Text() - parts = strings.SplitN(text, ":", 3) - ) - if len(parts) < 3 { - return nil, fmt.Errorf("invalid cgroup entry: %q", text) - } - for _, subs := range strings.Split(parts[1], ",") { - if subs != "" { - cgroups[subs] = parts[2] - } - } - } - if err := s.Err(); err != nil { - return nil, err - } - return cgroups, nil -} - -func getCgroupDestination(subsystem string) (string, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return "", err - } - defer f.Close() - s := bufio.NewScanner(f) - for s.Scan() { - fields := strings.Split(s.Text(), " ") - if len(fields) < 10 { - // broken mountinfo? 
- continue - } - if fields[len(fields)-3] != "cgroup" { - continue - } - for _, opt := range strings.Split(fields[len(fields)-1], ",") { - if opt == subsystem { - return fields[3], nil - } - } - } - if err := s.Err(); err != nil { - return "", err - } - return "", ErrNoCgroupMountDestination -} - -func pathers(subystems []Subsystem) []pather { - var out []pather - for _, s := range subystems { - if p, ok := s.(pather); ok { - out = append(out, p) - } - } - return out -} - -func initializeSubsystem(s Subsystem, path Path, resources *specs.LinuxResources) error { - if c, ok := s.(creator); ok { - p, err := path(s.Name()) - if err != nil { - return err - } - if err := c.Create(p, resources); err != nil { - return err - } - } else if c, ok := s.(pather); ok { - p, err := path(s.Name()) - if err != nil { - return err - } - // do the default create if the group does not have a custom one - if err := os.MkdirAll(c.Path(p), defaultDirPerm); err != nil { - return err - } - } - return nil -} - -func cleanPath(path string) string { - if path == "" { - return "" - } - path = filepath.Clean(path) - if !filepath.IsAbs(path) { - path, _ = filepath.Rel(string(os.PathSeparator), filepath.Clean(string(os.PathSeparator)+path)) - } - return path -} - -func retryingWriteFile(path string, data []byte, mode os.FileMode) error { - // Retry writes on EINTR; see: - // https://github.com/golang/go/issues/38033 - for { - err := ioutil.WriteFile(path, data, mode) - if err == nil { - return nil - } else if !errors.Is(err, syscall.EINTR) { - return err - } - } -} diff --git a/vendor/github.com/containerd/cgroups/v1.go b/vendor/github.com/containerd/cgroups/v1.go deleted file mode 100644 index 2ec215c..0000000 --- a/vendor/github.com/containerd/cgroups/v1.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "strings" -) - -// V1 returns all the groups in the default cgroups mountpoint in a single hierarchy -func V1() ([]Subsystem, error) { - root, err := v1MountPoint() - if err != nil { - return nil, err - } - subsystems, err := defaults(root) - if err != nil { - return nil, err - } - var enabled []Subsystem - for _, s := range pathers(subsystems) { - // check and remove the default groups that do not exist - if _, err := os.Lstat(s.Path("/")); err == nil { - enabled = append(enabled, s) - } - } - return enabled, nil -} - -// v1MountPoint returns the mount point where the cgroup -// mountpoints are mounted in a single hiearchy -func v1MountPoint() (string, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return "", err - } - defer f.Close() - scanner := bufio.NewScanner(f) - for scanner.Scan() { - var ( - text = scanner.Text() - fields = strings.Split(text, " ") - numFields = len(fields) - ) - if numFields < 10 { - return "", fmt.Errorf("mountinfo: bad entry %q", text) - } - if fields[numFields-3] == "cgroup" { - return filepath.Dir(fields[4]), nil - } - } - if err := scanner.Err(); err != nil { - return "", err - } - return "", ErrMountPointNotExist -} diff --git a/vendor/github.com/containerd/cgroups/v2/cpu.go b/vendor/github.com/containerd/cgroups/v2/cpu.go deleted file mode 100644 index 65282ff..0000000 --- a/vendor/github.com/containerd/cgroups/v2/cpu.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v2 - -import ( - "math" - "strconv" - "strings" -) - -type CPUMax string - -func NewCPUMax(quota *int64, period *uint64) CPUMax { - max := "max" - if quota != nil { - max = strconv.FormatInt(*quota, 10) - } - return CPUMax(strings.Join([]string{max, strconv.FormatUint(*period, 10)}, " ")) -} - -type CPU struct { - Weight *uint64 - Max CPUMax - Cpus string - Mems string -} - -func (c CPUMax) extractQuotaAndPeriod() (int64, uint64) { - var ( - quota int64 - period uint64 - ) - values := strings.Split(string(c), " ") - if values[0] == "max" { - quota = math.MaxInt64 - } else { - quota, _ = strconv.ParseInt(values[0], 10, 64) - } - period, _ = strconv.ParseUint(values[1], 10, 64) - return quota, period -} - -func (r *CPU) Values() (o []Value) { - if r.Weight != nil { - o = append(o, Value{ - filename: "cpu.weight", - value: *r.Weight, - }) - } - if r.Max != "" { - o = append(o, Value{ - filename: "cpu.max", - value: r.Max, - }) - } - if r.Cpus != "" { - o = append(o, Value{ - filename: "cpuset.cpus", - value: r.Cpus, - }) - } - if r.Mems != "" { - o = append(o, Value{ - filename: "cpuset.mems", - value: r.Mems, - }) - } - return o -} diff --git a/vendor/github.com/containerd/cgroups/v2/devicefilter.go b/vendor/github.com/containerd/cgroups/v2/devicefilter.go deleted file mode 100644 index 4b8c32b..0000000 --- a/vendor/github.com/containerd/cgroups/v2/devicefilter.go +++ /dev/null @@ -1,199 +0,0 @@ -/* - Copyright The 
containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Devicefilter containes eBPF device filter program -// -// The implementation is based on https://github.com/containers/crun/blob/0.10.2/src/libcrun/ebpf.c -// -// Although ebpf.c is originally licensed under LGPL-3.0-or-later, the author (Giuseppe Scrivano) -// agreed to relicense the file in Apache License 2.0: https://github.com/opencontainers/runc/issues/2144#issuecomment-543116397 -// -// This particular Go implementation based on runc version -// https://github.com/opencontainers/runc/blob/master/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go -package v2 - -import ( - "fmt" - "math" - - "github.com/cilium/ebpf/asm" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -const ( - // license string format is same as kernel MODULE_LICENSE macro - license = "Apache" -) - -// DeviceFilter returns eBPF device filter program and its license string -func DeviceFilter(devices []specs.LinuxDeviceCgroup) (asm.Instructions, string, error) { - p := &program{} - p.init() - for i := len(devices) - 1; i >= 0; i-- { - if err := p.appendDevice(devices[i]); err != nil { - return nil, "", err - } - } - insts, err := p.finalize() - return insts, license, err -} - -type program struct { - insts asm.Instructions - hasWildCard bool - blockID int -} - -func (p *program) init() { - // struct bpf_cgroup_dev_ctx: 
https://elixir.bootlin.com/linux/v5.3.6/source/include/uapi/linux/bpf.h#L3423 - /* - u32 access_type - u32 major - u32 minor - */ - // R2 <- type (lower 16 bit of u32 access_type at R1[0]) - p.insts = append(p.insts, - asm.LoadMem(asm.R2, asm.R1, 0, asm.Half)) - - // R3 <- access (upper 16 bit of u32 access_type at R1[0]) - p.insts = append(p.insts, - asm.LoadMem(asm.R3, asm.R1, 0, asm.Word), - // RSh: bitwise shift right - asm.RSh.Imm32(asm.R3, 16)) - - // R4 <- major (u32 major at R1[4]) - p.insts = append(p.insts, - asm.LoadMem(asm.R4, asm.R1, 4, asm.Word)) - - // R5 <- minor (u32 minor at R1[8]) - p.insts = append(p.insts, - asm.LoadMem(asm.R5, asm.R1, 8, asm.Word)) -} - -// appendDevice needs to be called from the last element of OCI linux.resources.devices to the head element. -func (p *program) appendDevice(dev specs.LinuxDeviceCgroup) error { - if p.blockID < 0 { - return errors.New("the program is finalized") - } - if p.hasWildCard { - // All entries after wildcard entry are ignored - return nil - } - - bpfType := int32(-1) - hasType := true - switch dev.Type { - case string('c'): - bpfType = int32(unix.BPF_DEVCG_DEV_CHAR) - case string('b'): - bpfType = int32(unix.BPF_DEVCG_DEV_BLOCK) - case string('a'): - hasType = false - default: - // if not specified in OCI json, typ is set to DeviceTypeAll - return errors.Errorf("invalid DeviceType %q", dev.Type) - } - if *dev.Major > math.MaxUint32 { - return errors.Errorf("invalid major %d", *dev.Major) - } - if *dev.Minor > math.MaxUint32 { - return errors.Errorf("invalid minor %d", *dev.Major) - } - hasMajor := *dev.Major >= 0 // if not specified in OCI json, major is set to -1 - hasMinor := *dev.Minor >= 0 - bpfAccess := int32(0) - for _, r := range dev.Access { - switch r { - case 'r': - bpfAccess |= unix.BPF_DEVCG_ACC_READ - case 'w': - bpfAccess |= unix.BPF_DEVCG_ACC_WRITE - case 'm': - bpfAccess |= unix.BPF_DEVCG_ACC_MKNOD - default: - return errors.Errorf("unknown device access %v", r) - } - } - // If the 
access is rwm, skip the check. - hasAccess := bpfAccess != (unix.BPF_DEVCG_ACC_READ | unix.BPF_DEVCG_ACC_WRITE | unix.BPF_DEVCG_ACC_MKNOD) - - blockSym := fmt.Sprintf("block-%d", p.blockID) - nextBlockSym := fmt.Sprintf("block-%d", p.blockID+1) - prevBlockLastIdx := len(p.insts) - 1 - if hasType { - p.insts = append(p.insts, - // if (R2 != bpfType) goto next - asm.JNE.Imm(asm.R2, bpfType, nextBlockSym), - ) - } - if hasAccess { - p.insts = append(p.insts, - // if (R3 & bpfAccess == 0 /* use R1 as a temp var */) goto next - asm.Mov.Reg32(asm.R1, asm.R3), - asm.And.Imm32(asm.R1, bpfAccess), - asm.JEq.Imm(asm.R1, 0, nextBlockSym), - ) - } - if hasMajor { - p.insts = append(p.insts, - // if (R4 != major) goto next - asm.JNE.Imm(asm.R4, int32(*dev.Major), nextBlockSym), - ) - } - if hasMinor { - p.insts = append(p.insts, - // if (R5 != minor) goto next - asm.JNE.Imm(asm.R5, int32(*dev.Minor), nextBlockSym), - ) - } - if !hasType && !hasAccess && !hasMajor && !hasMinor { - p.hasWildCard = true - } - p.insts = append(p.insts, acceptBlock(dev.Allow)...) 
- // set blockSym to the first instruction we added in this iteration - p.insts[prevBlockLastIdx+1] = p.insts[prevBlockLastIdx+1].Sym(blockSym) - p.blockID++ - return nil -} - -func (p *program) finalize() (asm.Instructions, error) { - if p.hasWildCard { - // acceptBlock with asm.Return() is already inserted - return p.insts, nil - } - blockSym := fmt.Sprintf("block-%d", p.blockID) - p.insts = append(p.insts, - // R0 <- 0 - asm.Mov.Imm32(asm.R0, 0).Sym(blockSym), - asm.Return(), - ) - p.blockID = -1 - return p.insts, nil -} - -func acceptBlock(accept bool) asm.Instructions { - v := int32(0) - if accept { - v = 1 - } - return []asm.Instruction{ - // R0 <- v - asm.Mov.Imm32(asm.R0, v), - asm.Return(), - } -} diff --git a/vendor/github.com/containerd/cgroups/v2/ebpf.go b/vendor/github.com/containerd/cgroups/v2/ebpf.go deleted file mode 100644 index bd38481..0000000 --- a/vendor/github.com/containerd/cgroups/v2/ebpf.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v2 - -import ( - "github.com/cilium/ebpf" - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/link" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -// LoadAttachCgroupDeviceFilter installs eBPF device filter program to /sys/fs/cgroup/ directory. -// -// Requires the system to be running in cgroup2 unified-mode with kernel >= 4.15 . 
-// -// https://github.com/torvalds/linux/commit/ebc614f687369f9df99828572b1d85a7c2de3d92 -func LoadAttachCgroupDeviceFilter(insts asm.Instructions, license string, dirFD int) (func() error, error) { - nilCloser := func() error { - return nil - } - spec := &ebpf.ProgramSpec{ - Type: ebpf.CGroupDevice, - Instructions: insts, - License: license, - } - prog, err := ebpf.NewProgram(spec) - if err != nil { - return nilCloser, err - } - err = link.RawAttachProgram(link.RawAttachProgramOptions{ - Target: dirFD, - Program: prog, - Attach: ebpf.AttachCGroupDevice, - Flags: unix.BPF_F_ALLOW_MULTI, - }) - if err != nil { - return nilCloser, errors.Wrap(err, "failed to call BPF_PROG_ATTACH (BPF_CGROUP_DEVICE, BPF_F_ALLOW_MULTI)") - } - closer := func() error { - err = link.RawDetachProgram(link.RawDetachProgramOptions{ - Target: dirFD, - Program: prog, - Attach: ebpf.AttachCGroupDevice, - }) - if err != nil { - return errors.Wrap(err, "failed to call BPF_PROG_DETACH (BPF_CGROUP_DEVICE)") - } - return nil - } - return closer, nil -} - -func isRWM(cgroupPermissions string) bool { - r := false - w := false - m := false - for _, rn := range cgroupPermissions { - switch rn { - case 'r': - r = true - case 'w': - w = true - case 'm': - m = true - } - } - return r && w && m -} - -// the logic is from runc -// https://github.com/opencontainers/runc/blob/master/libcontainer/cgroups/fs/devices_v2.go#L44 -func canSkipEBPFError(devices []specs.LinuxDeviceCgroup) bool { - for _, dev := range devices { - if dev.Allow || !isRWM(dev.Access) { - return false - } - } - return true -} diff --git a/vendor/github.com/containerd/cgroups/v2/errors.go b/vendor/github.com/containerd/cgroups/v2/errors.go deleted file mode 100644 index dfab548..0000000 --- a/vendor/github.com/containerd/cgroups/v2/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v2 - -import ( - "errors" - "os" -) - -var ( - ErrInvalidPid = errors.New("cgroups: pid must be greater than 0") - ErrMountPointNotExist = errors.New("cgroups: cgroup mountpoint does not exist") - ErrInvalidFormat = errors.New("cgroups: parsing file with invalid format failed") - ErrFreezerNotSupported = errors.New("cgroups: freezer cgroup (v2) not supported on this system") - ErrMemoryNotSupported = errors.New("cgroups: memory cgroup (v2) not supported on this system") - ErrPidsNotSupported = errors.New("cgroups: pids cgroup (v2) not supported on this system") - ErrCPUNotSupported = errors.New("cgroups: cpu cgroup (v2) not supported on this system") - ErrCgroupDeleted = errors.New("cgroups: cgroup deleted") - ErrNoCgroupMountDestination = errors.New("cgroups: cannot find cgroup mount destination") - ErrInvalidGroupPath = errors.New("cgroups: invalid group path") -) - -// ErrorHandler is a function that handles and acts on errors -type ErrorHandler func(err error) error - -// IgnoreNotExist ignores any errors that are for not existing files -func IgnoreNotExist(err error) error { - if os.IsNotExist(err) { - return nil - } - return err -} diff --git a/vendor/github.com/containerd/cgroups/v2/io.go b/vendor/github.com/containerd/cgroups/v2/io.go deleted file mode 100644 index 70078d5..0000000 --- a/vendor/github.com/containerd/cgroups/v2/io.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v2 - -import "fmt" - -type IOType string - -const ( - ReadBPS IOType = "rbps" - WriteBPS IOType = "wbps" - ReadIOPS IOType = "riops" - WriteIOPS IOType = "wiops" -) - -type BFQ struct { - Weight uint16 -} - -type Entry struct { - Type IOType - Major int64 - Minor int64 - Rate uint64 -} - -func (e Entry) String() string { - return fmt.Sprintf("%d:%d %s=%d", e.Major, e.Minor, e.Type, e.Rate) -} - -type IO struct { - BFQ BFQ - Max []Entry -} - -func (i *IO) Values() (o []Value) { - if i.BFQ.Weight != 0 { - o = append(o, Value{ - filename: "io.bfq.weight", - value: i.BFQ.Weight, - }) - } - for _, e := range i.Max { - o = append(o, Value{ - filename: "io.max", - value: e.String(), - }) - } - return o -} diff --git a/vendor/github.com/containerd/cgroups/v2/manager.go b/vendor/github.com/containerd/cgroups/v2/manager.go deleted file mode 100644 index 3bb546c..0000000 --- a/vendor/github.com/containerd/cgroups/v2/manager.go +++ /dev/null @@ -1,782 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v2 - -import ( - "bufio" - "io/ioutil" - "math" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - "github.com/containerd/cgroups/v2/stats" - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -const ( - subtreeControl = "cgroup.subtree_control" - controllersFile = "cgroup.controllers" - defaultCgroup2Path = "/sys/fs/cgroup" - defaultSlice = "system.slice" -) - -var ( - canDelegate bool -) - -type Event struct { - Low uint64 - High uint64 - Max uint64 - OOM uint64 - OOMKill uint64 -} - -// Resources for a cgroups v2 unified hierarchy -type Resources struct { - CPU *CPU - Memory *Memory - Pids *Pids - IO *IO - RDMA *RDMA - HugeTlb *HugeTlb - // When len(Devices) is zero, devices are not controlled - Devices []specs.LinuxDeviceCgroup -} - -// Values returns the raw filenames and values that -// can be written to the unified hierarchy -func (r *Resources) Values() (o []Value) { - if r.CPU != nil { - o = append(o, r.CPU.Values()...) - } - if r.Memory != nil { - o = append(o, r.Memory.Values()...) - } - if r.Pids != nil { - o = append(o, r.Pids.Values()...) - } - if r.IO != nil { - o = append(o, r.IO.Values()...) - } - if r.RDMA != nil { - o = append(o, r.RDMA.Values()...) - } - if r.HugeTlb != nil { - o = append(o, r.HugeTlb.Values()...) 
- } - return o -} - -// EnabledControllers returns the list of all not nil resource controllers -func (r *Resources) EnabledControllers() (c []string) { - if r.CPU != nil { - c = append(c, "cpu") - c = append(c, "cpuset") - } - if r.Memory != nil { - c = append(c, "memory") - } - if r.Pids != nil { - c = append(c, "pids") - } - if r.IO != nil { - c = append(c, "io") - } - if r.RDMA != nil { - c = append(c, "rdma") - } - if r.HugeTlb != nil { - c = append(c, "hugetlb") - } - return -} - -// Value of a cgroup setting -type Value struct { - filename string - value interface{} -} - -// write the value to the full, absolute path, of a unified hierarchy -func (c *Value) write(path string, perm os.FileMode) error { - var data []byte - switch t := c.value.(type) { - case uint64: - data = []byte(strconv.FormatUint(t, 10)) - case uint16: - data = []byte(strconv.FormatUint(uint64(t), 10)) - case int64: - data = []byte(strconv.FormatInt(t, 10)) - case []byte: - data = t - case string: - data = []byte(t) - case CPUMax: - data = []byte(t) - default: - return ErrInvalidFormat - } - - // Retry writes on EINTR; see: - // https://github.com/golang/go/issues/38033 - for { - err := ioutil.WriteFile( - filepath.Join(path, c.filename), - data, - perm, - ) - if err == nil { - return nil - } else if !errors.Is(err, syscall.EINTR) { - return err - } - } -} - -func writeValues(path string, values []Value) error { - for _, o := range values { - if err := o.write(path, defaultFilePerm); err != nil { - return err - } - } - return nil -} - -func NewManager(mountpoint string, group string, resources *Resources) (*Manager, error) { - if resources == nil { - return nil, errors.New("resources reference is nil") - } - if err := VerifyGroupPath(group); err != nil { - return nil, err - } - path := filepath.Join(mountpoint, group) - if err := os.MkdirAll(path, defaultDirPerm); err != nil { - return nil, err - } - m := Manager{ - unifiedMountpoint: mountpoint, - path: path, - } - if err := 
m.ToggleControllers(resources.EnabledControllers(), Enable); err != nil { - // clean up cgroup dir on failure - os.Remove(path) - return nil, err - } - if err := setResources(path, resources); err != nil { - os.Remove(path) - return nil, err - } - return &m, nil -} - -func LoadManager(mountpoint string, group string) (*Manager, error) { - if err := VerifyGroupPath(group); err != nil { - return nil, err - } - path := filepath.Join(mountpoint, group) - return &Manager{ - unifiedMountpoint: mountpoint, - path: path, - }, nil -} - -type Manager struct { - unifiedMountpoint string - path string -} - -func setResources(path string, resources *Resources) error { - if resources != nil { - if err := writeValues(path, resources.Values()); err != nil { - return err - } - if err := setDevices(path, resources.Devices); err != nil { - return err - } - } - return nil -} - -func (c *Manager) RootControllers() ([]string, error) { - b, err := ioutil.ReadFile(filepath.Join(c.unifiedMountpoint, controllersFile)) - if err != nil { - return nil, err - } - return strings.Fields(string(b)), nil -} - -func (c *Manager) Controllers() ([]string, error) { - b, err := ioutil.ReadFile(filepath.Join(c.path, controllersFile)) - if err != nil { - return nil, err - } - return strings.Fields(string(b)), nil -} - -type ControllerToggle int - -const ( - Enable ControllerToggle = iota + 1 - Disable -) - -func toggleFunc(controllers []string, prefix string) []string { - out := make([]string, len(controllers)) - for i, c := range controllers { - out[i] = prefix + c - } - return out -} - -func (c *Manager) ToggleControllers(controllers []string, t ControllerToggle) error { - // when c.path is like /foo/bar/baz, the following files need to be written: - // * /sys/fs/cgroup/cgroup.subtree_control - // * /sys/fs/cgroup/foo/cgroup.subtree_control - // * /sys/fs/cgroup/foo/bar/cgroup.subtree_control - // Note that /sys/fs/cgroup/foo/bar/baz/cgroup.subtree_control does not need to be written. 
- split := strings.Split(c.path, "/") - var lastErr error - for i := range split { - f := strings.Join(split[:i], "/") - if !strings.HasPrefix(f, c.unifiedMountpoint) || f == c.path { - continue - } - filePath := filepath.Join(f, subtreeControl) - if err := c.writeSubtreeControl(filePath, controllers, t); err != nil { - // When running as rootless, the user may face EPERM on parent groups, but it is neglible when the - // controller is already written. - // So we only return the last error. - lastErr = errors.Wrapf(err, "failed to write subtree controllers %+v to %q", controllers, filePath) - } - } - return lastErr -} - -func (c *Manager) writeSubtreeControl(filePath string, controllers []string, t ControllerToggle) error { - f, err := os.OpenFile(filePath, os.O_WRONLY, 0) - if err != nil { - return err - } - defer f.Close() - switch t { - case Enable: - controllers = toggleFunc(controllers, "+") - case Disable: - controllers = toggleFunc(controllers, "-") - } - _, err = f.WriteString(strings.Join(controllers, " ")) - return err -} - -func (c *Manager) NewChild(name string, resources *Resources) (*Manager, error) { - if strings.HasPrefix(name, "/") { - return nil, errors.New("name must be relative") - } - path := filepath.Join(c.path, name) - if err := os.MkdirAll(path, defaultDirPerm); err != nil { - return nil, err - } - if err := setResources(path, resources); err != nil { - // clean up cgroup dir on failure - os.Remove(path) - return nil, err - } - return &Manager{ - unifiedMountpoint: c.unifiedMountpoint, - path: path, - }, nil -} - -func (c *Manager) AddProc(pid uint64) error { - v := Value{ - filename: cgroupProcs, - value: pid, - } - return writeValues(c.path, []Value{v}) -} - -func (c *Manager) Delete() error { - return remove(c.path) -} - -func (c *Manager) Procs(recursive bool) ([]uint64, error) { - var processes []uint64 - err := filepath.Walk(c.path, func(p string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !recursive 
&& info.IsDir() { - if p == c.path { - return nil - } - return filepath.SkipDir - } - _, name := filepath.Split(p) - if name != cgroupProcs { - return nil - } - procs, err := parseCgroupProcsFile(p) - if err != nil { - return err - } - processes = append(processes, procs...) - return nil - }) - return processes, err -} - -var singleValueFiles = []string{ - "pids.current", - "pids.max", -} - -func (c *Manager) Stat() (*stats.Metrics, error) { - controllers, err := c.Controllers() - if err != nil { - return nil, err - } - out := make(map[string]interface{}) - for _, controller := range controllers { - switch controller { - case "cpu", "memory": - if err := readKVStatsFile(c.path, controller+".stat", out); err != nil { - if os.IsNotExist(err) { - continue - } - return nil, err - } - } - } - for _, name := range singleValueFiles { - if err := readSingleFile(c.path, name, out); err != nil { - if os.IsNotExist(err) { - continue - } - return nil, err - } - } - memoryEvents := make(map[string]interface{}) - if err := readKVStatsFile(c.path, "memory.events", memoryEvents); err != nil { - if !os.IsNotExist(err) { - return nil, err - } - } - var metrics stats.Metrics - - metrics.Pids = &stats.PidsStat{ - Current: getPidValue("pids.current", out), - Limit: getPidValue("pids.max", out), - } - metrics.CPU = &stats.CPUStat{ - UsageUsec: getUint64Value("usage_usec", out), - UserUsec: getUint64Value("user_usec", out), - SystemUsec: getUint64Value("system_usec", out), - NrPeriods: getUint64Value("nr_periods", out), - NrThrottled: getUint64Value("nr_throttled", out), - ThrottledUsec: getUint64Value("throttled_usec", out), - } - metrics.Memory = &stats.MemoryStat{ - Anon: getUint64Value("anon", out), - File: getUint64Value("file", out), - KernelStack: getUint64Value("kernel_stack", out), - Slab: getUint64Value("slab", out), - Sock: getUint64Value("sock", out), - Shmem: getUint64Value("shmem", out), - FileMapped: getUint64Value("file_mapped", out), - FileDirty: 
getUint64Value("file_dirty", out), - FileWriteback: getUint64Value("file_writeback", out), - AnonThp: getUint64Value("anon_thp", out), - InactiveAnon: getUint64Value("inactive_anon", out), - ActiveAnon: getUint64Value("active_anon", out), - InactiveFile: getUint64Value("inactive_file", out), - ActiveFile: getUint64Value("active_file", out), - Unevictable: getUint64Value("unevictable", out), - SlabReclaimable: getUint64Value("slab_reclaimable", out), - SlabUnreclaimable: getUint64Value("slab_unreclaimable", out), - Pgfault: getUint64Value("pgfault", out), - Pgmajfault: getUint64Value("pgmajfault", out), - WorkingsetRefault: getUint64Value("workingset_refault", out), - WorkingsetActivate: getUint64Value("workingset_activate", out), - WorkingsetNodereclaim: getUint64Value("workingset_nodereclaim", out), - Pgrefill: getUint64Value("pgrefill", out), - Pgscan: getUint64Value("pgscan", out), - Pgsteal: getUint64Value("pgsteal", out), - Pgactivate: getUint64Value("pgactivate", out), - Pgdeactivate: getUint64Value("pgdeactivate", out), - Pglazyfree: getUint64Value("pglazyfree", out), - Pglazyfreed: getUint64Value("pglazyfreed", out), - ThpFaultAlloc: getUint64Value("thp_fault_alloc", out), - ThpCollapseAlloc: getUint64Value("thp_collapse_alloc", out), - Usage: getStatFileContentUint64(filepath.Join(c.path, "memory.current")), - UsageLimit: getStatFileContentUint64(filepath.Join(c.path, "memory.max")), - SwapUsage: getStatFileContentUint64(filepath.Join(c.path, "memory.swap.current")), - SwapLimit: getStatFileContentUint64(filepath.Join(c.path, "memory.swap.max")), - } - if len(memoryEvents) > 0 { - metrics.MemoryEvents = &stats.MemoryEvents{ - Low: getUint64Value("low", memoryEvents), - High: getUint64Value("high", memoryEvents), - Max: getUint64Value("max", memoryEvents), - Oom: getUint64Value("oom", memoryEvents), - OomKill: getUint64Value("oom_kill", memoryEvents), - } - } - metrics.Io = &stats.IOStat{Usage: readIoStats(c.path)} - metrics.Rdma = &stats.RdmaStat{ - 
Current: rdmaStats(filepath.Join(c.path, "rdma.current")), - Limit: rdmaStats(filepath.Join(c.path, "rdma.max")), - } - metrics.Hugetlb = readHugeTlbStats(c.path) - - return &metrics, nil -} - -func getUint64Value(key string, out map[string]interface{}) uint64 { - v, ok := out[key] - if !ok { - return 0 - } - switch t := v.(type) { - case uint64: - return t - } - return 0 -} - -func getPidValue(key string, out map[string]interface{}) uint64 { - v, ok := out[key] - if !ok { - return 0 - } - switch t := v.(type) { - case uint64: - return t - case string: - if t == "max" { - return math.MaxUint64 - } - } - return 0 -} - -func readSingleFile(path string, file string, out map[string]interface{}) error { - f, err := os.Open(filepath.Join(path, file)) - if err != nil { - return err - } - defer f.Close() - data, err := ioutil.ReadAll(f) - if err != nil { - return err - } - s := strings.TrimSpace(string(data)) - v, err := parseUint(s, 10, 64) - if err != nil { - // if we cannot parse as a uint, parse as a string - out[file] = s - return nil - } - out[file] = v - return nil -} - -func readKVStatsFile(path string, file string, out map[string]interface{}) error { - f, err := os.Open(filepath.Join(path, file)) - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - name, value, err := parseKV(s.Text()) - if err != nil { - return errors.Wrapf(err, "error while parsing %s (line=%q)", filepath.Join(path, file), s.Text()) - } - out[name] = value - } - return s.Err() -} - -func (c *Manager) Freeze() error { - return c.freeze(c.path, Frozen) -} - -func (c *Manager) Thaw() error { - return c.freeze(c.path, Thawed) -} - -func (c *Manager) freeze(path string, state State) error { - values := state.Values() - for { - if err := writeValues(path, values); err != nil { - return err - } - current, err := fetchState(path) - if err != nil { - return err - } - if current == state { - return nil - } - time.Sleep(1 * time.Millisecond) - } -} - -// 
MemoryEventFD returns inotify file descriptor and 'memory.events' inotify watch descriptor -func (c *Manager) MemoryEventFD() (int, uint32, error) { - fpath := filepath.Join(c.path, "memory.events") - fd, err := syscall.InotifyInit() - if err != nil { - return 0, 0, errors.Errorf("Failed to create inotify fd") - } - wd, err := syscall.InotifyAddWatch(fd, fpath, unix.IN_MODIFY) - if wd < 0 { - syscall.Close(fd) - return 0, 0, errors.Errorf("Failed to add inotify watch for %q", fpath) - } - - return fd, uint32(wd), nil -} - -func (c *Manager) EventChan() (<-chan Event, <-chan error) { - ec := make(chan Event) - errCh := make(chan error) - go c.waitForEvents(ec, errCh) - - return ec, nil -} - -func (c *Manager) waitForEvents(ec chan<- Event, errCh chan<- error) { - fd, wd, err := c.MemoryEventFD() - - defer syscall.InotifyRmWatch(fd, wd) - defer syscall.Close(fd) - - if err != nil { - errCh <- err - return - } - - for { - buffer := make([]byte, syscall.SizeofInotifyEvent*10) - bytesRead, err := syscall.Read(fd, buffer) - if err != nil { - errCh <- err - return - } - if bytesRead >= syscall.SizeofInotifyEvent { - out := make(map[string]interface{}) - if err := readKVStatsFile(c.path, "memory.events", out); err == nil { - e := Event{} - if v, ok := out["high"]; ok { - e.High, ok = v.(uint64) - if !ok { - errCh <- errors.Errorf("cannot convert high to uint64: %+v", v) - return - } - } - if v, ok := out["low"]; ok { - e.Low, ok = v.(uint64) - if !ok { - errCh <- errors.Errorf("cannot convert low to uint64: %+v", v) - return - } - } - if v, ok := out["max"]; ok { - e.Max, ok = v.(uint64) - if !ok { - errCh <- errors.Errorf("cannot convert max to uint64: %+v", v) - return - } - } - if v, ok := out["oom"]; ok { - e.OOM, ok = v.(uint64) - if !ok { - errCh <- errors.Errorf("cannot convert oom to uint64: %+v", v) - return - } - } - if v, ok := out["oom_kill"]; ok { - e.OOMKill, ok = v.(uint64) - if !ok { - errCh <- errors.Errorf("cannot convert oom_kill to uint64: %+v", v) - 
return - } - } - ec <- e - } else { - errCh <- err - return - } - } - } -} - -func setDevices(path string, devices []specs.LinuxDeviceCgroup) error { - if len(devices) == 0 { - return nil - } - insts, license, err := DeviceFilter(devices) - if err != nil { - return err - } - dirFD, err := unix.Open(path, unix.O_DIRECTORY|unix.O_RDONLY, 0600) - if err != nil { - return errors.Errorf("cannot get dir FD for %s", path) - } - defer unix.Close(dirFD) - if _, err := LoadAttachCgroupDeviceFilter(insts, license, dirFD); err != nil { - if !canSkipEBPFError(devices) { - return err - } - } - return nil -} - -func NewSystemd(slice, group string, pid int, resources *Resources) (*Manager, error) { - if slice == "" { - slice = defaultSlice - } - path := filepath.Join(defaultCgroup2Path, slice, group) - conn, err := systemdDbus.New() - if err != nil { - return &Manager{}, err - } - defer conn.Close() - - properties := []systemdDbus.Property{ - systemdDbus.PropDescription("cgroup " + group), - newSystemdProperty("DefaultDependencies", false), - newSystemdProperty("MemoryAccounting", true), - newSystemdProperty("CPUAccounting", true), - newSystemdProperty("IOAccounting", true), - } - - // if we create a slice, the parent is defined via a Wants= - if strings.HasSuffix(group, ".slice") { - properties = append(properties, systemdDbus.PropWants(defaultSlice)) - } else { - // otherwise, we use Slice= - properties = append(properties, systemdDbus.PropSlice(defaultSlice)) - } - - // only add pid if its valid, -1 is used w/ general slice creation. 
- if pid != -1 { - properties = append(properties, newSystemdProperty("PIDs", []uint32{uint32(pid)})) - } - - if resources.Memory != nil && *resources.Memory.Max != 0 { - properties = append(properties, - newSystemdProperty("MemoryMax", uint64(*resources.Memory.Max))) - } - - if resources.CPU != nil && *resources.CPU.Weight != 0 { - properties = append(properties, - newSystemdProperty("CPUWeight", *resources.CPU.Weight)) - } - - if resources.CPU != nil && resources.CPU.Max != "" { - quota, period := resources.CPU.Max.extractQuotaAndPeriod() - // cpu.cfs_quota_us and cpu.cfs_period_us are controlled by systemd. - // corresponds to USEC_INFINITY in systemd - // if USEC_INFINITY is provided, CPUQuota is left unbound by systemd - // always setting a property value ensures we can apply a quota and remove it later - cpuQuotaPerSecUSec := uint64(math.MaxUint64) - if quota > 0 { - // systemd converts CPUQuotaPerSecUSec (microseconds per CPU second) to CPUQuota - // (integer percentage of CPU) internally. This means that if a fractional percent of - // CPU is indicated by Resources.CpuQuota, we need to round up to the nearest - // 10ms (1% of a second) such that child cgroups can set the cpu.cfs_quota_us they expect. 
- cpuQuotaPerSecUSec = uint64(quota*1000000) / period - if cpuQuotaPerSecUSec%10000 != 0 { - cpuQuotaPerSecUSec = ((cpuQuotaPerSecUSec / 10000) + 1) * 10000 - } - } - properties = append(properties, - newSystemdProperty("CPUQuotaPerSecUSec", cpuQuotaPerSecUSec)) - } - - // If we can delegate, we add the property back in - if canDelegate { - properties = append(properties, newSystemdProperty("Delegate", true)) - } - - if resources.Pids != nil && resources.Pids.Max > 0 { - properties = append(properties, - newSystemdProperty("TasksAccounting", true), - newSystemdProperty("TasksMax", uint64(resources.Pids.Max))) - } - - statusChan := make(chan string, 1) - if _, err := conn.StartTransientUnit(group, "replace", properties, statusChan); err == nil { - select { - case <-statusChan: - case <-time.After(time.Second): - logrus.Warnf("Timed out while waiting for StartTransientUnit(%s) completion signal from dbus. Continuing...", group) - } - } else if !isUnitExists(err) { - return &Manager{}, err - } - - return &Manager{ - path: path, - }, nil -} - -func LoadSystemd(slice, group string) (*Manager, error) { - if slice == "" { - slice = defaultSlice - } - group = filepath.Join(defaultCgroup2Path, slice, group) - return &Manager{ - path: group, - }, nil -} - -func (c *Manager) DeleteSystemd() error { - conn, err := systemdDbus.New() - if err != nil { - return err - } - defer conn.Close() - group := systemdUnitFromPath(c.path) - ch := make(chan string) - _, err = conn.StopUnit(group, "replace", ch) - if err != nil { - return err - } - <-ch - return nil -} - -func newSystemdProperty(name string, units interface{}) systemdDbus.Property { - return systemdDbus.Property{ - Name: name, - Value: dbus.MakeVariant(units), - } -} diff --git a/vendor/github.com/containerd/cgroups/v2/memory.go b/vendor/github.com/containerd/cgroups/v2/memory.go deleted file mode 100644 index 72f94b7..0000000 --- a/vendor/github.com/containerd/cgroups/v2/memory.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - 
Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v2 - -type Memory struct { - Swap *int64 - Max *int64 - Low *int64 - High *int64 -} - -func (r *Memory) Values() (o []Value) { - if r.Swap != nil { - o = append(o, Value{ - filename: "memory.swap.max", - value: *r.Swap, - }) - } - if r.Max != nil { - o = append(o, Value{ - filename: "memory.max", - value: *r.Max, - }) - } - if r.Low != nil { - o = append(o, Value{ - filename: "memory.low", - value: *r.Low, - }) - } - if r.High != nil { - o = append(o, Value{ - filename: "memory.high", - value: *r.High, - }) - } - return o -} diff --git a/vendor/github.com/containerd/cgroups/v2/paths.go b/vendor/github.com/containerd/cgroups/v2/paths.go deleted file mode 100644 index c4778c1..0000000 --- a/vendor/github.com/containerd/cgroups/v2/paths.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package v2 - -import ( - "fmt" - "path/filepath" - "strings" -) - -// NestedGroupPath will nest the cgroups based on the calling processes cgroup -// placing its child processes inside its own path -func NestedGroupPath(suffix string) (string, error) { - path, err := parseCgroupFile("/proc/self/cgroup") - if err != nil { - return "", err - } - return filepath.Join(path, suffix), nil -} - -// PidGroupPath will return the correct cgroup paths for an existing process running inside a cgroup -// This is commonly used for the Load function to restore an existing container -func PidGroupPath(pid int) (string, error) { - p := fmt.Sprintf("/proc/%d/cgroup", pid) - return parseCgroupFile(p) -} - -// VerifyGroupPath verifies the format of group path string g. -// The format is same as the third field in /proc/PID/cgroup. -// e.g. "/user.slice/user-1001.slice/session-1.scope" -// -// g must be a "clean" absolute path starts with "/", and must not contain "/sys/fs/cgroup" prefix. -// -// VerifyGroupPath doesn't verify whether g actually exists on the system. -func VerifyGroupPath(g string) error { - if !strings.HasPrefix(g, "/") { - return ErrInvalidGroupPath - } - if filepath.Clean(g) != g { - return ErrInvalidGroupPath - } - if strings.HasPrefix(g, "/sys/fs/cgroup") { - return ErrInvalidGroupPath - } - return nil -} diff --git a/vendor/github.com/containerd/cgroups/v2/pids.go b/vendor/github.com/containerd/cgroups/v2/pids.go deleted file mode 100644 index 0b5aa0c..0000000 --- a/vendor/github.com/containerd/cgroups/v2/pids.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v2 - -import "strconv" - -type Pids struct { - Max int64 -} - -func (r *Pids) Values() (o []Value) { - if r.Max != 0 { - limit := "max" - if r.Max > 0 { - limit = strconv.FormatInt(r.Max, 10) - } - o = append(o, Value{ - filename: "pids.max", - value: limit, - }) - } - return o -} diff --git a/vendor/github.com/containerd/cgroups/v2/state.go b/vendor/github.com/containerd/cgroups/v2/state.go deleted file mode 100644 index 09b75b6..0000000 --- a/vendor/github.com/containerd/cgroups/v2/state.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package v2 - -import ( - "io/ioutil" - "path/filepath" - "strings" -) - -// State is a type that represents the state of the current cgroup -type State string - -const ( - Unknown State = "" - Thawed State = "thawed" - Frozen State = "frozen" - Deleted State = "deleted" - - cgroupFreeze = "cgroup.freeze" -) - -func (s State) Values() []Value { - v := Value{ - filename: cgroupFreeze, - } - switch s { - case Frozen: - v.value = "1" - case Thawed: - v.value = "0" - } - return []Value{ - v, - } -} - -func fetchState(path string) (State, error) { - current, err := ioutil.ReadFile(filepath.Join(path, cgroupFreeze)) - if err != nil { - return Unknown, err - } - switch strings.TrimSpace(string(current)) { - case "1": - return Frozen, nil - case "0": - return Thawed, nil - default: - return Unknown, nil - } -} diff --git a/vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.go b/vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.go deleted file mode 100644 index 0bd4939..0000000 --- a/vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.go +++ /dev/null @@ -1,3992 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/containerd/cgroups/v2/stats/metrics.proto - -package stats - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Metrics struct { - Pids *PidsStat `protobuf:"bytes,1,opt,name=pids,proto3" json:"pids,omitempty"` - CPU *CPUStat `protobuf:"bytes,2,opt,name=cpu,proto3" json:"cpu,omitempty"` - Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` - Rdma *RdmaStat `protobuf:"bytes,5,opt,name=rdma,proto3" json:"rdma,omitempty"` - Io *IOStat `protobuf:"bytes,6,opt,name=io,proto3" json:"io,omitempty"` - Hugetlb []*HugeTlbStat `protobuf:"bytes,7,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` - MemoryEvents *MemoryEvents `protobuf:"bytes,8,opt,name=memory_events,json=memoryEvents,proto3" json:"memory_events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metrics) Reset() { *m = Metrics{} } -func (*Metrics) ProtoMessage() {} -func (*Metrics) Descriptor() ([]byte, []int) { - return fileDescriptor_2fc6005842049e6b, []int{0} -} -func (m *Metrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metrics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metrics.Merge(m, src) -} -func (m *Metrics) XXX_Size() int { - return m.Size() -} -func (m *Metrics) XXX_DiscardUnknown() { - xxx_messageInfo_Metrics.DiscardUnknown(m) -} - -var xxx_messageInfo_Metrics proto.InternalMessageInfo - -type PidsStat struct { - Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` - Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *PidsStat) Reset() { *m = PidsStat{} } -func (*PidsStat) ProtoMessage() {} -func (*PidsStat) Descriptor() ([]byte, []int) { - return fileDescriptor_2fc6005842049e6b, []int{1} -} -func (m *PidsStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PidsStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PidsStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PidsStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_PidsStat.Merge(m, src) -} -func (m *PidsStat) XXX_Size() int { - return m.Size() -} -func (m *PidsStat) XXX_DiscardUnknown() { - xxx_messageInfo_PidsStat.DiscardUnknown(m) -} - -var xxx_messageInfo_PidsStat proto.InternalMessageInfo - -type CPUStat struct { - UsageUsec uint64 `protobuf:"varint,1,opt,name=usage_usec,json=usageUsec,proto3" json:"usage_usec,omitempty"` - UserUsec uint64 `protobuf:"varint,2,opt,name=user_usec,json=userUsec,proto3" json:"user_usec,omitempty"` - SystemUsec uint64 `protobuf:"varint,3,opt,name=system_usec,json=systemUsec,proto3" json:"system_usec,omitempty"` - NrPeriods uint64 `protobuf:"varint,4,opt,name=nr_periods,json=nrPeriods,proto3" json:"nr_periods,omitempty"` - NrThrottled uint64 `protobuf:"varint,5,opt,name=nr_throttled,json=nrThrottled,proto3" json:"nr_throttled,omitempty"` - ThrottledUsec uint64 `protobuf:"varint,6,opt,name=throttled_usec,json=throttledUsec,proto3" json:"throttled_usec,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CPUStat) Reset() { *m = CPUStat{} } -func (*CPUStat) ProtoMessage() {} -func (*CPUStat) Descriptor() ([]byte, []int) { - return fileDescriptor_2fc6005842049e6b, []int{2} -} -func (m *CPUStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CPUStat) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CPUStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CPUStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_CPUStat.Merge(m, src) -} -func (m *CPUStat) XXX_Size() int { - return m.Size() -} -func (m *CPUStat) XXX_DiscardUnknown() { - xxx_messageInfo_CPUStat.DiscardUnknown(m) -} - -var xxx_messageInfo_CPUStat proto.InternalMessageInfo - -type MemoryStat struct { - Anon uint64 `protobuf:"varint,1,opt,name=anon,proto3" json:"anon,omitempty"` - File uint64 `protobuf:"varint,2,opt,name=file,proto3" json:"file,omitempty"` - KernelStack uint64 `protobuf:"varint,3,opt,name=kernel_stack,json=kernelStack,proto3" json:"kernel_stack,omitempty"` - Slab uint64 `protobuf:"varint,4,opt,name=slab,proto3" json:"slab,omitempty"` - Sock uint64 `protobuf:"varint,5,opt,name=sock,proto3" json:"sock,omitempty"` - Shmem uint64 `protobuf:"varint,6,opt,name=shmem,proto3" json:"shmem,omitempty"` - FileMapped uint64 `protobuf:"varint,7,opt,name=file_mapped,json=fileMapped,proto3" json:"file_mapped,omitempty"` - FileDirty uint64 `protobuf:"varint,8,opt,name=file_dirty,json=fileDirty,proto3" json:"file_dirty,omitempty"` - FileWriteback uint64 `protobuf:"varint,9,opt,name=file_writeback,json=fileWriteback,proto3" json:"file_writeback,omitempty"` - AnonThp uint64 `protobuf:"varint,10,opt,name=anon_thp,json=anonThp,proto3" json:"anon_thp,omitempty"` - InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` - ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` - InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` - ActiveFile uint64 
`protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` - Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` - SlabReclaimable uint64 `protobuf:"varint,16,opt,name=slab_reclaimable,json=slabReclaimable,proto3" json:"slab_reclaimable,omitempty"` - SlabUnreclaimable uint64 `protobuf:"varint,17,opt,name=slab_unreclaimable,json=slabUnreclaimable,proto3" json:"slab_unreclaimable,omitempty"` - Pgfault uint64 `protobuf:"varint,18,opt,name=pgfault,proto3" json:"pgfault,omitempty"` - Pgmajfault uint64 `protobuf:"varint,19,opt,name=pgmajfault,proto3" json:"pgmajfault,omitempty"` - WorkingsetRefault uint64 `protobuf:"varint,20,opt,name=workingset_refault,json=workingsetRefault,proto3" json:"workingset_refault,omitempty"` - WorkingsetActivate uint64 `protobuf:"varint,21,opt,name=workingset_activate,json=workingsetActivate,proto3" json:"workingset_activate,omitempty"` - WorkingsetNodereclaim uint64 `protobuf:"varint,22,opt,name=workingset_nodereclaim,json=workingsetNodereclaim,proto3" json:"workingset_nodereclaim,omitempty"` - Pgrefill uint64 `protobuf:"varint,23,opt,name=pgrefill,proto3" json:"pgrefill,omitempty"` - Pgscan uint64 `protobuf:"varint,24,opt,name=pgscan,proto3" json:"pgscan,omitempty"` - Pgsteal uint64 `protobuf:"varint,25,opt,name=pgsteal,proto3" json:"pgsteal,omitempty"` - Pgactivate uint64 `protobuf:"varint,26,opt,name=pgactivate,proto3" json:"pgactivate,omitempty"` - Pgdeactivate uint64 `protobuf:"varint,27,opt,name=pgdeactivate,proto3" json:"pgdeactivate,omitempty"` - Pglazyfree uint64 `protobuf:"varint,28,opt,name=pglazyfree,proto3" json:"pglazyfree,omitempty"` - Pglazyfreed uint64 `protobuf:"varint,29,opt,name=pglazyfreed,proto3" json:"pglazyfreed,omitempty"` - ThpFaultAlloc uint64 `protobuf:"varint,30,opt,name=thp_fault_alloc,json=thpFaultAlloc,proto3" json:"thp_fault_alloc,omitempty"` - ThpCollapseAlloc uint64 
`protobuf:"varint,31,opt,name=thp_collapse_alloc,json=thpCollapseAlloc,proto3" json:"thp_collapse_alloc,omitempty"` - Usage uint64 `protobuf:"varint,32,opt,name=usage,proto3" json:"usage,omitempty"` - UsageLimit uint64 `protobuf:"varint,33,opt,name=usage_limit,json=usageLimit,proto3" json:"usage_limit,omitempty"` - SwapUsage uint64 `protobuf:"varint,34,opt,name=swap_usage,json=swapUsage,proto3" json:"swap_usage,omitempty"` - SwapLimit uint64 `protobuf:"varint,35,opt,name=swap_limit,json=swapLimit,proto3" json:"swap_limit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemoryStat) Reset() { *m = MemoryStat{} } -func (*MemoryStat) ProtoMessage() {} -func (*MemoryStat) Descriptor() ([]byte, []int) { - return fileDescriptor_2fc6005842049e6b, []int{3} -} -func (m *MemoryStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemoryStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemoryStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemoryStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoryStat.Merge(m, src) -} -func (m *MemoryStat) XXX_Size() int { - return m.Size() -} -func (m *MemoryStat) XXX_DiscardUnknown() { - xxx_messageInfo_MemoryStat.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoryStat proto.InternalMessageInfo - -type MemoryEvents struct { - Low uint64 `protobuf:"varint,1,opt,name=low,proto3" json:"low,omitempty"` - High uint64 `protobuf:"varint,2,opt,name=high,proto3" json:"high,omitempty"` - Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` - Oom uint64 `protobuf:"varint,4,opt,name=oom,proto3" json:"oom,omitempty"` - OomKill uint64 `protobuf:"varint,5,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemoryEvents) Reset() { *m = MemoryEvents{} } -func (*MemoryEvents) ProtoMessage() {} -func (*MemoryEvents) Descriptor() ([]byte, []int) { - return fileDescriptor_2fc6005842049e6b, []int{4} -} -func (m *MemoryEvents) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemoryEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemoryEvents.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemoryEvents) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoryEvents.Merge(m, src) -} -func (m *MemoryEvents) XXX_Size() int { - return m.Size() -} -func (m *MemoryEvents) XXX_DiscardUnknown() { - xxx_messageInfo_MemoryEvents.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoryEvents proto.InternalMessageInfo - -type RdmaStat struct { - Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"` - Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RdmaStat) Reset() { *m = RdmaStat{} } -func (*RdmaStat) ProtoMessage() {} -func (*RdmaStat) Descriptor() ([]byte, []int) { - return fileDescriptor_2fc6005842049e6b, []int{5} -} -func (m *RdmaStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RdmaStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RdmaStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RdmaStat) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_RdmaStat.Merge(m, src) -} -func (m *RdmaStat) XXX_Size() int { - return m.Size() -} -func (m *RdmaStat) XXX_DiscardUnknown() { - xxx_messageInfo_RdmaStat.DiscardUnknown(m) -} - -var xxx_messageInfo_RdmaStat proto.InternalMessageInfo - -type RdmaEntry struct { - Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` - HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"` - HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } -func (*RdmaEntry) ProtoMessage() {} -func (*RdmaEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_2fc6005842049e6b, []int{6} -} -func (m *RdmaEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RdmaEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RdmaEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RdmaEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_RdmaEntry.Merge(m, src) -} -func (m *RdmaEntry) XXX_Size() int { - return m.Size() -} -func (m *RdmaEntry) XXX_DiscardUnknown() { - xxx_messageInfo_RdmaEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_RdmaEntry proto.InternalMessageInfo - -type IOStat struct { - Usage []*IOEntry `protobuf:"bytes,1,rep,name=usage,proto3" json:"usage,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IOStat) Reset() { *m = IOStat{} } -func (*IOStat) ProtoMessage() {} -func (*IOStat) Descriptor() ([]byte, []int) { - return fileDescriptor_2fc6005842049e6b, []int{7} -} -func 
(m *IOStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IOStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IOStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IOStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_IOStat.Merge(m, src) -} -func (m *IOStat) XXX_Size() int { - return m.Size() -} -func (m *IOStat) XXX_DiscardUnknown() { - xxx_messageInfo_IOStat.DiscardUnknown(m) -} - -var xxx_messageInfo_IOStat proto.InternalMessageInfo - -type IOEntry struct { - Major uint64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` - Minor uint64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` - Rbytes uint64 `protobuf:"varint,3,opt,name=rbytes,proto3" json:"rbytes,omitempty"` - Wbytes uint64 `protobuf:"varint,4,opt,name=wbytes,proto3" json:"wbytes,omitempty"` - Rios uint64 `protobuf:"varint,5,opt,name=rios,proto3" json:"rios,omitempty"` - Wios uint64 `protobuf:"varint,6,opt,name=wios,proto3" json:"wios,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IOEntry) Reset() { *m = IOEntry{} } -func (*IOEntry) ProtoMessage() {} -func (*IOEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_2fc6005842049e6b, []int{8} -} -func (m *IOEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IOEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IOEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IOEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_IOEntry.Merge(m, src) -} -func (m *IOEntry) XXX_Size() int { - return m.Size() -} 
-func (m *IOEntry) XXX_DiscardUnknown() { - xxx_messageInfo_IOEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_IOEntry proto.InternalMessageInfo - -type HugeTlbStat struct { - Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` - Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` - Pagesize string `protobuf:"bytes,3,opt,name=pagesize,proto3" json:"pagesize,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HugeTlbStat) Reset() { *m = HugeTlbStat{} } -func (*HugeTlbStat) ProtoMessage() {} -func (*HugeTlbStat) Descriptor() ([]byte, []int) { - return fileDescriptor_2fc6005842049e6b, []int{9} -} -func (m *HugeTlbStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HugeTlbStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HugeTlbStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HugeTlbStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_HugeTlbStat.Merge(m, src) -} -func (m *HugeTlbStat) XXX_Size() int { - return m.Size() -} -func (m *HugeTlbStat) XXX_DiscardUnknown() { - xxx_messageInfo_HugeTlbStat.DiscardUnknown(m) -} - -var xxx_messageInfo_HugeTlbStat proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v2.Metrics") - proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v2.PidsStat") - proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v2.CPUStat") - proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v2.MemoryStat") - proto.RegisterType((*MemoryEvents)(nil), "io.containerd.cgroups.v2.MemoryEvents") - proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v2.RdmaStat") - proto.RegisterType((*RdmaEntry)(nil), 
"io.containerd.cgroups.v2.RdmaEntry") - proto.RegisterType((*IOStat)(nil), "io.containerd.cgroups.v2.IOStat") - proto.RegisterType((*IOEntry)(nil), "io.containerd.cgroups.v2.IOEntry") - proto.RegisterType((*HugeTlbStat)(nil), "io.containerd.cgroups.v2.HugeTlbStat") -} - -func init() { - proto.RegisterFile("github.com/containerd/cgroups/v2/stats/metrics.proto", fileDescriptor_2fc6005842049e6b) -} - -var fileDescriptor_2fc6005842049e6b = []byte{ - // 1198 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x4d, 0x73, 0xd4, 0x46, - 0x13, 0x66, 0xed, 0xc5, 0xeb, 0xed, 0xb5, 0xc1, 0x0c, 0x86, 0x57, 0xc0, 0xcb, 0xda, 0x5e, 0x02, - 0x45, 0xaa, 0x92, 0xdd, 0x94, 0xf3, 0x55, 0x49, 0x91, 0x4a, 0x19, 0x02, 0x45, 0x8a, 0x10, 0x5c, - 0x02, 0x57, 0x8e, 0xaa, 0x59, 0x69, 0x2c, 0x0d, 0x96, 0x34, 0xaa, 0x99, 0x91, 0x1d, 0x73, 0xca, - 0x21, 0xd7, 0x54, 0x7e, 0x4d, 0xfe, 0x03, 0xb7, 0xe4, 0x98, 0x53, 0x2a, 0xf8, 0x97, 0xa4, 0xba, - 0x67, 0x64, 0x29, 0x07, 0x43, 0x6e, 0xd3, 0x4f, 0x3f, 0xdd, 0xea, 0x8f, 0x99, 0x6e, 0xc1, 0x27, - 0xa9, 0xb4, 0x59, 0x3d, 0x9f, 0xc6, 0xaa, 0x98, 0xc5, 0xaa, 0xb4, 0x5c, 0x96, 0x42, 0x27, 0xb3, - 0x38, 0xd5, 0xaa, 0xae, 0xcc, 0xec, 0x70, 0x7b, 0x66, 0x2c, 0xb7, 0x66, 0x56, 0x08, 0xab, 0x65, - 0x6c, 0xa6, 0x95, 0x56, 0x56, 0xb1, 0x40, 0xaa, 0x69, 0xcb, 0x9e, 0x7a, 0xf6, 0xf4, 0x70, 0xfb, - 0xfa, 0x7a, 0xaa, 0x52, 0x45, 0xa4, 0x19, 0x9e, 0x1c, 0x7f, 0xf2, 0xdb, 0x22, 0x0c, 0x9e, 0x3a, - 0x0f, 0xec, 0x33, 0xe8, 0x57, 0x32, 0x31, 0x41, 0x6f, 0xb3, 0x77, 0x77, 0xb4, 0x3d, 0x99, 0x9e, - 0xe5, 0x6a, 0xba, 0x2b, 0x13, 0xf3, 0xdc, 0x72, 0x1b, 0x12, 0x9f, 0xdd, 0x83, 0xc5, 0xb8, 0xaa, - 0x83, 0x05, 0x32, 0xdb, 0x3a, 0xdb, 0xec, 0xc1, 0xee, 0x1e, 0x5a, 0xdd, 0x1f, 0x9c, 0xfc, 0xb5, - 0xb1, 0xf8, 0x60, 0x77, 0x2f, 0x44, 0x33, 0x76, 0x0f, 0x96, 0x0a, 0x51, 0x28, 0x7d, 0x1c, 0xf4, - 0xc9, 0xc1, 0x7b, 0x67, 0x3b, 0x78, 0x4a, 0x3c, 0xfa, 0xb2, 0xb7, 0xc1, 0x98, 0x75, 0x52, 0xf0, - 0xe0, 0xfc, 0xbb, 0x62, 0x0e, 
0x93, 0x82, 0xbb, 0x98, 0x91, 0xcf, 0x3e, 0x82, 0x05, 0xa9, 0x82, - 0x25, 0xb2, 0xda, 0x3c, 0xdb, 0xea, 0xdb, 0x67, 0x64, 0xb3, 0x20, 0x15, 0xfb, 0x1a, 0x06, 0x59, - 0x9d, 0x0a, 0x9b, 0xcf, 0x83, 0xc1, 0xe6, 0xe2, 0xdd, 0xd1, 0xf6, 0xed, 0xb3, 0xcd, 0x1e, 0xd7, - 0xa9, 0x78, 0x91, 0xcf, 0xc9, 0xb6, 0xb1, 0x62, 0x4f, 0x60, 0xd5, 0x05, 0x1d, 0x89, 0x43, 0x51, - 0x5a, 0x13, 0x2c, 0xd3, 0xd7, 0xef, 0xbc, 0x2b, 0xdf, 0x87, 0xc4, 0x0e, 0x57, 0x8a, 0x8e, 0x34, - 0xf9, 0x12, 0x96, 0x9b, 0x2e, 0xb0, 0x00, 0x06, 0x71, 0xad, 0xb5, 0x28, 0x2d, 0xb5, 0xae, 0x1f, - 0x36, 0x22, 0x5b, 0x87, 0xf3, 0xb9, 0x2c, 0xa4, 0xa5, 0xde, 0xf4, 0x43, 0x27, 0x4c, 0x7e, 0xef, - 0xc1, 0xc0, 0xf7, 0x82, 0xdd, 0x04, 0xa8, 0x0d, 0x4f, 0x45, 0x54, 0x1b, 0x11, 0x7b, 0xf3, 0x21, - 0x21, 0x7b, 0x46, 0xc4, 0xec, 0x06, 0x0c, 0x6b, 0x23, 0xb4, 0xd3, 0x3a, 0x27, 0xcb, 0x08, 0x90, - 0x72, 0x03, 0x46, 0xe6, 0xd8, 0x58, 0x51, 0x38, 0xf5, 0x22, 0xa9, 0xc1, 0x41, 0x44, 0xb8, 0x09, - 0x50, 0xea, 0xa8, 0x12, 0x5a, 0xaa, 0xc4, 0x50, 0x7b, 0xfb, 0xe1, 0xb0, 0xd4, 0xbb, 0x0e, 0x60, - 0x5b, 0xb0, 0x52, 0xea, 0xc8, 0x66, 0x5a, 0x59, 0x9b, 0x8b, 0x84, 0x7a, 0xd8, 0x0f, 0x47, 0xa5, - 0x7e, 0xd1, 0x40, 0xec, 0x36, 0x5c, 0x38, 0xd5, 0xbb, 0xaf, 0x2c, 0x11, 0x69, 0xf5, 0x14, 0xc5, - 0x0f, 0x4d, 0x7e, 0x1d, 0x02, 0xb4, 0x97, 0x83, 0x31, 0xe8, 0xf3, 0x52, 0x95, 0x3e, 0x1d, 0x3a, - 0x23, 0xb6, 0x2f, 0x73, 0xe1, 0x93, 0xa0, 0x33, 0x06, 0x70, 0x20, 0x74, 0x29, 0xf2, 0xc8, 0x58, - 0x1e, 0x1f, 0xf8, 0x0c, 0x46, 0x0e, 0x7b, 0x8e, 0x10, 0x9a, 0x99, 0x9c, 0xcf, 0x7d, 0xf0, 0x74, - 0x26, 0x4c, 0xc5, 0x07, 0x3e, 0x5e, 0x3a, 0x63, 0xa5, 0x4d, 0x56, 0x88, 0xc2, 0xc7, 0xe7, 0x04, - 0xac, 0x10, 0x7e, 0x28, 0x2a, 0x78, 0x55, 0x89, 0x24, 0x18, 0xb8, 0x0a, 0x21, 0xf4, 0x94, 0x10, - 0xac, 0x10, 0x11, 0x12, 0xa9, 0xed, 0x31, 0x5d, 0x88, 0x7e, 0x38, 0x44, 0xe4, 0x1b, 0x04, 0x30, - 0x7d, 0x52, 0x1f, 0x69, 0x69, 0xc5, 0x1c, 0x43, 0x1c, 0xba, 0xf4, 0x11, 0xfd, 0xa1, 0x01, 0xd9, - 0x35, 0x58, 0xc6, 0x1c, 0x23, 0x9b, 0x55, 0x01, 0xb8, 0x1b, 0x80, 
0xf2, 0x8b, 0xac, 0x62, 0xb7, - 0x60, 0x55, 0x96, 0x3c, 0xb6, 0xf2, 0x50, 0x44, 0x54, 0x93, 0x11, 0xe9, 0x57, 0x1a, 0x70, 0x07, - 0x6b, 0xb3, 0x01, 0xa3, 0x2e, 0x65, 0xc5, 0x85, 0xd9, 0x21, 0x74, 0xbd, 0x50, 0x15, 0x57, 0xff, - 0xed, 0xe5, 0x11, 0x56, 0xb3, 0xf5, 0x42, 0x94, 0x0b, 0x5d, 0x2f, 0x44, 0xd8, 0x84, 0x51, 0x5d, - 0x8a, 0x43, 0x19, 0x5b, 0x3e, 0xcf, 0x45, 0x70, 0xd1, 0x55, 0xbb, 0x03, 0xb1, 0xf7, 0x61, 0x0d, - 0x2b, 0x1c, 0x69, 0x11, 0xe7, 0x5c, 0x16, 0x44, 0x5b, 0x23, 0xda, 0x45, 0xc4, 0xc3, 0x16, 0x66, - 0x1f, 0x02, 0x23, 0x6a, 0x5d, 0x76, 0xc9, 0x97, 0x88, 0x7c, 0x09, 0x35, 0x7b, 0x5d, 0x05, 0xbe, - 0x91, 0x2a, 0xdd, 0xe7, 0x75, 0x6e, 0x03, 0xe6, 0x2a, 0xe4, 0x45, 0x36, 0x06, 0xa8, 0xd2, 0x82, - 0xbf, 0x74, 0xca, 0xcb, 0x2e, 0xea, 0x16, 0xc1, 0x0f, 0x1d, 0x29, 0x7d, 0x20, 0xcb, 0xd4, 0x08, - 0x1b, 0x69, 0xe1, 0x78, 0xeb, 0xee, 0x43, 0xad, 0x26, 0x74, 0x0a, 0x36, 0x83, 0xcb, 0x1d, 0x3a, - 0x65, 0xcf, 0xad, 0x08, 0xae, 0x10, 0xbf, 0xe3, 0x69, 0xc7, 0x6b, 0xd8, 0xa7, 0x70, 0xb5, 0x63, - 0x50, 0xaa, 0x44, 0xf8, 0xb8, 0x83, 0xab, 0x64, 0x73, 0xa5, 0xd5, 0x7e, 0xdf, 0x2a, 0xd9, 0x75, - 0x58, 0xae, 0x52, 0x2d, 0xf6, 0x65, 0x9e, 0x07, 0xff, 0x73, 0x0f, 0xb3, 0x91, 0xd9, 0x55, 0x58, - 0xaa, 0x52, 0x13, 0xf3, 0x32, 0x08, 0x48, 0xe3, 0x25, 0x57, 0x04, 0x63, 0x05, 0xcf, 0x83, 0x6b, - 0x4d, 0x11, 0x48, 0x74, 0x45, 0x38, 0x0d, 0xf6, 0x7a, 0x53, 0x84, 0x06, 0x61, 0x13, 0x58, 0xa9, - 0xd2, 0x44, 0x9c, 0x32, 0x6e, 0xb8, 0xfe, 0x77, 0x31, 0xe7, 0x23, 0xe7, 0xaf, 0x8e, 0xf7, 0xb5, - 0x10, 0xc1, 0xff, 0x1b, 0x1f, 0x0d, 0x82, 0xed, 0x6f, 0xa5, 0x24, 0xb8, 0xe9, 0xda, 0xdf, 0x81, - 0xd8, 0x1d, 0xb8, 0x68, 0xb3, 0x2a, 0xa2, 0x42, 0x46, 0x3c, 0xcf, 0x55, 0x1c, 0x8c, 0x9b, 0xe7, - 0x5e, 0x3d, 0x42, 0x74, 0x07, 0x41, 0xf6, 0x01, 0x30, 0xe4, 0xc5, 0x2a, 0xcf, 0x79, 0x65, 0x84, - 0xa7, 0x6e, 0x10, 0x75, 0xcd, 0x66, 0xd5, 0x03, 0xaf, 0x70, 0xec, 0x75, 0x38, 0x4f, 0x03, 0x2d, - 0xd8, 0x74, 0x4f, 0x93, 0x04, 0xbc, 0xad, 0x6e, 0xf0, 0xb9, 0x01, 0xb9, 0xe5, 0xc2, 0x25, 0xe8, - 0x3b, 
0x44, 0xf0, 0x69, 0x9a, 0x23, 0x5e, 0x45, 0xce, 0x76, 0xe2, 0x9e, 0x26, 0x22, 0x7b, 0x64, - 0xdf, 0xa8, 0x9d, 0xf9, 0xad, 0x56, 0x4d, 0xd6, 0x13, 0x03, 0x2b, 0xdd, 0xe9, 0xcd, 0xd6, 0x60, - 0x31, 0x57, 0x47, 0x7e, 0x22, 0xe1, 0x11, 0xa7, 0x48, 0x26, 0xd3, 0xac, 0x19, 0x48, 0x78, 0x46, - 0x56, 0xc1, 0x7f, 0xf4, 0x73, 0x08, 0x8f, 0x88, 0x28, 0x55, 0xf8, 0xf1, 0x83, 0x47, 0x7c, 0xec, - 0x4a, 0x15, 0xd1, 0x01, 0x36, 0xde, 0x4d, 0xa0, 0x81, 0x52, 0xc5, 0x13, 0x99, 0xe7, 0x93, 0x9f, - 0x7b, 0xb0, 0xdc, 0xec, 0x39, 0xf6, 0x55, 0x77, 0x2b, 0xe0, 0xbe, 0xba, 0xf5, 0xf6, 0xe5, 0xf8, - 0xb0, 0xb4, 0xfa, 0xb8, 0x5d, 0x1d, 0x5f, 0xb4, 0xab, 0xe3, 0x3f, 0x1b, 0xfb, 0xfd, 0x22, 0x60, - 0x78, 0x8a, 0xe1, 0x5d, 0x4c, 0xf0, 0x81, 0x0b, 0xca, 0x7d, 0x18, 0x7a, 0x09, 0xeb, 0x9f, 0xc5, - 0x3c, 0xca, 0x78, 0x99, 0xe4, 0xc2, 0x50, 0x15, 0x56, 0x43, 0xc8, 0x62, 0xfe, 0xd8, 0x21, 0x0d, - 0x41, 0xcd, 0x5f, 0x8a, 0xd8, 0x1a, 0xaa, 0x89, 0x23, 0x3c, 0x73, 0xc8, 0x64, 0x07, 0x96, 0xdc, - 0x7a, 0x66, 0x9f, 0x37, 0x1d, 0x76, 0x89, 0x6e, 0xbd, 0x6d, 0x9f, 0xfb, 0x48, 0x89, 0x3f, 0xf9, - 0xa5, 0x07, 0x03, 0x0f, 0xe1, 0x35, 0x29, 0xf8, 0x4b, 0xa5, 0x7d, 0x8f, 0x9c, 0x40, 0xa8, 0x2c, - 0x95, 0x6e, 0x36, 0x28, 0x09, 0x98, 0x94, 0x9e, 0x1f, 0x5b, 0x61, 0x7c, 0xab, 0xbc, 0x84, 0xf8, - 0x91, 0xc3, 0x5d, 0xc3, 0xbc, 0x84, 0xbd, 0xd6, 0x52, 0x99, 0x66, 0x63, 0xe0, 0x19, 0xb1, 0x23, - 0xc4, 0xdc, 0xc2, 0xa0, 0xf3, 0x64, 0x0f, 0x46, 0x9d, 0x5f, 0x87, 0xb7, 0x2c, 0x76, 0x7f, 0x51, - 0x16, 0xda, 0x8b, 0x82, 0xf3, 0x80, 0xa7, 0xc2, 0xc8, 0x57, 0x82, 0x82, 0x1a, 0x86, 0xa7, 0xf2, - 0xfd, 0xe0, 0xf5, 0x9b, 0xf1, 0xb9, 0x3f, 0xdf, 0x8c, 0xcf, 0xfd, 0x74, 0x32, 0xee, 0xbd, 0x3e, - 0x19, 0xf7, 0xfe, 0x38, 0x19, 0xf7, 0xfe, 0x3e, 0x19, 0xf7, 0xe6, 0x4b, 0xf4, 0x17, 0xf8, 0xf1, - 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4f, 0x2b, 0x30, 0xd6, 0x6d, 0x0a, 0x00, 0x00, -} - -func (m *Metrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - 
if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.MemoryEvents != nil { - { - size, err := m.MemoryEvents.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if len(m.Hugetlb) > 0 { - for iNdEx := len(m.Hugetlb) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Hugetlb[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.Io != nil { - { - size, err := m.Io.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Rdma != nil { - { - size, err := m.Rdma.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Memory != nil { - { - size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.CPU != nil { - { - size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Pids != nil { - { - size, err := m.Pids.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m 
*PidsStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PidsStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Limit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x10 - } - if m.Current != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CPUStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CPUStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ThrottledUsec != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledUsec)) - i-- - dAtA[i] = 0x30 - } - if m.NrThrottled != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrThrottled)) - i-- - dAtA[i] = 0x28 - } - if m.NrPeriods != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrPeriods)) - i-- - dAtA[i] = 0x20 - } - if m.SystemUsec != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.SystemUsec)) - i-- - dAtA[i] = 0x18 - } - if m.UserUsec != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.UserUsec)) - i-- - dAtA[i] = 0x10 - } - if m.UsageUsec != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.UsageUsec)) - i-- - dAtA[i] = 
0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemoryStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoryStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.SwapLimit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.SwapLimit)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x98 - } - if m.SwapUsage != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.SwapUsage)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x90 - } - if m.UsageLimit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.UsageLimit)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x88 - } - if m.Usage != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x80 - } - if m.ThpCollapseAlloc != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ThpCollapseAlloc)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf8 - } - if m.ThpFaultAlloc != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ThpFaultAlloc)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf0 - } - if m.Pglazyfreed != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Pglazyfreed)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe8 - } - if m.Pglazyfree != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Pglazyfree)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe0 - } - if m.Pgdeactivate != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Pgdeactivate)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd8 - } - if m.Pgactivate != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Pgactivate)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd0 - } - if m.Pgsteal != 0 { - i = encodeVarintMetrics(dAtA, i, 
uint64(m.Pgsteal)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc8 - } - if m.Pgscan != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Pgscan)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc0 - } - if m.Pgrefill != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Pgrefill)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb8 - } - if m.WorkingsetNodereclaim != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.WorkingsetNodereclaim)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb0 - } - if m.WorkingsetActivate != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.WorkingsetActivate)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa8 - } - if m.WorkingsetRefault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.WorkingsetRefault)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa0 - } - if m.Pgmajfault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Pgmajfault)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x98 - } - if m.Pgfault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Pgfault)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x90 - } - if m.SlabUnreclaimable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.SlabUnreclaimable)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x88 - } - if m.SlabReclaimable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.SlabReclaimable)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.Unevictable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) - i-- - dAtA[i] = 0x78 - } - if m.ActiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) - i-- - dAtA[i] = 0x70 - } - if m.InactiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) - i-- - dAtA[i] = 0x68 - } - if m.ActiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) - i-- - dAtA[i] = 0x60 - } - if m.InactiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) - i-- - dAtA[i] = 0x58 - } - if m.AnonThp != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.AnonThp)) - i-- - dAtA[i] = 0x50 - } - if m.FileWriteback != 0 { - i = 
encodeVarintMetrics(dAtA, i, uint64(m.FileWriteback)) - i-- - dAtA[i] = 0x48 - } - if m.FileDirty != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.FileDirty)) - i-- - dAtA[i] = 0x40 - } - if m.FileMapped != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.FileMapped)) - i-- - dAtA[i] = 0x38 - } - if m.Shmem != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Shmem)) - i-- - dAtA[i] = 0x30 - } - if m.Sock != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Sock)) - i-- - dAtA[i] = 0x28 - } - if m.Slab != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Slab)) - i-- - dAtA[i] = 0x20 - } - if m.KernelStack != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.KernelStack)) - i-- - dAtA[i] = 0x18 - } - if m.File != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.File)) - i-- - dAtA[i] = 0x10 - } - if m.Anon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Anon)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemoryEvents) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryEvents) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoryEvents) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.OomKill != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.OomKill)) - i-- - dAtA[i] = 0x28 - } - if m.Oom != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Oom)) - i-- - dAtA[i] = 0x20 - } - if m.Max != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x18 - } - if m.High != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.High)) - i-- - dAtA[i] = 0x10 - } - if m.Low != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Low)) - i-- - dAtA[i] = 0x8 - } - return 
len(dAtA) - i, nil -} - -func (m *RdmaStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RdmaStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Limit) > 0 { - for iNdEx := len(m.Limit) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Limit[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Current) > 0 { - for iNdEx := len(m.Current) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Current[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RdmaEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.HcaObjects != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects)) - i-- - dAtA[i] = 0x18 - } - if m.HcaHandles != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles)) - i-- - dAtA[i] = 0x10 - } - if len(m.Device) > 0 { - i 
-= len(m.Device) - copy(dAtA[i:], m.Device) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *IOStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IOStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IOStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Usage) > 0 { - for iNdEx := len(m.Usage) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Usage[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *IOEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IOEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IOEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Wios != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Wios)) - i-- - dAtA[i] = 0x30 - } - if m.Rios != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Rios)) - i-- - dAtA[i] = 0x28 - } - if m.Wbytes != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Wbytes)) - i-- - dAtA[i] = 0x20 - } - if m.Rbytes != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Rbytes)) - i-- - dAtA[i] = 0x18 - } - if 
m.Minor != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Minor)) - i-- - dAtA[i] = 0x10 - } - if m.Major != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *HugeTlbStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HugeTlbStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HugeTlbStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Pagesize) > 0 { - i -= len(m.Pagesize) - copy(dAtA[i:], m.Pagesize) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) - i-- - dAtA[i] = 0x1a - } - if m.Max != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - } - if m.Current != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { - offset -= sovMetrics(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Metrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pids != nil { - l = m.Pids.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.CPU != nil { - l = m.CPU.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Memory != nil { - l = m.Memory.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Rdma != nil { - l = m.Rdma.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Io != nil { - l = m.Io.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if len(m.Hugetlb) > 0 { - for _, e := range m.Hugetlb { - l = e.Size() - 
n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.MemoryEvents != nil { - l = m.MemoryEvents.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PidsStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Current != 0 { - n += 1 + sovMetrics(uint64(m.Current)) - } - if m.Limit != 0 { - n += 1 + sovMetrics(uint64(m.Limit)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CPUStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.UsageUsec != 0 { - n += 1 + sovMetrics(uint64(m.UsageUsec)) - } - if m.UserUsec != 0 { - n += 1 + sovMetrics(uint64(m.UserUsec)) - } - if m.SystemUsec != 0 { - n += 1 + sovMetrics(uint64(m.SystemUsec)) - } - if m.NrPeriods != 0 { - n += 1 + sovMetrics(uint64(m.NrPeriods)) - } - if m.NrThrottled != 0 { - n += 1 + sovMetrics(uint64(m.NrThrottled)) - } - if m.ThrottledUsec != 0 { - n += 1 + sovMetrics(uint64(m.ThrottledUsec)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemoryStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Anon != 0 { - n += 1 + sovMetrics(uint64(m.Anon)) - } - if m.File != 0 { - n += 1 + sovMetrics(uint64(m.File)) - } - if m.KernelStack != 0 { - n += 1 + sovMetrics(uint64(m.KernelStack)) - } - if m.Slab != 0 { - n += 1 + sovMetrics(uint64(m.Slab)) - } - if m.Sock != 0 { - n += 1 + sovMetrics(uint64(m.Sock)) - } - if m.Shmem != 0 { - n += 1 + sovMetrics(uint64(m.Shmem)) - } - if m.FileMapped != 0 { - n += 1 + sovMetrics(uint64(m.FileMapped)) - } - if m.FileDirty != 0 { - n += 1 + sovMetrics(uint64(m.FileDirty)) - } - if m.FileWriteback != 0 { - n += 1 + sovMetrics(uint64(m.FileWriteback)) - } - if m.AnonThp != 0 { - n += 1 + sovMetrics(uint64(m.AnonThp)) - } - if m.InactiveAnon != 0 { - n += 1 + sovMetrics(uint64(m.InactiveAnon)) - } - if m.ActiveAnon != 0 { 
- n += 1 + sovMetrics(uint64(m.ActiveAnon)) - } - if m.InactiveFile != 0 { - n += 1 + sovMetrics(uint64(m.InactiveFile)) - } - if m.ActiveFile != 0 { - n += 1 + sovMetrics(uint64(m.ActiveFile)) - } - if m.Unevictable != 0 { - n += 1 + sovMetrics(uint64(m.Unevictable)) - } - if m.SlabReclaimable != 0 { - n += 2 + sovMetrics(uint64(m.SlabReclaimable)) - } - if m.SlabUnreclaimable != 0 { - n += 2 + sovMetrics(uint64(m.SlabUnreclaimable)) - } - if m.Pgfault != 0 { - n += 2 + sovMetrics(uint64(m.Pgfault)) - } - if m.Pgmajfault != 0 { - n += 2 + sovMetrics(uint64(m.Pgmajfault)) - } - if m.WorkingsetRefault != 0 { - n += 2 + sovMetrics(uint64(m.WorkingsetRefault)) - } - if m.WorkingsetActivate != 0 { - n += 2 + sovMetrics(uint64(m.WorkingsetActivate)) - } - if m.WorkingsetNodereclaim != 0 { - n += 2 + sovMetrics(uint64(m.WorkingsetNodereclaim)) - } - if m.Pgrefill != 0 { - n += 2 + sovMetrics(uint64(m.Pgrefill)) - } - if m.Pgscan != 0 { - n += 2 + sovMetrics(uint64(m.Pgscan)) - } - if m.Pgsteal != 0 { - n += 2 + sovMetrics(uint64(m.Pgsteal)) - } - if m.Pgactivate != 0 { - n += 2 + sovMetrics(uint64(m.Pgactivate)) - } - if m.Pgdeactivate != 0 { - n += 2 + sovMetrics(uint64(m.Pgdeactivate)) - } - if m.Pglazyfree != 0 { - n += 2 + sovMetrics(uint64(m.Pglazyfree)) - } - if m.Pglazyfreed != 0 { - n += 2 + sovMetrics(uint64(m.Pglazyfreed)) - } - if m.ThpFaultAlloc != 0 { - n += 2 + sovMetrics(uint64(m.ThpFaultAlloc)) - } - if m.ThpCollapseAlloc != 0 { - n += 2 + sovMetrics(uint64(m.ThpCollapseAlloc)) - } - if m.Usage != 0 { - n += 2 + sovMetrics(uint64(m.Usage)) - } - if m.UsageLimit != 0 { - n += 2 + sovMetrics(uint64(m.UsageLimit)) - } - if m.SwapUsage != 0 { - n += 2 + sovMetrics(uint64(m.SwapUsage)) - } - if m.SwapLimit != 0 { - n += 2 + sovMetrics(uint64(m.SwapLimit)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemoryEvents) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Low != 0 { - n += 1 + 
sovMetrics(uint64(m.Low)) - } - if m.High != 0 { - n += 1 + sovMetrics(uint64(m.High)) - } - if m.Max != 0 { - n += 1 + sovMetrics(uint64(m.Max)) - } - if m.Oom != 0 { - n += 1 + sovMetrics(uint64(m.Oom)) - } - if m.OomKill != 0 { - n += 1 + sovMetrics(uint64(m.OomKill)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RdmaStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Current) > 0 { - for _, e := range m.Current { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.Limit) > 0 { - for _, e := range m.Limit { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RdmaEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Device) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.HcaHandles != 0 { - n += 1 + sovMetrics(uint64(m.HcaHandles)) - } - if m.HcaObjects != 0 { - n += 1 + sovMetrics(uint64(m.HcaObjects)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *IOStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Usage) > 0 { - for _, e := range m.Usage { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *IOEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Major != 0 { - n += 1 + sovMetrics(uint64(m.Major)) - } - if m.Minor != 0 { - n += 1 + sovMetrics(uint64(m.Minor)) - } - if m.Rbytes != 0 { - n += 1 + sovMetrics(uint64(m.Rbytes)) - } - if m.Wbytes != 0 { - n += 1 + sovMetrics(uint64(m.Wbytes)) - } - if m.Rios != 0 { - n += 1 + sovMetrics(uint64(m.Rios)) - } - if m.Wios != 0 { - n += 1 + sovMetrics(uint64(m.Wios)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m 
*HugeTlbStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Current != 0 { - n += 1 + sovMetrics(uint64(m.Current)) - } - if m.Max != 0 { - n += 1 + sovMetrics(uint64(m.Max)) - } - l = len(m.Pagesize) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovMetrics(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMetrics(x uint64) (n int) { - return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Metrics) String() string { - if this == nil { - return "nil" - } - repeatedStringForHugetlb := "[]*HugeTlbStat{" - for _, f := range this.Hugetlb { - repeatedStringForHugetlb += strings.Replace(f.String(), "HugeTlbStat", "HugeTlbStat", 1) + "," - } - repeatedStringForHugetlb += "}" - s := strings.Join([]string{`&Metrics{`, - `Pids:` + strings.Replace(this.Pids.String(), "PidsStat", "PidsStat", 1) + `,`, - `CPU:` + strings.Replace(this.CPU.String(), "CPUStat", "CPUStat", 1) + `,`, - `Memory:` + strings.Replace(this.Memory.String(), "MemoryStat", "MemoryStat", 1) + `,`, - `Rdma:` + strings.Replace(this.Rdma.String(), "RdmaStat", "RdmaStat", 1) + `,`, - `Io:` + strings.Replace(this.Io.String(), "IOStat", "IOStat", 1) + `,`, - `Hugetlb:` + repeatedStringForHugetlb + `,`, - `MemoryEvents:` + strings.Replace(this.MemoryEvents.String(), "MemoryEvents", "MemoryEvents", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PidsStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PidsStat{`, - `Current:` + fmt.Sprintf("%v", this.Current) + `,`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CPUStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CPUStat{`, 
- `UsageUsec:` + fmt.Sprintf("%v", this.UsageUsec) + `,`, - `UserUsec:` + fmt.Sprintf("%v", this.UserUsec) + `,`, - `SystemUsec:` + fmt.Sprintf("%v", this.SystemUsec) + `,`, - `NrPeriods:` + fmt.Sprintf("%v", this.NrPeriods) + `,`, - `NrThrottled:` + fmt.Sprintf("%v", this.NrThrottled) + `,`, - `ThrottledUsec:` + fmt.Sprintf("%v", this.ThrottledUsec) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryStat{`, - `Anon:` + fmt.Sprintf("%v", this.Anon) + `,`, - `File:` + fmt.Sprintf("%v", this.File) + `,`, - `KernelStack:` + fmt.Sprintf("%v", this.KernelStack) + `,`, - `Slab:` + fmt.Sprintf("%v", this.Slab) + `,`, - `Sock:` + fmt.Sprintf("%v", this.Sock) + `,`, - `Shmem:` + fmt.Sprintf("%v", this.Shmem) + `,`, - `FileMapped:` + fmt.Sprintf("%v", this.FileMapped) + `,`, - `FileDirty:` + fmt.Sprintf("%v", this.FileDirty) + `,`, - `FileWriteback:` + fmt.Sprintf("%v", this.FileWriteback) + `,`, - `AnonThp:` + fmt.Sprintf("%v", this.AnonThp) + `,`, - `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`, - `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`, - `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`, - `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`, - `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`, - `SlabReclaimable:` + fmt.Sprintf("%v", this.SlabReclaimable) + `,`, - `SlabUnreclaimable:` + fmt.Sprintf("%v", this.SlabUnreclaimable) + `,`, - `Pgfault:` + fmt.Sprintf("%v", this.Pgfault) + `,`, - `Pgmajfault:` + fmt.Sprintf("%v", this.Pgmajfault) + `,`, - `WorkingsetRefault:` + fmt.Sprintf("%v", this.WorkingsetRefault) + `,`, - `WorkingsetActivate:` + fmt.Sprintf("%v", this.WorkingsetActivate) + `,`, - `WorkingsetNodereclaim:` + fmt.Sprintf("%v", this.WorkingsetNodereclaim) + `,`, - `Pgrefill:` + fmt.Sprintf("%v", this.Pgrefill) + `,`, - `Pgscan:` + 
fmt.Sprintf("%v", this.Pgscan) + `,`, - `Pgsteal:` + fmt.Sprintf("%v", this.Pgsteal) + `,`, - `Pgactivate:` + fmt.Sprintf("%v", this.Pgactivate) + `,`, - `Pgdeactivate:` + fmt.Sprintf("%v", this.Pgdeactivate) + `,`, - `Pglazyfree:` + fmt.Sprintf("%v", this.Pglazyfree) + `,`, - `Pglazyfreed:` + fmt.Sprintf("%v", this.Pglazyfreed) + `,`, - `ThpFaultAlloc:` + fmt.Sprintf("%v", this.ThpFaultAlloc) + `,`, - `ThpCollapseAlloc:` + fmt.Sprintf("%v", this.ThpCollapseAlloc) + `,`, - `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, - `UsageLimit:` + fmt.Sprintf("%v", this.UsageLimit) + `,`, - `SwapUsage:` + fmt.Sprintf("%v", this.SwapUsage) + `,`, - `SwapLimit:` + fmt.Sprintf("%v", this.SwapLimit) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryEvents) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryEvents{`, - `Low:` + fmt.Sprintf("%v", this.Low) + `,`, - `High:` + fmt.Sprintf("%v", this.High) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `Oom:` + fmt.Sprintf("%v", this.Oom) + `,`, - `OomKill:` + fmt.Sprintf("%v", this.OomKill) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RdmaStat) String() string { - if this == nil { - return "nil" - } - repeatedStringForCurrent := "[]*RdmaEntry{" - for _, f := range this.Current { - repeatedStringForCurrent += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," - } - repeatedStringForCurrent += "}" - repeatedStringForLimit := "[]*RdmaEntry{" - for _, f := range this.Limit { - repeatedStringForLimit += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," - } - repeatedStringForLimit += "}" - s := strings.Join([]string{`&RdmaStat{`, - `Current:` + repeatedStringForCurrent + `,`, - `Limit:` + repeatedStringForLimit + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - 
return s -} -func (this *RdmaEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RdmaEntry{`, - `Device:` + fmt.Sprintf("%v", this.Device) + `,`, - `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`, - `HcaObjects:` + fmt.Sprintf("%v", this.HcaObjects) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *IOStat) String() string { - if this == nil { - return "nil" - } - repeatedStringForUsage := "[]*IOEntry{" - for _, f := range this.Usage { - repeatedStringForUsage += strings.Replace(f.String(), "IOEntry", "IOEntry", 1) + "," - } - repeatedStringForUsage += "}" - s := strings.Join([]string{`&IOStat{`, - `Usage:` + repeatedStringForUsage + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *IOEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IOEntry{`, - `Major:` + fmt.Sprintf("%v", this.Major) + `,`, - `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`, - `Rbytes:` + fmt.Sprintf("%v", this.Rbytes) + `,`, - `Wbytes:` + fmt.Sprintf("%v", this.Wbytes) + `,`, - `Rios:` + fmt.Sprintf("%v", this.Rios) + `,`, - `Wios:` + fmt.Sprintf("%v", this.Wios) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *HugeTlbStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HugeTlbStat{`, - `Current:` + fmt.Sprintf("%v", this.Current) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringMetrics(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Metrics) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pids == nil { - m.Pids = &PidsStat{} - } - if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CPU == nil { - m.CPU = &CPUStat{} - } - if err := 
m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memory == nil { - m.Memory = &MemoryStat{} - } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Rdma == nil { - m.Rdma = &RdmaStat{} - } - if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Io", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - 
return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Io == nil { - m.Io = &IOStat{} - } - if err := m.Io.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hugetlb = append(m.Hugetlb, &HugeTlbStat{}) - if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryEvents", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MemoryEvents == nil { - m.MemoryEvents = &MemoryEvents{} - } - if err := m.MemoryEvents.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PidsStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PidsStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - m.Current = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Current |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CPUStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPUStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UsageUsec", wireType) - } - m.UsageUsec = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UsageUsec |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UserUsec", wireType) - } - m.UserUsec = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UserUsec |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SystemUsec", wireType) - } - m.SystemUsec = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SystemUsec |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrPeriods", 
wireType) - } - m.NrPeriods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrPeriods |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrThrottled", wireType) - } - m.NrThrottled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrThrottled |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ThrottledUsec", wireType) - } - m.ThrottledUsec = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ThrottledUsec |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Anon", wireType) - } - m.Anon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Anon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) - } - m.File = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.File |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KernelStack", wireType) - } - m.KernelStack = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KernelStack |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Slab", wireType) - } - m.Slab = 
0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Slab |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sock", wireType) - } - m.Sock = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Sock |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shmem", wireType) - } - m.Shmem = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Shmem |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FileMapped", wireType) - } - m.FileMapped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FileMapped |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FileDirty", wireType) - } - m.FileDirty = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FileDirty |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FileWriteback", wireType) - } - m.FileWriteback = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FileWriteback |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AnonThp", wireType) - } - m.AnonThp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AnonThp |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType) - } - m.InactiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InactiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType) - } - m.ActiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ActiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType) - } - m.InactiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InactiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType) - } - m.ActiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ActiveFile |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType) - } - m.Unevictable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Unevictable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SlabReclaimable", wireType) - } - m.SlabReclaimable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SlabReclaimable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SlabUnreclaimable", wireType) - } - m.SlabUnreclaimable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SlabUnreclaimable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 18: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pgfault", wireType) - } - m.Pgfault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pgfault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 19: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pgmajfault", wireType) - } - m.Pgmajfault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pgmajfault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - 
} - case 20: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingsetRefault", wireType) - } - m.WorkingsetRefault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WorkingsetRefault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 21: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingsetActivate", wireType) - } - m.WorkingsetActivate = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WorkingsetActivate |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 22: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingsetNodereclaim", wireType) - } - m.WorkingsetNodereclaim = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WorkingsetNodereclaim |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 23: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pgrefill", wireType) - } - m.Pgrefill = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pgrefill |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 24: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pgscan", wireType) - } - m.Pgscan = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pgscan |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 25: - if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pgsteal", wireType) - } - m.Pgsteal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pgsteal |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 26: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pgactivate", wireType) - } - m.Pgactivate = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pgactivate |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 27: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pgdeactivate", wireType) - } - m.Pgdeactivate = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pgdeactivate |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 28: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pglazyfree", wireType) - } - m.Pglazyfree = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pglazyfree |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 29: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pglazyfreed", wireType) - } - m.Pglazyfreed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pglazyfreed |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 30: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ThpFaultAlloc", wireType) - } - m.ThpFaultAlloc = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ThpFaultAlloc |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 31: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ThpCollapseAlloc", wireType) - } - m.ThpCollapseAlloc = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ThpCollapseAlloc |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 32: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - m.Usage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Usage |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 33: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UsageLimit", wireType) - } - m.UsageLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UsageLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 34: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SwapUsage", wireType) - } - m.SwapUsage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SwapUsage |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 35: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SwapLimit", wireType) - } - m.SwapLimit = 0 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SwapLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryEvents) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryEvents: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryEvents: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Low", wireType) - } - m.Low = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Low |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field High", wireType) - } - m.High = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.High |= uint64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Oom", wireType) - } - m.Oom = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Oom |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OomKill", wireType) - } - m.OomKill = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OomKill |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RdmaStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Current = append(m.Current, &RdmaEntry{}) - if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics 
- } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Limit = append(m.Limit, &RdmaEntry{}) - if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RdmaEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Device = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - 
case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType) - } - m.HcaHandles = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HcaHandles |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType) - } - m.HcaObjects = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HcaObjects |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IOStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IOStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IOStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Usage = append(m.Usage, &IOEntry{}) - if err := m.Usage[len(m.Usage)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IOEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IOEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IOEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) - } - m.Major = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Major |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) - } - m.Minor = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Minor |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Rbytes", wireType) - } - m.Rbytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Rbytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Wbytes", wireType) - } - m.Wbytes = 0 - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Wbytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Rios", wireType) - } - m.Rios = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Rios |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Wios", wireType) - } - m.Wios = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Wios |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HugeTlbStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HugeTlbStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HugeTlbStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - m.Current = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Current |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx 
+ intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pagesize = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMetrics(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMetrics - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMetrics - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMetrics - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - 
ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.txt b/vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.txt deleted file mode 100644 index 59fe27c..0000000 --- a/vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.txt +++ /dev/null @@ -1,539 +0,0 @@ -file { - name: "github.com/containerd/cgroups/v2/stats/metrics.proto" - package: "io.containerd.cgroups.v2" - dependency: "gogoproto/gogo.proto" - message_type { - name: "Metrics" - field { - name: "pids" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v2.PidsStat" - json_name: "pids" - } - field { - name: "cpu" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v2.CPUStat" - options { - 65004: "CPU" - } - json_name: "cpu" - } - field { - name: "memory" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v2.MemoryStat" - json_name: "memory" - } - field { - name: "rdma" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v2.RdmaStat" - json_name: "rdma" - } - field { - name: "io" - number: 6 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v2.IOStat" - json_name: "io" - } - field { - name: "hugetlb" - number: 7 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v2.HugeTlbStat" - json_name: "hugetlb" - } - field { - name: "memory_events" - number: 8 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v2.MemoryEvents" - json_name: "memoryEvents" - } - } - message_type { - name: "PidsStat" - field { - name: "current" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "current" - 
} - field { - name: "limit" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "limit" - } - } - message_type { - name: "CPUStat" - field { - name: "usage_usec" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "usageUsec" - } - field { - name: "user_usec" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "userUsec" - } - field { - name: "system_usec" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "systemUsec" - } - field { - name: "nr_periods" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrPeriods" - } - field { - name: "nr_throttled" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrThrottled" - } - field { - name: "throttled_usec" - number: 6 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "throttledUsec" - } - } - message_type { - name: "MemoryStat" - field { - name: "anon" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "anon" - } - field { - name: "file" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "file" - } - field { - name: "kernel_stack" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "kernelStack" - } - field { - name: "slab" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "slab" - } - field { - name: "sock" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "sock" - } - field { - name: "shmem" - number: 6 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "shmem" - } - field { - name: "file_mapped" - number: 7 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "fileMapped" - } - field { - name: "file_dirty" - number: 8 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "fileDirty" - } - field { - name: "file_writeback" - number: 9 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "fileWriteback" - } - field { - name: "anon_thp" - number: 10 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - 
json_name: "anonThp" - } - field { - name: "inactive_anon" - number: 11 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "inactiveAnon" - } - field { - name: "active_anon" - number: 12 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "activeAnon" - } - field { - name: "inactive_file" - number: 13 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "inactiveFile" - } - field { - name: "active_file" - number: 14 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "activeFile" - } - field { - name: "unevictable" - number: 15 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "unevictable" - } - field { - name: "slab_reclaimable" - number: 16 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "slabReclaimable" - } - field { - name: "slab_unreclaimable" - number: 17 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "slabUnreclaimable" - } - field { - name: "pgfault" - number: 18 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgfault" - } - field { - name: "pgmajfault" - number: 19 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgmajfault" - } - field { - name: "workingset_refault" - number: 20 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "workingsetRefault" - } - field { - name: "workingset_activate" - number: 21 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "workingsetActivate" - } - field { - name: "workingset_nodereclaim" - number: 22 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "workingsetNodereclaim" - } - field { - name: "pgrefill" - number: 23 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgrefill" - } - field { - name: "pgscan" - number: 24 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgscan" - } - field { - name: "pgsteal" - number: 25 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgsteal" - } - field { - name: "pgactivate" - number: 26 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgactivate" - } - field { - 
name: "pgdeactivate" - number: 27 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgdeactivate" - } - field { - name: "pglazyfree" - number: 28 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pglazyfree" - } - field { - name: "pglazyfreed" - number: 29 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pglazyfreed" - } - field { - name: "thp_fault_alloc" - number: 30 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "thpFaultAlloc" - } - field { - name: "thp_collapse_alloc" - number: 31 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "thpCollapseAlloc" - } - field { - name: "usage" - number: 32 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "usage" - } - field { - name: "usage_limit" - number: 33 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "usageLimit" - } - field { - name: "swap_usage" - number: 34 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "swapUsage" - } - field { - name: "swap_limit" - number: 35 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "swapLimit" - } - } - message_type { - name: "MemoryEvents" - field { - name: "low" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "low" - } - field { - name: "high" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "high" - } - field { - name: "max" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "max" - } - field { - name: "oom" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "oom" - } - field { - name: "oom_kill" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "oomKill" - } - } - message_type { - name: "RdmaStat" - field { - name: "current" - number: 1 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v2.RdmaEntry" - json_name: "current" - } - field { - name: "limit" - number: 2 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v2.RdmaEntry" - json_name: "limit" - } 
- } - message_type { - name: "RdmaEntry" - field { - name: "device" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: "device" - } - field { - name: "hca_handles" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT32 - json_name: "hcaHandles" - } - field { - name: "hca_objects" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT32 - json_name: "hcaObjects" - } - } - message_type { - name: "IOStat" - field { - name: "usage" - number: 1 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v2.IOEntry" - json_name: "usage" - } - } - message_type { - name: "IOEntry" - field { - name: "major" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "major" - } - field { - name: "minor" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "minor" - } - field { - name: "rbytes" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "rbytes" - } - field { - name: "wbytes" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "wbytes" - } - field { - name: "rios" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "rios" - } - field { - name: "wios" - number: 6 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "wios" - } - } - message_type { - name: "HugeTlbStat" - field { - name: "current" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "current" - } - field { - name: "max" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "max" - } - field { - name: "pagesize" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: "pagesize" - } - } - syntax: "proto3" -} diff --git a/vendor/github.com/containerd/cgroups/v2/stats/metrics.proto b/vendor/github.com/containerd/cgroups/v2/stats/metrics.proto deleted file mode 100644 index 8ac472e..0000000 --- a/vendor/github.com/containerd/cgroups/v2/stats/metrics.proto +++ /dev/null @@ -1,105 +0,0 @@ -syntax = "proto3"; - -package io.containerd.cgroups.v2; - - 
import "gogoproto/gogo.proto"; - -message Metrics { - PidsStat pids = 1; - CPUStat cpu = 2 [(gogoproto.customname) = "CPU"]; - MemoryStat memory = 4; - RdmaStat rdma = 5; - IOStat io = 6; - repeated HugeTlbStat hugetlb = 7; - MemoryEvents memory_events = 8; -} - -message PidsStat { - uint64 current = 1; - uint64 limit = 2; -} - -message CPUStat { - uint64 usage_usec = 1; - uint64 user_usec = 2; - uint64 system_usec = 3; - uint64 nr_periods = 4; - uint64 nr_throttled = 5; - uint64 throttled_usec = 6; -} - -message MemoryStat { - uint64 anon = 1; - uint64 file = 2; - uint64 kernel_stack = 3; - uint64 slab = 4; - uint64 sock = 5; - uint64 shmem = 6; - uint64 file_mapped = 7; - uint64 file_dirty = 8; - uint64 file_writeback = 9; - uint64 anon_thp = 10; - uint64 inactive_anon = 11; - uint64 active_anon = 12; - uint64 inactive_file = 13; - uint64 active_file = 14; - uint64 unevictable = 15; - uint64 slab_reclaimable = 16; - uint64 slab_unreclaimable = 17; - uint64 pgfault = 18; - uint64 pgmajfault = 19; - uint64 workingset_refault = 20; - uint64 workingset_activate = 21; - uint64 workingset_nodereclaim = 22; - uint64 pgrefill = 23; - uint64 pgscan = 24; - uint64 pgsteal = 25; - uint64 pgactivate = 26; - uint64 pgdeactivate = 27; - uint64 pglazyfree = 28; - uint64 pglazyfreed = 29; - uint64 thp_fault_alloc = 30; - uint64 thp_collapse_alloc = 31; - uint64 usage = 32; - uint64 usage_limit = 33; - uint64 swap_usage = 34; - uint64 swap_limit = 35; -} - -message MemoryEvents { - uint64 low = 1; - uint64 high = 2; - uint64 max = 3; - uint64 oom = 4; - uint64 oom_kill = 5; -} - -message RdmaStat { - repeated RdmaEntry current = 1; - repeated RdmaEntry limit = 2; -} - -message RdmaEntry { - string device = 1; - uint32 hca_handles = 2; - uint32 hca_objects = 3; -} - -message IOStat { - repeated IOEntry usage = 1; -} - -message IOEntry { - uint64 major = 1; - uint64 minor = 2; - uint64 rbytes = 3; - uint64 wbytes = 4; - uint64 rios = 5; - uint64 wios = 6; -} - -message HugeTlbStat 
{ - uint64 current = 1; - uint64 max = 2; - string pagesize = 3; -} diff --git a/vendor/github.com/containerd/cgroups/v2/utils.go b/vendor/github.com/containerd/cgroups/v2/utils.go deleted file mode 100644 index 8b8d654..0000000 --- a/vendor/github.com/containerd/cgroups/v2/utils.go +++ /dev/null @@ -1,436 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v2 - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "math" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/containerd/cgroups/v2/stats" - "github.com/godbus/dbus/v5" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - cgroupProcs = "cgroup.procs" - defaultDirPerm = 0755 -) - -// defaultFilePerm is a var so that the test framework can change the filemode -// of all files created when the tests are running. The difference between the -// tests and real world use is that files like "cgroup.procs" will exist when writing -// to a read cgroup filesystem and do not exist prior when running in the tests. 
-// this is set to a non 0 value in the test code -var defaultFilePerm = os.FileMode(0) - -// remove will remove a cgroup path handling EAGAIN and EBUSY errors and -// retrying the remove after a exp timeout -func remove(path string) error { - var err error - delay := 10 * time.Millisecond - for i := 0; i < 5; i++ { - if i != 0 { - time.Sleep(delay) - delay *= 2 - } - if err = os.RemoveAll(path); err == nil { - return nil - } - } - return errors.Wrapf(err, "cgroups: unable to remove path %q", path) -} - -// parseCgroupProcsFile parses /sys/fs/cgroup/$GROUPPATH/cgroup.procs -func parseCgroupProcsFile(path string) ([]uint64, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - var ( - out []uint64 - s = bufio.NewScanner(f) - ) - for s.Scan() { - if t := s.Text(); t != "" { - pid, err := strconv.ParseUint(t, 10, 0) - if err != nil { - return nil, err - } - out = append(out, pid) - } - } - if err := s.Err(); err != nil { - return nil, err - } - return out, nil -} - -func parseKV(raw string) (string, interface{}, error) { - parts := strings.Fields(raw) - switch len(parts) { - case 2: - v, err := parseUint(parts[1], 10, 64) - if err != nil { - // if we cannot parse as a uint, parse as a string - return parts[0], parts[1], nil - } - return parts[0], v, nil - default: - return "", 0, ErrInvalidFormat - } -} - -func parseUint(s string, base, bitSize int) (uint64, error) { - v, err := strconv.ParseUint(s, base, bitSize) - if err != nil { - intValue, intErr := strconv.ParseInt(s, base, bitSize) - // 1. Handle negative values greater than MinInt64 (and) - // 2. 
Handle negative values lesser than MinInt64 - if intErr == nil && intValue < 0 { - return 0, nil - } else if intErr != nil && - intErr.(*strconv.NumError).Err == strconv.ErrRange && - intValue < 0 { - return 0, nil - } - return 0, err - } - return v, nil -} - -// parseCgroupFile parses /proc/PID/cgroup file and return string -func parseCgroupFile(path string) (string, error) { - f, err := os.Open(path) - if err != nil { - return "", err - } - defer f.Close() - return parseCgroupFromReader(f) -} - -func parseCgroupFromReader(r io.Reader) (string, error) { - var ( - s = bufio.NewScanner(r) - ) - for s.Scan() { - var ( - text = s.Text() - parts = strings.SplitN(text, ":", 3) - ) - if len(parts) < 3 { - return "", fmt.Errorf("invalid cgroup entry: %q", text) - } - // text is like "0::/user.slice/user-1001.slice/session-1.scope" - if parts[0] == "0" && parts[1] == "" { - return parts[2], nil - } - } - if err := s.Err(); err != nil { - return "", err - } - return "", fmt.Errorf("cgroup path not found") -} - -// ToResources converts the oci LinuxResources struct into a -// v2 Resources type for use with this package. 
-// -// converting cgroups configuration from v1 to v2 -// ref: https://github.com/containers/crun/blob/master/crun.1.md#cgroup-v2 -func ToResources(spec *specs.LinuxResources) *Resources { - var resources Resources - if cpu := spec.CPU; cpu != nil { - resources.CPU = &CPU{ - Cpus: cpu.Cpus, - Mems: cpu.Mems, - } - if shares := cpu.Shares; shares != nil { - convertedWeight := 1 + ((*shares-2)*9999)/262142 - resources.CPU.Weight = &convertedWeight - } - if period := cpu.Period; period != nil { - resources.CPU.Max = NewCPUMax(cpu.Quota, period) - } - } - if mem := spec.Memory; mem != nil { - resources.Memory = &Memory{} - if swap := mem.Swap; swap != nil { - resources.Memory.Swap = swap - } - if l := mem.Limit; l != nil { - resources.Memory.Max = l - } - if l := mem.Reservation; l != nil { - resources.Memory.Low = l - } - } - if hugetlbs := spec.HugepageLimits; hugetlbs != nil { - hugeTlbUsage := HugeTlb{} - for _, hugetlb := range hugetlbs { - hugeTlbUsage = append(hugeTlbUsage, HugeTlbEntry{ - HugePageSize: hugetlb.Pagesize, - Limit: hugetlb.Limit, - }) - } - resources.HugeTlb = &hugeTlbUsage - } - if pids := spec.Pids; pids != nil { - resources.Pids = &Pids{ - Max: pids.Limit, - } - } - if i := spec.BlockIO; i != nil { - resources.IO = &IO{} - if i.Weight != nil { - resources.IO.BFQ.Weight = 1 + (*i.Weight-10)*9999/990 - } - for t, devices := range map[IOType][]specs.LinuxThrottleDevice{ - ReadBPS: i.ThrottleReadBpsDevice, - WriteBPS: i.ThrottleWriteBpsDevice, - ReadIOPS: i.ThrottleReadIOPSDevice, - WriteIOPS: i.ThrottleWriteIOPSDevice, - } { - for _, d := range devices { - resources.IO.Max = append(resources.IO.Max, Entry{ - Type: t, - Major: d.Major, - Minor: d.Minor, - Rate: d.Rate, - }) - } - } - } - if i := spec.Rdma; i != nil { - resources.RDMA = &RDMA{} - for device, value := range spec.Rdma { - if device != "" && (value.HcaHandles != nil || value.HcaObjects != nil) { - resources.RDMA.Limit = append(resources.RDMA.Limit, RDMAEntry{ - Device: device, - 
HcaHandles: *value.HcaHandles, - HcaObjects: *value.HcaObjects, - }) - } - } - } - - return &resources -} - -// Gets uint64 parsed content of single value cgroup stat file -func getStatFileContentUint64(filePath string) uint64 { - contents, err := ioutil.ReadFile(filePath) - if err != nil { - return 0 - } - trimmed := strings.TrimSpace(string(contents)) - if trimmed == "max" { - return math.MaxUint64 - } - - res, err := parseUint(trimmed, 10, 64) - if err != nil { - logrus.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), filePath) - return res - } - - return res -} - -func readIoStats(path string) []*stats.IOEntry { - // more details on the io.stat file format: https://www.kernel.org/doc/Documentation/cgroup-v2.txt - var usage []*stats.IOEntry - fpath := filepath.Join(path, "io.stat") - currentData, err := ioutil.ReadFile(fpath) - if err != nil { - return usage - } - entries := strings.Split(string(currentData), "\n") - - for _, entry := range entries { - parts := strings.Split(entry, " ") - if len(parts) < 2 { - continue - } - majmin := strings.Split(parts[0], ":") - if len(majmin) != 2 { - continue - } - major, err := strconv.ParseUint(majmin[0], 10, 0) - if err != nil { - return usage - } - minor, err := strconv.ParseUint(majmin[1], 10, 0) - if err != nil { - return usage - } - parts = parts[1:] - ioEntry := stats.IOEntry{ - Major: major, - Minor: minor, - } - for _, s := range parts { - keyPairValue := strings.Split(s, "=") - if len(keyPairValue) != 2 { - continue - } - v, err := strconv.ParseUint(keyPairValue[1], 10, 0) - if err != nil { - continue - } - switch keyPairValue[0] { - case "rbytes": - ioEntry.Rbytes = v - case "wbytes": - ioEntry.Wbytes = v - case "rios": - ioEntry.Rios = v - case "wios": - ioEntry.Wios = v - } - } - usage = append(usage, &ioEntry) - } - return usage -} - -func rdmaStats(filepath string) []*stats.RdmaEntry { - currentData, err := ioutil.ReadFile(filepath) - if err != nil { - return []*stats.RdmaEntry{} 
- } - return toRdmaEntry(strings.Split(string(currentData), "\n")) -} - -func parseRdmaKV(raw string, entry *stats.RdmaEntry) { - var value uint64 - var err error - - parts := strings.Split(raw, "=") - switch len(parts) { - case 2: - if parts[1] == "max" { - value = math.MaxUint32 - } else { - value, err = parseUint(parts[1], 10, 32) - if err != nil { - return - } - } - if parts[0] == "hca_handle" { - entry.HcaHandles = uint32(value) - } else if parts[0] == "hca_object" { - entry.HcaObjects = uint32(value) - } - } -} - -func toRdmaEntry(strEntries []string) []*stats.RdmaEntry { - var rdmaEntries []*stats.RdmaEntry - for i := range strEntries { - parts := strings.Fields(strEntries[i]) - switch len(parts) { - case 3: - entry := new(stats.RdmaEntry) - entry.Device = parts[0] - parseRdmaKV(parts[1], entry) - parseRdmaKV(parts[2], entry) - - rdmaEntries = append(rdmaEntries, entry) - default: - continue - } - } - return rdmaEntries -} - -// isUnitExists returns true if the error is that a systemd unit already exists. 
-func isUnitExists(err error) bool { - if err != nil { - if dbusError, ok := err.(dbus.Error); ok { - return strings.Contains(dbusError.Name, "org.freedesktop.systemd1.UnitExists") - } - } - return false -} - -func systemdUnitFromPath(path string) string { - _, unit := filepath.Split(path) - return unit -} - -func readHugeTlbStats(path string) []*stats.HugeTlbStat { - var usage = []*stats.HugeTlbStat{} - var keyUsage = make(map[string]*stats.HugeTlbStat) - f, err := os.Open(path) - if err != nil { - return usage - } - files, err := f.Readdir(-1) - f.Close() - if err != nil { - return usage - } - - for _, file := range files { - if strings.Contains(file.Name(), "hugetlb") && - (strings.HasSuffix(file.Name(), "max") || strings.HasSuffix(file.Name(), "current")) { - var hugeTlb *stats.HugeTlbStat - var ok bool - fileName := strings.Split(file.Name(), ".") - pageSize := fileName[1] - if hugeTlb, ok = keyUsage[pageSize]; !ok { - hugeTlb = &stats.HugeTlbStat{} - } - hugeTlb.Pagesize = pageSize - out, err := ioutil.ReadFile(filepath.Join(path, file.Name())) - if err != nil { - continue - } - var value uint64 - stringVal := strings.TrimSpace(string(out)) - if stringVal == "max" { - value = math.MaxUint64 - } else { - value, err = strconv.ParseUint(stringVal, 10, 64) - } - if err != nil { - continue - } - switch fileName[2] { - case "max": - hugeTlb.Max = value - case "current": - hugeTlb.Current = value - } - keyUsage[pageSize] = hugeTlb - } - } - for _, entry := range keyUsage { - usage = append(usage, entry) - } - return usage -} diff --git a/vendor/github.com/containerd/console/.golangci.yml b/vendor/github.com/containerd/console/.golangci.yml deleted file mode 100644 index fcba5e8..0000000 --- a/vendor/github.com/containerd/console/.golangci.yml +++ /dev/null @@ -1,20 +0,0 @@ -linters: - enable: - - structcheck - - varcheck - - staticcheck - - unconvert - - gofmt - - goimports - - golint - - ineffassign - - vet - - unused - - misspell - disable: - - errcheck - -run: - 
timeout: 3m - skip-dirs: - - vendor diff --git a/vendor/github.com/containerd/console/LICENSE b/vendor/github.com/containerd/console/LICENSE deleted file mode 100644 index 584149b..0000000 --- a/vendor/github.com/containerd/console/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md deleted file mode 100644 index 580b461..0000000 --- a/vendor/github.com/containerd/console/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# console - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/console)](https://pkg.go.dev/github.com/containerd/console) -[![Build Status](https://github.com/containerd/console/workflows/CI/badge.svg)](https://github.com/containerd/console/actions?query=workflow%3ACI) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/console)](https://goreportcard.com/report/github.com/containerd/console) - -Golang package for dealing with consoles. Light on deps and a simple API. - -## Modifying the current process - -```go -current := console.Current() -defer current.Reset() - -if err := current.SetRaw(); err != nil { -} -ws, err := current.Size() -current.Resize(ws) -``` - -## Project details - -console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). 
-As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go deleted file mode 100644 index f989d28..0000000 --- a/vendor/github.com/containerd/console/console.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package console - -import ( - "errors" - "io" - "os" -) - -var ErrNotAConsole = errors.New("provided file is not a console") - -type File interface { - io.ReadWriteCloser - - // Fd returns its file descriptor - Fd() uintptr - // Name returns its file name - Name() string -} - -type Console interface { - File - - // Resize resizes the console to the provided window size - Resize(WinSize) error - // ResizeFrom resizes the calling console to the size of the - // provided console - ResizeFrom(Console) error - // SetRaw sets the console in raw mode - SetRaw() error - // DisableEcho disables echo on the console - DisableEcho() error - // Reset restores the console to its orignal state - Reset() error - // Size returns the window size of the console - Size() (WinSize, error) -} - -// WinSize specifies the window size of the console -type WinSize struct { - // Height of the console - Height uint16 - // Width of the console - Width uint16 - x uint16 - y uint16 -} - -// Current returns the current process' console -func Current() (c Console) { - var err error - // Usually all three streams (stdin, stdout, and stderr) - // are open to the same console, but some might be redirected, - // so try all three. - for _, s := range []*os.File{os.Stderr, os.Stdout, os.Stdin} { - if c, err = ConsoleFromFile(s); err == nil { - return c - } - } - // One of the std streams should always be a console - // for the design of this function. 
- panic(err) -} - -// ConsoleFromFile returns a console using the provided file -// nolint:golint -func ConsoleFromFile(f File) (Console, error) { - if err := checkConsole(f); err != nil { - return nil, err - } - return newMaster(f) -} diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go deleted file mode 100644 index c1c839e..0000000 --- a/vendor/github.com/containerd/console/console_linux.go +++ /dev/null @@ -1,280 +0,0 @@ -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "io" - "os" - "sync" - - "golang.org/x/sys/unix" -) - -const ( - maxEvents = 128 -) - -// Epoller manages multiple epoll consoles using edge-triggered epoll api so we -// dont have to deal with repeated wake-up of EPOLLER or EPOLLHUP. 
-// For more details, see: -// - https://github.com/systemd/systemd/pull/4262 -// - https://github.com/moby/moby/issues/27202 -// -// Example usage of Epoller and EpollConsole can be as follow: -// -// epoller, _ := NewEpoller() -// epollConsole, _ := epoller.Add(console) -// go epoller.Wait() -// var ( -// b bytes.Buffer -// wg sync.WaitGroup -// ) -// wg.Add(1) -// go func() { -// io.Copy(&b, epollConsole) -// wg.Done() -// }() -// // perform I/O on the console -// epollConsole.Shutdown(epoller.CloseConsole) -// wg.Wait() -// epollConsole.Close() -type Epoller struct { - efd int - mu sync.Mutex - fdMapping map[int]*EpollConsole - closeOnce sync.Once -} - -// NewEpoller returns an instance of epoller with a valid epoll fd. -func NewEpoller() (*Epoller, error) { - efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if err != nil { - return nil, err - } - return &Epoller{ - efd: efd, - fdMapping: make(map[int]*EpollConsole), - }, nil -} - -// Add creates an epoll console based on the provided console. The console will -// be registered with EPOLLET (i.e. using edge-triggered notification) and its -// file descriptor will be set to non-blocking mode. After this, user should use -// the return console to perform I/O. 
-func (e *Epoller) Add(console Console) (*EpollConsole, error) { - sysfd := int(console.Fd()) - // Set sysfd to non-blocking mode - if err := unix.SetNonblock(sysfd, true); err != nil { - return nil, err - } - - ev := unix.EpollEvent{ - Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET, - Fd: int32(sysfd), - } - if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil { - return nil, err - } - ef := &EpollConsole{ - Console: console, - sysfd: sysfd, - readc: sync.NewCond(&sync.Mutex{}), - writec: sync.NewCond(&sync.Mutex{}), - } - e.mu.Lock() - e.fdMapping[sysfd] = ef - e.mu.Unlock() - return ef, nil -} - -// Wait starts the loop to wait for its consoles' notifications and signal -// appropriate console that it can perform I/O. -func (e *Epoller) Wait() error { - events := make([]unix.EpollEvent, maxEvents) - for { - n, err := unix.EpollWait(e.efd, events, -1) - if err != nil { - // EINTR: The call was interrupted by a signal handler before either - // any of the requested events occurred or the timeout expired - if err == unix.EINTR { - continue - } - return err - } - for i := 0; i < n; i++ { - ev := &events[i] - // the console is ready to be read from - if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 { - if epfile := e.getConsole(int(ev.Fd)); epfile != nil { - epfile.signalRead() - } - } - // the console is ready to be written to - if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 { - if epfile := e.getConsole(int(ev.Fd)); epfile != nil { - epfile.signalWrite() - } - } - } - } -} - -// CloseConsole unregisters the console's file descriptor from epoll interface -func (e *Epoller) CloseConsole(fd int) error { - e.mu.Lock() - defer e.mu.Unlock() - delete(e.fdMapping, fd) - return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{}) -} - -func (e *Epoller) getConsole(sysfd int) *EpollConsole { - e.mu.Lock() - f := e.fdMapping[sysfd] - e.mu.Unlock() - return f -} - -// Close closes the epoll fd 
-func (e *Epoller) Close() error { - closeErr := os.ErrClosed // default to "file already closed" - e.closeOnce.Do(func() { - closeErr = unix.Close(e.efd) - }) - return closeErr -} - -// EpollConsole acts like a console but registers its file descriptor with an -// epoll fd and uses epoll API to perform I/O. -type EpollConsole struct { - Console - readc *sync.Cond - writec *sync.Cond - sysfd int - closed bool -} - -// Read reads up to len(p) bytes into p. It returns the number of bytes read -// (0 <= n <= len(p)) and any error encountered. -// -// If the console's read returns EAGAIN or EIO, we assume that it's a -// temporary error because the other side went away and wait for the signal -// generated by epoll event to continue. -func (ec *EpollConsole) Read(p []byte) (n int, err error) { - var read int - ec.readc.L.Lock() - defer ec.readc.L.Unlock() - for { - read, err = ec.Console.Read(p[n:]) - n += read - if err != nil { - var hangup bool - if perr, ok := err.(*os.PathError); ok { - hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) - } else { - hangup = (err == unix.EAGAIN || err == unix.EIO) - } - // if the other end disappear, assume this is temporary and wait for the - // signal to continue again. Unless we didnt read anything and the - // console is already marked as closed then we should exit - if hangup && !(n == 0 && len(p) > 0 && ec.closed) { - ec.readc.Wait() - continue - } - } - break - } - // if we didnt read anything then return io.EOF to end gracefully - if n == 0 && len(p) > 0 && err == nil { - err = io.EOF - } - // signal for others that we finished the read - ec.readc.Signal() - return n, err -} - -// Writes len(p) bytes from p to the console. It returns the number of bytes -// written from p (0 <= n <= len(p)) and any error encountered that caused -// the write to stop early. 
-// -// If writes to the console returns EAGAIN or EIO, we assume that it's a -// temporary error because the other side went away and wait for the signal -// generated by epoll event to continue. -func (ec *EpollConsole) Write(p []byte) (n int, err error) { - var written int - ec.writec.L.Lock() - defer ec.writec.L.Unlock() - for { - written, err = ec.Console.Write(p[n:]) - n += written - if err != nil { - var hangup bool - if perr, ok := err.(*os.PathError); ok { - hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) - } else { - hangup = (err == unix.EAGAIN || err == unix.EIO) - } - // if the other end disappears, assume this is temporary and wait for the - // signal to continue again. - if hangup { - ec.writec.Wait() - continue - } - } - // unrecoverable error, break the loop and return the error - break - } - if n < len(p) && err == nil { - err = io.ErrShortWrite - } - // signal for others that we finished the write - ec.writec.Signal() - return n, err -} - -// Shutdown closes the file descriptor and signals call waiters for this fd. -// It accepts a callback which will be called with the console's fd. The -// callback typically will be used to do further cleanup such as unregister the -// console's fd from the epoll interface. -// User should call Shutdown and wait for all I/O operation to be finished -// before closing the console. -func (ec *EpollConsole) Shutdown(close func(int) error) error { - ec.readc.L.Lock() - defer ec.readc.L.Unlock() - ec.writec.L.Lock() - defer ec.writec.L.Unlock() - - ec.readc.Broadcast() - ec.writec.Broadcast() - ec.closed = true - return close(ec.sysfd) -} - -// signalRead signals that the console is readable. -func (ec *EpollConsole) signalRead() { - ec.readc.L.Lock() - ec.readc.Signal() - ec.readc.L.Unlock() -} - -// signalWrite signals that the console is writable. 
-func (ec *EpollConsole) signalWrite() { - ec.writec.L.Lock() - ec.writec.Signal() - ec.writec.L.Unlock() -} diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go deleted file mode 100644 index a081176..0000000 --- a/vendor/github.com/containerd/console/console_unix.go +++ /dev/null @@ -1,156 +0,0 @@ -// +build darwin freebsd linux netbsd openbsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package console - -import ( - "golang.org/x/sys/unix" -) - -// NewPty creates a new pty pair -// The master is returned as the first console and a string -// with the path to the pty slave is returned as the second -func NewPty() (Console, string, error) { - f, err := openpt() - if err != nil { - return nil, "", err - } - slave, err := ptsname(f) - if err != nil { - return nil, "", err - } - if err := unlockpt(f); err != nil { - return nil, "", err - } - m, err := newMaster(f) - if err != nil { - return nil, "", err - } - return m, slave, nil -} - -type master struct { - f File - original *unix.Termios -} - -func (m *master) Read(b []byte) (int, error) { - return m.f.Read(b) -} - -func (m *master) Write(b []byte) (int, error) { - return m.f.Write(b) -} - -func (m *master) Close() error { - return m.f.Close() -} - -func (m *master) Resize(ws WinSize) error { - return tcswinsz(m.f.Fd(), ws) -} - -func (m *master) ResizeFrom(c Console) error { - ws, err := c.Size() - if err != nil { - return err - } - return m.Resize(ws) -} - -func (m *master) Reset() error { - if m.original == nil { - return nil - } - return tcset(m.f.Fd(), m.original) -} - -func (m *master) getCurrent() (unix.Termios, error) { - var termios unix.Termios - if err := tcget(m.f.Fd(), &termios); err != nil { - return unix.Termios{}, err - } - return termios, nil -} - -func (m *master) SetRaw() error { - rawState, err := m.getCurrent() - if err != nil { - return err - } - rawState = cfmakeraw(rawState) - rawState.Oflag = rawState.Oflag | unix.OPOST - return tcset(m.f.Fd(), &rawState) -} - -func (m *master) DisableEcho() error { - rawState, err := m.getCurrent() - if err != nil { - return err - } - rawState.Lflag = rawState.Lflag &^ unix.ECHO - return tcset(m.f.Fd(), &rawState) -} - -func (m *master) Size() (WinSize, error) { - return tcgwinsz(m.f.Fd()) -} - -func (m *master) Fd() uintptr { - return m.f.Fd() -} - -func (m *master) Name() string { - return m.f.Name() -} - -// checkConsole checks if 
the provided file is a console -func checkConsole(f File) error { - var termios unix.Termios - if tcget(f.Fd(), &termios) != nil { - return ErrNotAConsole - } - return nil -} - -func newMaster(f File) (Console, error) { - m := &master{ - f: f, - } - t, err := m.getCurrent() - if err != nil { - return nil, err - } - m.original = &t - return m, nil -} - -// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair -// created by us acts normally. In particular, a not-very-well-known default of -// Linux unix98 ptys is that they have +onlcr by default. While this isn't a -// problem for terminal emulators, because we relay data from the terminal we -// also relay that funky line discipline. -func ClearONLCR(fd uintptr) error { - return setONLCR(fd, false) -} - -// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair -// created by us acts as intended for a terminal emulator. -func SetONLCR(fd uintptr) error { - return setONLCR(fd, true) -} diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go deleted file mode 100644 index 129a928..0000000 --- a/vendor/github.com/containerd/console/console_windows.go +++ /dev/null @@ -1,216 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package console - -import ( - "fmt" - "os" - - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -var ( - vtInputSupported bool - ErrNotImplemented = errors.New("not implemented") -) - -func (m *master) initStdios() { - m.in = windows.Handle(os.Stdin.Fd()) - if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil { - // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. - if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - windows.SetConsoleMode(m.in, m.inMode) - } else { - fmt.Printf("failed to get console mode for stdin: %v\n", err) - } - - m.out = windows.Handle(os.Stdout.Fd()) - if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil { - if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { - m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - } else { - windows.SetConsoleMode(m.out, m.outMode) - } - } else { - fmt.Printf("failed to get console mode for stdout: %v\n", err) - } - - m.err = windows.Handle(os.Stderr.Fd()) - if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil { - if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { - m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - } else { - windows.SetConsoleMode(m.err, m.errMode) - } - } else { - fmt.Printf("failed to get console mode for stderr: %v\n", err) - } -} - -type master struct { - in windows.Handle - inMode uint32 - - out windows.Handle - outMode uint32 - - err windows.Handle - errMode uint32 -} - -func (m *master) SetRaw() error { - if err := makeInputRaw(m.in, m.inMode); err != nil { - return err - } - - // Set StdOut and StdErr to raw mode, we ignore failures since - // 
windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of - // Windows. - - windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN) - - windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN) - - return nil -} - -func (m *master) Reset() error { - for _, s := range []struct { - fd windows.Handle - mode uint32 - }{ - {m.in, m.inMode}, - {m.out, m.outMode}, - {m.err, m.errMode}, - } { - if err := windows.SetConsoleMode(s.fd, s.mode); err != nil { - return errors.Wrap(err, "unable to restore console mode") - } - } - - return nil -} - -func (m *master) Size() (WinSize, error) { - var info windows.ConsoleScreenBufferInfo - err := windows.GetConsoleScreenBufferInfo(m.out, &info) - if err != nil { - return WinSize{}, errors.Wrap(err, "unable to get console info") - } - - winsize := WinSize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -func (m *master) Resize(ws WinSize) error { - return ErrNotImplemented -} - -func (m *master) ResizeFrom(c Console) error { - return ErrNotImplemented -} - -func (m *master) DisableEcho() error { - mode := m.inMode &^ windows.ENABLE_ECHO_INPUT - mode |= windows.ENABLE_PROCESSED_INPUT - mode |= windows.ENABLE_LINE_INPUT - - if err := windows.SetConsoleMode(m.in, mode); err != nil { - return errors.Wrap(err, "unable to set console to disable echo") - } - - return nil -} - -func (m *master) Close() error { - return nil -} - -func (m *master) Read(b []byte) (int, error) { - return os.Stdin.Read(b) -} - -func (m *master) Write(b []byte) (int, error) { - return os.Stdout.Write(b) -} - -func (m *master) Fd() uintptr { - return uintptr(m.in) -} - -// on windows, console can only be made from os.Std{in,out,err}, hence there -// isnt a single name here we can use. Return a dummy "console" value in this -// case should be sufficient. 
-func (m *master) Name() string { - return "console" -} - -// makeInputRaw puts the terminal (Windows Console) connected to the given -// file descriptor into raw mode -func makeInputRaw(fd windows.Handle, mode uint32) error { - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= windows.ENABLE_ECHO_INPUT - mode &^= windows.ENABLE_LINE_INPUT - mode &^= windows.ENABLE_MOUSE_INPUT - mode &^= windows.ENABLE_WINDOW_INPUT - mode &^= windows.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= windows.ENABLE_EXTENDED_FLAGS - mode |= windows.ENABLE_INSERT_MODE - mode |= windows.ENABLE_QUICK_EDIT_MODE - - if vtInputSupported { - mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT - } - - if err := windows.SetConsoleMode(fd, mode); err != nil { - return errors.Wrap(err, "unable to set console to raw mode") - } - - return nil -} - -func checkConsole(f File) error { - var mode uint32 - if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil { - return err - } - return nil -} - -func newMaster(f File) (Console, error) { - if f != os.Stdin && f != os.Stdout && f != os.Stderr { - return nil, errors.New("creating a console from a file is not supported on windows") - } - m := &master{} - m.initStdios() - return m, nil -} diff --git a/vendor/github.com/containerd/console/pty_freebsd_cgo.go b/vendor/github.com/containerd/console/pty_freebsd_cgo.go deleted file mode 100644 index cbd3cd7..0000000 --- a/vendor/github.com/containerd/console/pty_freebsd_cgo.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build freebsd,cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" -) - -/* -#include -#include -#include -*/ -import "C" - -// openpt allocates a new pseudo-terminal and establishes a connection with its -// control device. -func openpt() (*os.File, error) { - fd, err := C.posix_openpt(C.O_RDWR) - if err != nil { - return nil, fmt.Errorf("posix_openpt: %w", err) - } - if _, err := C.grantpt(fd); err != nil { - C.close(fd) - return nil, fmt.Errorf("grantpt: %w", err) - } - return os.NewFile(uintptr(fd), ""), nil -} diff --git a/vendor/github.com/containerd/console/pty_freebsd_nocgo.go b/vendor/github.com/containerd/console/pty_freebsd_nocgo.go deleted file mode 100644 index b5e4318..0000000 --- a/vendor/github.com/containerd/console/pty_freebsd_nocgo.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build freebsd,!cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "os" -) - -// -// Implementing the functions below requires cgo support. 
Non-cgo stubs -// versions are defined below to enable cross-compilation of source code -// that depends on these functions, but the resultant cross-compiled -// binaries cannot actually be used. If the stub function(s) below are -// actually invoked they will display an error message and cause the -// calling process to exit. -// - -func openpt() (*os.File, error) { - panic("openpt() support requires cgo.") -} diff --git a/vendor/github.com/containerd/console/tc_freebsd_cgo.go b/vendor/github.com/containerd/console/tc_freebsd_cgo.go deleted file mode 100644 index 0f3d272..0000000 --- a/vendor/github.com/containerd/console/tc_freebsd_cgo.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build freebsd,cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" - - "golang.org/x/sys/unix" -) - -/* -#include -#include -*/ -import "C" - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - fd := C.int(f.Fd()) - if _, err := C.unlockpt(fd); err != nil { - C.close(fd) - return fmt.Errorf("unlockpt: %w", err) - } - return nil -} - -// ptsname retrieves the name of the first available pts for the given master. 
-func ptsname(f *os.File) (string, error) { - n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) - if err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} diff --git a/vendor/github.com/containerd/console/tc_freebsd_nocgo.go b/vendor/github.com/containerd/console/tc_freebsd_nocgo.go deleted file mode 100644 index 087fc15..0000000 --- a/vendor/github.com/containerd/console/tc_freebsd_nocgo.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build freebsd,!cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// -// Implementing the functions below requires cgo support. Non-cgo stubs -// versions are defined below to enable cross-compilation of source code -// that depends on these functions, but the resultant cross-compiled -// binaries cannot actually be used. If the stub function(s) below are -// actually invoked they will display an error message and cause the -// calling process to exit. -// - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - panic("unlockpt() support requires cgo.") -} - -// ptsname retrieves the name of the first available pts for the given master. 
-func ptsname(f *os.File) (string, error) { - n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) - if err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go deleted file mode 100644 index 7d552ea..0000000 --- a/vendor/github.com/containerd/console/tc_linux.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TCGETS - cmdTcSet = unix.TCSETS -) - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - var u int32 - // XXX do not use unix.IoctlSetPointerInt here, see commit dbd69c59b81. - if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 { - return err - } - return nil -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - var u uint32 - // XXX do not use unix.IoctlGetInt here, see commit dbd69c59b81. 
- if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", u), nil -} diff --git a/vendor/github.com/containerd/console/tc_netbsd.go b/vendor/github.com/containerd/console/tc_netbsd.go deleted file mode 100644 index 71227ae..0000000 --- a/vendor/github.com/containerd/console/tc_netbsd.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "bytes" - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -// This does not exist on NetBSD, it does not allocate controlling terminals on open -func unlockpt(f *os.File) error { - return nil -} - -// ptsname retrieves the name of the first available pts for the given master. 
-func ptsname(f *os.File) (string, error) { - ptm, err := unix.IoctlGetPtmget(int(f.Fd()), unix.TIOCPTSNAME) - if err != nil { - return "", err - } - return string(ptm.Sn[:bytes.IndexByte(ptm.Sn[:], 0)]), nil -} diff --git a/vendor/github.com/containerd/console/tc_openbsd_cgo.go b/vendor/github.com/containerd/console/tc_openbsd_cgo.go deleted file mode 100644 index f0cec06..0000000 --- a/vendor/github.com/containerd/console/tc_openbsd_cgo.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build openbsd,cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -//#include -import "C" - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - ptspath, err := C.ptsname(C.int(f.Fd())) - if err != nil { - return "", err - } - return C.GoString(ptspath), nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. 
-func unlockpt(f *os.File) error { - if _, err := C.grantpt(C.int(f.Fd())); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go b/vendor/github.com/containerd/console/tc_openbsd_nocgo.go deleted file mode 100644 index daccce2..0000000 --- a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build openbsd,!cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// -// Implementing the functions below requires cgo support. Non-cgo stubs -// versions are defined below to enable cross-compilation of source code -// that depends on these functions, but the resultant cross-compiled -// binaries cannot actually be used. If the stub function(s) below are -// actually invoked they will display an error message and cause the -// calling process to exit. 
-// - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -func ptsname(f *os.File) (string, error) { - panic("ptsname() support requires cgo.") -} - -func unlockpt(f *os.File) error { - panic("unlockpt() support requires cgo.") -} diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go deleted file mode 100644 index e36a68e..0000000 --- a/vendor/github.com/containerd/console/tc_solaris_cgo.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build solaris,cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -//#include -import "C" - -const ( - cmdTcGet = unix.TCGETS - cmdTcSet = unix.TCSETS -) - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - ptspath, err := C.ptsname(C.int(f.Fd())) - if err != nil { - return "", err - } - return C.GoString(ptspath), nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. 
-func unlockpt(f *os.File) error { - if _, err := C.grantpt(C.int(f.Fd())); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go deleted file mode 100644 index eb0bd2c..0000000 --- a/vendor/github.com/containerd/console/tc_solaris_nocgo.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build solaris,!cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// -// Implementing the functions below requires cgo support. Non-cgo stubs -// versions are defined below to enable cross-compilation of source code -// that depends on these functions, but the resultant cross-compiled -// binaries cannot actually be used. If the stub function(s) below are -// actually invoked they will display an error message and cause the -// calling process to exit. 
-// - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TCGETS - cmdTcSet = unix.TCSETS -) - -func ptsname(f *os.File) (string, error) { - panic("ptsname() support requires cgo.") -} - -func unlockpt(f *os.File) error { - panic("unlockpt() support requires cgo.") -} diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go deleted file mode 100644 index 5cd4c55..0000000 --- a/vendor/github.com/containerd/console/tc_unix.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build darwin freebsd linux netbsd openbsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package console - -import ( - "golang.org/x/sys/unix" -) - -func tcget(fd uintptr, p *unix.Termios) error { - termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet) - if err != nil { - return err - } - *p = *termios - return nil -} - -func tcset(fd uintptr, p *unix.Termios) error { - return unix.IoctlSetTermios(int(fd), cmdTcSet, p) -} - -func tcgwinsz(fd uintptr) (WinSize, error) { - var ws WinSize - - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - if err != nil { - return ws, err - } - - // Translate from unix.Winsize to console.WinSize - ws.Height = uws.Row - ws.Width = uws.Col - ws.x = uws.Xpixel - ws.y = uws.Ypixel - return ws, nil -} - -func tcswinsz(fd uintptr, ws WinSize) error { - // Translate from console.WinSize to unix.Winsize - - var uws unix.Winsize - uws.Row = ws.Height - uws.Col = ws.Width - uws.Xpixel = ws.x - uws.Ypixel = ws.y - - return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws) -} - -func setONLCR(fd uintptr, enable bool) error { - var termios unix.Termios - if err := tcget(fd, &termios); err != nil { - return err - } - if enable { - // Set +onlcr so we can act like a real terminal - termios.Oflag |= unix.ONLCR - } else { - // Set -onlcr so we don't have to deal with \r. 
- termios.Oflag &^= unix.ONLCR - } - return tcset(fd, &termios) -} - -func cfmakeraw(t unix.Termios) unix.Termios { - t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - t.Oflag &^= unix.OPOST - t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - t.Cflag &^= (unix.CSIZE | unix.PARENB) - t.Cflag &^= unix.CS8 - t.Cc[unix.VMIN] = 1 - t.Cc[unix.VTIME] = 0 - - return t -} diff --git a/vendor/github.com/containerd/continuity/.gitignore b/vendor/github.com/containerd/continuity/.gitignore deleted file mode 100644 index 6921662..0000000 --- a/vendor/github.com/containerd/continuity/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -bin - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/containerd/continuity/.golangci.yml b/vendor/github.com/containerd/continuity/.golangci.yml deleted file mode 100644 index 92a7490..0000000 --- a/vendor/github.com/containerd/continuity/.golangci.yml +++ /dev/null @@ -1,18 +0,0 @@ -linters: - enable: - - structcheck - - varcheck - - staticcheck - - unconvert - - gofmt - - goimports - - golint - - ineffassign - - vet - - unused - - misspell - disable: - - errcheck - -run: - timeout: 3m diff --git a/vendor/github.com/containerd/continuity/.mailmap b/vendor/github.com/containerd/continuity/.mailmap deleted file mode 100644 index f48ae41..0000000 --- a/vendor/github.com/containerd/continuity/.mailmap +++ /dev/null @@ -1 +0,0 @@ -Stephen J Day Stephen Day diff --git a/vendor/github.com/containerd/continuity/AUTHORS b/vendor/github.com/containerd/continuity/AUTHORS deleted file mode 100644 index 376ceb9..0000000 --- a/vendor/github.com/containerd/continuity/AUTHORS +++ /dev/null @@ -1,40 +0,0 @@ 
-Aaron Lehmann -Akash Gupta -Akihiro Suda -Akihiro Suda -Akihiro Suda -Andrew Pennebaker -Brandon Philips -Brian Goff -Christopher Jones -Daniel, Dao Quang Minh -Darren Stahl -Derek McGowan -Derek McGowan -Edward Pilatowicz -Ian Campbell -Ivan Markin -Justin Cormack -Justin Cummins -Kasper Fabæch Brandt -Kir Kolyshkin -Michael Crosby -Michael Crosby -Michael Wan -Mike Brown -Niels de Vos -Phil Estes -Phil Estes -Samuel Karp -Sam Whited -Sebastiaan van Stijn -Shengjing Zhu -Stephen J Day -Tibor Vass -Tobias Klauser -Tom Faulhaber -Tonis Tiigi -Trevor Porter -Wei Fu -Wilbert van de Ridder -Xiaodong Ye diff --git a/vendor/github.com/containerd/continuity/LICENSE b/vendor/github.com/containerd/continuity/LICENSE deleted file mode 100644 index 584149b..0000000 --- a/vendor/github.com/containerd/continuity/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/continuity/Makefile b/vendor/github.com/containerd/continuity/Makefile deleted file mode 100644 index 256a0b0..0000000 --- a/vendor/github.com/containerd/continuity/Makefile +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright The containerd Authors. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Set an output prefix, which is the local directory if not specified -PREFIX?=$(shell pwd) - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) - -GO_LDFLAGS=-ldflags "-X `go list -mod=vendor ./version`.Version=$(VERSION)" - -PKG=github.com/containerd/continuity - -PACKAGES=$(shell go list -mod=vendor ./... | grep -v /vendor/) -TEST_REQUIRES_ROOT_PACKAGES=$(filter \ - ${PACKAGES}, \ - $(shell \ - for f in $$(git grep -l testutil.RequiresRoot | grep -v Makefile); do \ - d="$$(dirname $$f)"; \ - [ "$$d" = "." ] && echo "${PKG}" && continue; \ - echo "${PKG}/$$d"; \ - done | sort -u) \ - ) - -.PHONY: clean all lint build test binaries -.DEFAULT: default - -all: AUTHORS clean lint build test binaries - -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - ./version/version.sh > $@ - -${PREFIX}/bin/continuity: version/version.go $(shell find . 
-type f -name '*.go') - @echo "+ $@" - @go build -mod=vendor -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/continuity - -generate: - go generate -mod=vendor $(PACKAGES) - -lint: - @echo "+ $@" - @golangci-lint run - -build: - @echo "+ $@" - @go build -mod=vendor -v ${GO_LDFLAGS} $(PACKAGES) - -test: - @echo "+ $@" - @go test -mod=vendor $(PACKAGES) - -root-test: - @echo "+ $@" - @go test ${TEST_REQUIRES_ROOT_PACKAGES} -test.root - -test-compile: - @echo "+ $@" - @for pkg in $(PACKAGES); do go test -mod=vendor -c $$pkg; done - -binaries: ${PREFIX}/bin/continuity - @echo "+ $@" - @if [ x$$GOOS = xwindows ]; then echo "+ continuity -> continuity.exe"; mv ${PREFIX}/bin/continuity ${PREFIX}/bin/continuity.exe; fi - -clean: - @echo "+ $@" - @rm -rf "${PREFIX}/bin/continuity" "${PREFIX}/bin/continuity.exe" - diff --git a/vendor/github.com/containerd/continuity/README.md b/vendor/github.com/containerd/continuity/README.md deleted file mode 100644 index 6fa50e1..0000000 --- a/vendor/github.com/containerd/continuity/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# continuity - -[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity) -[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity) - -A transport-agnostic, filesystem metadata manifest system - -This project is a staging area for experiments in providing transport agnostic -metadata storage. - -Please see https://github.com/opencontainers/specs/issues/11 for more details. - -## Manifest Format - -A continuity manifest encodes filesystem metadata in Protocol Buffers. -Please refer to [proto/manifest.proto](proto/manifest.proto). - -## Usage - -Build: - -```console -$ make -``` - -Create a manifest (of this repo itself): - -```console -$ ./bin/continuity build . > /tmp/a.pb -``` - -Dump a manifest: - -```console -$ ./bin/continuity ls /tmp/a.pb -... 
--rw-rw-r-- 270 B /.gitignore --rw-rw-r-- 88 B /.mailmap --rw-rw-r-- 187 B /.travis.yml --rw-rw-r-- 359 B /AUTHORS --rw-rw-r-- 11 kB /LICENSE --rw-rw-r-- 1.5 kB /Makefile -... --rw-rw-r-- 986 B /testutil_test.go -drwxrwxr-x 0 B /version --rw-rw-r-- 478 B /version/version.go -``` - -Verify a manifest: - -```console -$ ./bin/continuity verify . /tmp/a.pb -``` - -Break the directory and restore using the manifest: -```console -$ chmod 777 Makefile -$ ./bin/continuity verify . /tmp/a.pb -2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r-- -$ ./bin/continuity apply . /tmp/a.pb -$ stat -c %a Makefile -664 -$ ./bin/continuity verify . /tmp/a.pb -``` - -## Platforms - -continuity primarily targets Linux. continuity may compile for and work on -other operating systems, but those platforms are not tested. - -## Contribution Guide -### Building Proto Package - -If you change the proto file you will need to rebuild the generated Go with `go generate`. - -```console -$ go generate ./proto -``` - -## Project details - -continuity is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/continuity/context.go b/vendor/github.com/containerd/continuity/context.go deleted file mode 100644 index 2166142..0000000 --- a/vendor/github.com/containerd/continuity/context.go +++ /dev/null @@ -1,667 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "path/filepath" - "strings" - - "github.com/containerd/continuity/devices" - driverpkg "github.com/containerd/continuity/driver" - "github.com/containerd/continuity/pathdriver" - - "github.com/opencontainers/go-digest" -) - -var ( - // ErrNotFound represents the resource not found - ErrNotFound = fmt.Errorf("not found") - // ErrNotSupported represents the resource not supported - ErrNotSupported = fmt.Errorf("not supported") -) - -// Context represents a file system context for accessing resources. The -// responsibility of the context is to convert system specific resources to -// generic Resource objects. Most of this is safe path manipulation, as well -// as extraction of resource details. -type Context interface { - Apply(Resource) error - Verify(Resource) error - Resource(string, os.FileInfo) (Resource, error) - Walk(filepath.WalkFunc) error -} - -// SymlinkPath is intended to give the symlink target value -// in a root context. Target and linkname are absolute paths -// not under the given root. -type SymlinkPath func(root, linkname, target string) (string, error) - -// ContextOptions represents options to create a new context. -type ContextOptions struct { - Digester Digester - Driver driverpkg.Driver - PathDriver pathdriver.PathDriver - Provider ContentProvider -} - -// context represents a file system context for accessing resources. 
-// Generally, all path qualified access and system considerations should land -// here. -type context struct { - driver driverpkg.Driver - pathDriver pathdriver.PathDriver - root string - digester Digester - provider ContentProvider -} - -// NewContext returns a Context associated with root. The default driver will -// be used, as returned by NewDriver. -func NewContext(root string) (Context, error) { - return NewContextWithOptions(root, ContextOptions{}) -} - -// NewContextWithOptions returns a Context associate with the root. -func NewContextWithOptions(root string, options ContextOptions) (Context, error) { - // normalize to absolute path - pathDriver := options.PathDriver - if pathDriver == nil { - pathDriver = pathdriver.LocalPathDriver - } - - root = pathDriver.FromSlash(root) - root, err := pathDriver.Abs(pathDriver.Clean(root)) - if err != nil { - return nil, err - } - - driver := options.Driver - if driver == nil { - driver, err = driverpkg.NewSystemDriver() - if err != nil { - return nil, err - } - } - - digester := options.Digester - if digester == nil { - digester = simpleDigester{digest.Canonical} - } - - // Check the root directory. Need to be a little careful here. We are - // allowing a link for now, but this may have odd behavior when - // canonicalizing paths. As long as all files are opened through the link - // path, this should be okay. - fi, err := driver.Stat(root) - if err != nil { - return nil, err - } - - if !fi.IsDir() { - return nil, &os.PathError{Op: "NewContext", Path: root, Err: os.ErrInvalid} - } - - return &context{ - root: root, - driver: driver, - pathDriver: pathDriver, - digester: digester, - provider: options.Provider, - }, nil -} - -// Resource returns the resource as path p, populating the entry with info -// from fi. The path p should be the path of the resource in the context, -// typically obtained through Walk or from the value of Resource.Path(). If fi -// is nil, it will be resolved. 
-func (c *context) Resource(p string, fi os.FileInfo) (Resource, error) { - fp, err := c.fullpath(p) - if err != nil { - return nil, err - } - - if fi == nil { - fi, err = c.driver.Lstat(fp) - if err != nil { - return nil, err - } - } - - base, err := newBaseResource(p, fi) - if err != nil { - return nil, err - } - - base.xattrs, err = c.resolveXAttrs(fp, fi, base) - if err == ErrNotSupported { - log.Printf("resolving xattrs on %s not supported", fp) - } else if err != nil { - return nil, err - } - - // TODO(stevvooe): Handle windows alternate data streams. - - if fi.Mode().IsRegular() { - dgst, err := c.digest(p) - if err != nil { - return nil, err - } - - return newRegularFile(*base, base.paths, fi.Size(), dgst) - } - - if fi.Mode().IsDir() { - return newDirectory(*base) - } - - if fi.Mode()&os.ModeSymlink != 0 { - // We handle relative links vs absolute links by including a - // beginning slash for absolute links. Effectively, the bundle's - // root is treated as the absolute link anchor. - target, err := c.driver.Readlink(fp) - if err != nil { - return nil, err - } - - return newSymLink(*base, target) - } - - if fi.Mode()&os.ModeNamedPipe != 0 { - return newNamedPipe(*base, base.paths) - } - - if fi.Mode()&os.ModeDevice != 0 { - deviceDriver, ok := c.driver.(driverpkg.DeviceInfoDriver) - if !ok { - log.Printf("device extraction not supported %s", fp) - return nil, ErrNotSupported - } - - // character and block devices merely need to recover the - // major/minor device number. 
- major, minor, err := deviceDriver.DeviceInfo(fi) - if err != nil { - return nil, err - } - - return newDevice(*base, base.paths, major, minor) - } - - log.Printf("%q (%v) is not supported", fp, fi.Mode()) - return nil, ErrNotFound -} - -func (c *context) verifyMetadata(resource, target Resource) error { - if target.Mode() != resource.Mode() { - return fmt.Errorf("resource %q has incorrect mode: %v != %v", target.Path(), target.Mode(), resource.Mode()) - } - - if target.UID() != resource.UID() { - return fmt.Errorf("unexpected uid for %q: %v != %v", target.Path(), target.UID(), resource.GID()) - } - - if target.GID() != resource.GID() { - return fmt.Errorf("unexpected gid for %q: %v != %v", target.Path(), target.GID(), target.GID()) - } - - if xattrer, ok := resource.(XAttrer); ok { - txattrer, tok := target.(XAttrer) - if !tok { - return fmt.Errorf("resource %q has xattrs but target does not support them", resource.Path()) - } - - // For xattrs, only ensure that we have those defined in the resource - // and their values match. We can ignore other xattrs. In other words, - // we only verify that target has the subset defined by resource. - txattrs := txattrer.XAttrs() - for attr, value := range xattrer.XAttrs() { - tvalue, ok := txattrs[attr] - if !ok { - return fmt.Errorf("resource %q target missing xattr %q", resource.Path(), attr) - } - - if !bytes.Equal(value, tvalue) { - return fmt.Errorf("xattr %q value differs for resource %q", attr, resource.Path()) - } - } - } - - switch r := resource.(type) { - case RegularFile: - // TODO(stevvooe): Another reason to use a record-based approach. We - // have to do another type switch to get this to work. This could be - // fixed with an Equal function, but let's study this a little more to - // be sure. 
- t, ok := target.(RegularFile) - if !ok { - return fmt.Errorf("resource %q target not a regular file", r.Path()) - } - - if t.Size() != r.Size() { - return fmt.Errorf("resource %q target has incorrect size: %v != %v", t.Path(), t.Size(), r.Size()) - } - case Directory: - t, ok := target.(Directory) - if !ok { - return fmt.Errorf("resource %q target not a directory", t.Path()) - } - case SymLink: - t, ok := target.(SymLink) - if !ok { - return fmt.Errorf("resource %q target not a symlink", t.Path()) - } - - if t.Target() != r.Target() { - return fmt.Errorf("resource %q target has mismatched target: %q != %q", t.Path(), t.Target(), r.Target()) - } - case Device: - t, ok := target.(Device) - if !ok { - return fmt.Errorf("resource %q is not a device", t.Path()) - } - - if t.Major() != r.Major() || t.Minor() != r.Minor() { - return fmt.Errorf("resource %q has mismatched major/minor numbers: %d,%d != %d,%d", t.Path(), t.Major(), t.Minor(), r.Major(), r.Minor()) - } - case NamedPipe: - t, ok := target.(NamedPipe) - if !ok { - return fmt.Errorf("resource %q is not a named pipe", t.Path()) - } - default: - return fmt.Errorf("cannot verify resource: %v", resource) - } - - return nil -} - -// Verify the resource in the context. An error will be returned a discrepancy -// is found. 
-func (c *context) Verify(resource Resource) error { - fp, err := c.fullpath(resource.Path()) - if err != nil { - return err - } - - fi, err := c.driver.Lstat(fp) - if err != nil { - return err - } - - target, err := c.Resource(resource.Path(), fi) - if err != nil { - return err - } - - if target.Path() != resource.Path() { - return fmt.Errorf("resource paths do not match: %q != %q", target.Path(), resource.Path()) - } - - if err := c.verifyMetadata(resource, target); err != nil { - return err - } - - if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable { - hardlinkKey, err := newHardlinkKey(fi) - if err == errNotAHardLink { - if len(h.Paths()) > 1 { - return fmt.Errorf("%q is not a hardlink to %q", h.Paths()[1], resource.Path()) - } - } else if err != nil { - return err - } - - for _, path := range h.Paths()[1:] { - fpLink, err := c.fullpath(path) - if err != nil { - return err - } - - fiLink, err := c.driver.Lstat(fpLink) - if err != nil { - return err - } - - targetLink, err := c.Resource(path, fiLink) - if err != nil { - return err - } - - hardlinkKeyLink, err := newHardlinkKey(fiLink) - if err != nil { - return err - } - - if hardlinkKeyLink != hardlinkKey { - return fmt.Errorf("%q is not a hardlink to %q", path, resource.Path()) - } - - if err := c.verifyMetadata(resource, targetLink); err != nil { - return err - } - } - } - - switch r := resource.(type) { - case RegularFile: - t, ok := target.(RegularFile) - if !ok { - return fmt.Errorf("resource %q target not a regular file", r.Path()) - } - - // TODO(stevvooe): This may need to get a little more sophisticated - // for digest comparison. We may want to actually calculate the - // provided digests, rather than the implementations having an - // overlap. 
- if !digestsMatch(t.Digests(), r.Digests()) { - return fmt.Errorf("digests for resource %q do not match: %v != %v", t.Path(), t.Digests(), r.Digests()) - } - } - - return nil -} - -func (c *context) checkoutFile(fp string, rf RegularFile) error { - if c.provider == nil { - return fmt.Errorf("no file provider") - } - var ( - r io.ReadCloser - err error - ) - for _, dgst := range rf.Digests() { - r, err = c.provider.Reader(dgst) - if err == nil { - break - } - } - if err != nil { - return fmt.Errorf("file content could not be provided: %v", err) - } - defer r.Close() - - return atomicWriteFile(fp, r, rf.Size(), rf.Mode()) -} - -// Apply the resource to the contexts. An error will be returned if the -// operation fails. Depending on the resource type, the resource may be -// created. For resource that cannot be resolved, an error will be returned. -func (c *context) Apply(resource Resource) error { - fp, err := c.fullpath(resource.Path()) - if err != nil { - return err - } - - if !strings.HasPrefix(fp, c.root) { - return fmt.Errorf("resource %v escapes root", resource) - } - - var chmod = true - fi, err := c.driver.Lstat(fp) - if err != nil { - if !os.IsNotExist(err) { - return err - } - } - - switch r := resource.(type) { - case RegularFile: - if fi == nil { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - chmod = false - } else { - if !fi.Mode().IsRegular() { - return fmt.Errorf("file %q should be a regular file, but is not", resource.Path()) - } - if fi.Size() != r.Size() { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - } else { - for _, dgst := range r.Digests() { - f, err := os.Open(fp) - if err != nil { - return fmt.Errorf("failure opening file for read %q: %v", resource.Path(), err) - } - compared, err := dgst.Algorithm().FromReader(f) - if err == nil && dgst != compared { - if err := 
c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - break - } - if err1 := f.Close(); err == nil { - err = err1 - } - if err != nil { - return fmt.Errorf("error checking digest for %q: %v", resource.Path(), err) - } - } - } - } - case Directory: - if fi == nil { - if err := c.driver.Mkdir(fp, resource.Mode()); err != nil { - return err - } - } else if !fi.Mode().IsDir() { - return fmt.Errorf("%q should be a directory, but is not", resource.Path()) - } - - case SymLink: - var target string // only possibly set if target resource is a symlink - - if fi != nil { - if fi.Mode()&os.ModeSymlink != 0 { - target, err = c.driver.Readlink(fp) - if err != nil { - return err - } - } - } - - if target != r.Target() { - if fi != nil { - if err := c.driver.Remove(fp); err != nil { // RemoveAll in case of directory? - return err - } - } - - if err := c.driver.Symlink(r.Target(), fp); err != nil { - return err - } - } - - case Device: - if fi == nil { - if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil { - return err - } - } else if (fi.Mode() & os.ModeDevice) == 0 { - return fmt.Errorf("%q should be a device, but is not", resource.Path()) - } else { - major, minor, err := devices.DeviceInfo(fi) - if err != nil { - return err - } - if major != r.Major() || minor != r.Minor() { - if err := c.driver.Remove(fp); err != nil { - return err - } - - if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil { - return err - } - } - } - - case NamedPipe: - if fi == nil { - if err := c.driver.Mkfifo(fp, resource.Mode()); err != nil { - return err - } - } else if (fi.Mode() & os.ModeNamedPipe) == 0 { - return fmt.Errorf("%q should be a named pipe, but is not", resource.Path()) - } - } - - if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable { - for _, path := range h.Paths() { - if path == resource.Path() { - continue - } - - lp, err := 
c.fullpath(path) - if err != nil { - return err - } - - if _, fi := c.driver.Lstat(lp); fi == nil { - c.driver.Remove(lp) - } - if err := c.driver.Link(fp, lp); err != nil { - return err - } - } - } - - // Update filemode if file was not created - if chmod { - if err := c.driver.Lchmod(fp, resource.Mode()); err != nil { - return err - } - } - - if err := c.driver.Lchown(fp, resource.UID(), resource.GID()); err != nil { - return err - } - - if xattrer, ok := resource.(XAttrer); ok { - // For xattrs, only ensure that we have those defined in the resource - // and their values are set. We can ignore other xattrs. In other words, - // we only set xattres defined by resource but never remove. - - if _, ok := resource.(SymLink); ok { - lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver) - if !ok { - return fmt.Errorf("unsupported symlink xattr for resource %q", resource.Path()) - } - if err := lxattrDriver.LSetxattr(fp, xattrer.XAttrs()); err != nil { - return err - } - } else { - xattrDriver, ok := c.driver.(driverpkg.XAttrDriver) - if !ok { - return fmt.Errorf("unsupported xattr for resource %q", resource.Path()) - } - if err := xattrDriver.Setxattr(fp, xattrer.XAttrs()); err != nil { - return err - } - } - } - - return nil -} - -// Walk provides a convenience function to call filepath.Walk correctly for -// the context. Otherwise identical to filepath.Walk, the path argument is -// corrected to be contained within the context. -func (c *context) Walk(fn filepath.WalkFunc) error { - root := c.root - fi, err := c.driver.Lstat(c.root) - if err == nil && fi.Mode()&os.ModeSymlink != 0 { - root, err = c.driver.Readlink(c.root) - if err != nil { - return err - } - } - return c.pathDriver.Walk(root, func(p string, fi os.FileInfo, _ error) error { - contained, err := c.containWithRoot(p, root) - return fn(contained, fi, err) - }) -} - -// fullpath returns the system path for the resource, joined with the context -// root. The path p must be a part of the context. 
-func (c *context) fullpath(p string) (string, error) { - p = c.pathDriver.Join(c.root, p) - if !strings.HasPrefix(p, c.root) { - return "", fmt.Errorf("invalid context path") - } - - return p, nil -} - -// containWithRoot cleans and santizes the filesystem path p to be an absolute path, -// effectively relative to the passed root. Extra care should be used when calling this -// instead of contain. This is needed for Walk, as if context root is a symlink, -// it must be evaluated prior to the Walk -func (c *context) containWithRoot(p string, root string) (string, error) { - sanitized, err := c.pathDriver.Rel(root, p) - if err != nil { - return "", err - } - - // ZOMBIES(stevvooe): In certain cases, we may want to remap these to a - // "containment error", so the caller can decide what to do. - return c.pathDriver.Join("/", c.pathDriver.Clean(sanitized)), nil -} - -// digest returns the digest of the file at path p, relative to the root. -func (c *context) digest(p string) (digest.Digest, error) { - f, err := c.driver.Open(c.pathDriver.Join(c.root, p)) - if err != nil { - return "", err - } - defer f.Close() - - return c.digester.Digest(f) -} - -// resolveXAttrs attempts to resolve the extended attributes for the resource -// at the path fp, which is the full path to the resource. If the resource -// cannot have xattrs, nil will be returned. 
-func (c *context) resolveXAttrs(fp string, fi os.FileInfo, base *resource) (map[string][]byte, error) { - if fi.Mode().IsRegular() || fi.Mode().IsDir() { - xattrDriver, ok := c.driver.(driverpkg.XAttrDriver) - if !ok { - log.Println("xattr extraction not supported") - return nil, ErrNotSupported - } - - return xattrDriver.Getxattr(fp) - } - - if fi.Mode()&os.ModeSymlink != 0 { - lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver) - if !ok { - log.Println("xattr extraction for symlinks not supported") - return nil, ErrNotSupported - } - - return lxattrDriver.LGetxattr(fp) - } - - return nil, nil -} diff --git a/vendor/github.com/containerd/continuity/devices/devices.go b/vendor/github.com/containerd/continuity/devices/devices.go deleted file mode 100644 index e4d4a03..0000000 --- a/vendor/github.com/containerd/continuity/devices/devices.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package devices - -import "fmt" - -var ErrNotSupported = fmt.Errorf("not supported") diff --git a/vendor/github.com/containerd/continuity/devices/devices_unix.go b/vendor/github.com/containerd/continuity/devices/devices_unix.go deleted file mode 100644 index 225a04b..0000000 --- a/vendor/github.com/containerd/continuity/devices/devices_unix.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build linux darwin freebsd solaris - -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package devices - -import ( - "fmt" - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return 0, 0, fmt.Errorf("cannot extract device from os.FileInfo") - } - - //nolint:unconvert - dev := uint64(sys.Rdev) - return uint64(unix.Major(dev)), uint64(unix.Minor(dev)), nil -} - -// mknod provides a shortcut for syscall.Mknod -func Mknod(p string, mode os.FileMode, maj, min int) error { - var ( - m = syscallMode(mode.Perm()) - dev uint64 - ) - - if mode&os.ModeDevice != 0 { - dev = unix.Mkdev(uint32(maj), uint32(min)) - - if mode&os.ModeCharDevice != 0 { - m |= unix.S_IFCHR - } else { - m |= unix.S_IFBLK - } - } else if mode&os.ModeNamedPipe != 0 { - m |= unix.S_IFIFO - } - - return mknod(p, m, dev) -} - -// syscallMode returns the syscall-specific mode bits from Go's portable mode bits. 
-func syscallMode(i os.FileMode) (o uint32) { - o |= uint32(i.Perm()) - if i&os.ModeSetuid != 0 { - o |= unix.S_ISUID - } - if i&os.ModeSetgid != 0 { - o |= unix.S_ISGID - } - if i&os.ModeSticky != 0 { - o |= unix.S_ISVTX - } - return -} diff --git a/vendor/github.com/containerd/continuity/devices/mknod_freebsd.go b/vendor/github.com/containerd/continuity/devices/mknod_freebsd.go deleted file mode 100644 index 33d18ec..0000000 --- a/vendor/github.com/containerd/continuity/devices/mknod_freebsd.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build freebsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package devices - -import "golang.org/x/sys/unix" - -func mknod(path string, mode uint32, dev uint64) (err error) { - return unix.Mknod(path, mode, dev) -} diff --git a/vendor/github.com/containerd/continuity/digests.go b/vendor/github.com/containerd/continuity/digests.go deleted file mode 100644 index c1b699f..0000000 --- a/vendor/github.com/containerd/continuity/digests.go +++ /dev/null @@ -1,100 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "io" - "sort" - - "github.com/opencontainers/go-digest" -) - -// Digester produces a digest for a given read stream -type Digester interface { - Digest(io.Reader) (digest.Digest, error) -} - -// ContentProvider produces a read stream for a given digest -type ContentProvider interface { - Reader(digest.Digest) (io.ReadCloser, error) -} - -type simpleDigester struct { - algorithm digest.Algorithm -} - -func (sd simpleDigester) Digest(r io.Reader) (digest.Digest, error) { - digester := sd.algorithm.Digester() - - if _, err := io.Copy(digester.Hash(), r); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// uniqifyDigests sorts and uniqifies the provided digest, ensuring that the -// digests are not repeated and no two digests with the same algorithm have -// different values. Because a stable sort is used, this has the effect of -// "zipping" digest collections from multiple resources. -func uniqifyDigests(digests ...digest.Digest) ([]digest.Digest, error) { - sort.Stable(digestSlice(digests)) // stable sort is important for the behavior here. - seen := map[digest.Digest]struct{}{} - algs := map[digest.Algorithm][]digest.Digest{} // detect different digests. 
- - var out []digest.Digest - // uniqify the digests - for _, d := range digests { - if _, ok := seen[d]; ok { - continue - } - - seen[d] = struct{}{} - algs[d.Algorithm()] = append(algs[d.Algorithm()], d) - - if len(algs[d.Algorithm()]) > 1 { - return nil, fmt.Errorf("conflicting digests for %v found", d.Algorithm()) - } - - out = append(out, d) - } - - return out, nil -} - -// digestsMatch compares the two sets of digests to see if they match. -func digestsMatch(as, bs []digest.Digest) bool { - all := append(as, bs...) - - uniqified, err := uniqifyDigests(all...) - if err != nil { - // the only error uniqifyDigests returns is when the digests disagree. - return false - } - - disjoint := len(as) + len(bs) - // if these two sets have the same cardinality, we know both sides - // didn't share any digests. - return len(uniqified) != disjoint -} - -type digestSlice []digest.Digest - -func (p digestSlice) Len() int { return len(p) } -func (p digestSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p digestSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/containerd/continuity/driver/driver.go b/vendor/github.com/containerd/continuity/driver/driver.go deleted file mode 100644 index e5d9d0f..0000000 --- a/vendor/github.com/containerd/continuity/driver/driver.go +++ /dev/null @@ -1,178 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package driver - -import ( - "fmt" - "io" - "os" -) - -var ErrNotSupported = fmt.Errorf("not supported") - -// Driver provides all of the system-level functions in a common interface. -// The context should call these with full paths and should never use the `os` -// package or any other package to access resources on the filesystem. This -// mechanism let's us carefully control access to the context and maintain -// path and resource integrity. It also gives us an interface to reason about -// direct resource access. -// -// Implementations don't need to do much other than meet the interface. For -// example, it is not required to wrap os.FileInfo to return correct paths for -// the call to Name(). -type Driver interface { - // Note that Open() returns a File interface instead of *os.File. This - // is because os.File is a struct, so if Open was to return *os.File, - // the only way to fulfill the interface would be to call os.Open() - Open(path string) (File, error) - OpenFile(path string, flag int, perm os.FileMode) (File, error) - - Stat(path string) (os.FileInfo, error) - Lstat(path string) (os.FileInfo, error) - Readlink(p string) (string, error) - Mkdir(path string, mode os.FileMode) error - Remove(path string) error - - Link(oldname, newname string) error - Lchmod(path string, mode os.FileMode) error - Lchown(path string, uid, gid int64) error - Symlink(oldname, newname string) error - - MkdirAll(path string, perm os.FileMode) error - RemoveAll(path string) error - - // TODO(aaronl): These methods might move outside the main Driver - // interface in the future as more platforms are added. - Mknod(path string, mode os.FileMode, major int, minor int) error - Mkfifo(path string, mode os.FileMode) error -} - -// File is the interface for interacting with files returned by continuity's Open -// This is needed since os.File is a struct, instead of an interface, so it can't -// be used. 
-type File interface { - io.ReadWriteCloser - io.Seeker - Readdir(n int) ([]os.FileInfo, error) -} - -func NewSystemDriver() (Driver, error) { - // TODO(stevvooe): Consider having this take a "hint" path argument, which - // would be the context root. The hint could be used to resolve required - // filesystem support when assembling the driver to use. - return &driver{}, nil -} - -// XAttrDriver should be implemented on operation systems and filesystems that -// have xattr support for regular files and directories. -type XAttrDriver interface { - // Getxattr returns all of the extended attributes for the file at path. - // Typically, this takes a syscall call to Listxattr and Getxattr. - Getxattr(path string) (map[string][]byte, error) - - // Setxattr sets all of the extended attributes on file at path, following - // any symbolic links, if necessary. All attributes on the target are - // replaced by the values from attr. If the operation fails to set any - // attribute, those already applied will not be rolled back. - Setxattr(path string, attr map[string][]byte) error -} - -// LXAttrDriver should be implemented by drivers on operating systems and -// filesystems that support setting and getting extended attributes on -// symbolic links. If this is not implemented, extended attributes will be -// ignored on symbolic links. -type LXAttrDriver interface { - // LGetxattr returns all of the extended attributes for the file at path - // and does not follow symlinks. Typically, this takes a syscall call to - // Llistxattr and Lgetxattr. - LGetxattr(path string) (map[string][]byte, error) - - // LSetxattr sets all of the extended attributes on file at path, without - // following symbolic links. All attributes on the target are replaced by - // the values from attr. If the operation fails to set any attribute, - // those already applied will not be rolled back. 
- LSetxattr(path string, attr map[string][]byte) error -} - -type DeviceInfoDriver interface { - DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) -} - -// driver is a simple default implementation that sends calls out to the "os" -// package. Extend the "driver" type in system-specific files to add support, -// such as xattrs, which can add support at compile time. -type driver struct{} - -var _ File = &os.File{} - -// LocalDriver is the exported Driver struct for convenience. -var LocalDriver Driver = &driver{} - -func (d *driver) Open(p string) (File, error) { - return os.Open(p) -} - -func (d *driver) OpenFile(path string, flag int, perm os.FileMode) (File, error) { - return os.OpenFile(path, flag, perm) -} - -func (d *driver) Stat(p string) (os.FileInfo, error) { - return os.Stat(p) -} - -func (d *driver) Lstat(p string) (os.FileInfo, error) { - return os.Lstat(p) -} - -func (d *driver) Readlink(p string) (string, error) { - return os.Readlink(p) -} - -func (d *driver) Mkdir(p string, mode os.FileMode) error { - return os.Mkdir(p, mode) -} - -// Remove is used to unlink files and remove directories. -// This is following the golang os package api which -// combines the operations into a higher level Remove -// function. If explicit unlinking or directory removal -// to mirror system call is required, they should be -// split up at that time. -func (d *driver) Remove(path string) error { - return os.Remove(path) -} - -func (d *driver) Link(oldname, newname string) error { - return os.Link(oldname, newname) -} - -func (d *driver) Lchown(name string, uid, gid int64) error { - // TODO: error out if uid excesses int bit width? 
- return os.Lchown(name, int(uid), int(gid)) -} - -func (d *driver) Symlink(oldname, newname string) error { - return os.Symlink(oldname, newname) -} - -func (d *driver) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (d *driver) RemoveAll(path string) error { - return os.RemoveAll(path) -} diff --git a/vendor/github.com/containerd/continuity/driver/driver_unix.go b/vendor/github.com/containerd/continuity/driver/driver_unix.go deleted file mode 100644 index 3e58d10..0000000 --- a/vendor/github.com/containerd/continuity/driver/driver_unix.go +++ /dev/null @@ -1,133 +0,0 @@ -// +build linux darwin freebsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package driver - -import ( - "errors" - "fmt" - "os" - "sort" - - "github.com/containerd/continuity/devices" - "github.com/containerd/continuity/sysx" -) - -func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error { - err := devices.Mknod(path, mode, major, minor) - if err != nil { - err = &os.PathError{Op: "mknod", Path: path, Err: err} - } - return err -} - -func (d *driver) Mkfifo(path string, mode os.FileMode) error { - if mode&os.ModeNamedPipe == 0 { - return errors.New("mode passed to Mkfifo does not have the named pipe bit set") - } - // mknod with a mode that has ModeNamedPipe set creates a fifo, not a - // device. 
- err := devices.Mknod(path, mode, 0, 0) - if err != nil { - err = &os.PathError{Op: "mkfifo", Path: path, Err: err} - } - return err -} - -// Getxattr returns all of the extended attributes for the file at path p. -func (d *driver) Getxattr(p string) (map[string][]byte, error) { - xattrs, err := sysx.Listxattr(p) - if err != nil { - return nil, fmt.Errorf("listing %s xattrs: %v", p, err) - } - - sort.Strings(xattrs) - m := make(map[string][]byte, len(xattrs)) - - for _, attr := range xattrs { - value, err := sysx.Getxattr(p, attr) - if err != nil { - return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err) - } - - // NOTE(stevvooe): This append/copy tricky relies on unique - // xattrs. Break this out into an alloc/copy if xattrs are no - // longer unique. - m[attr] = append(m[attr], value...) - } - - return m, nil -} - -// Setxattr sets all of the extended attributes on file at path, following -// any symbolic links, if necessary. All attributes on the target are -// replaced by the values from attr. If the operation fails to set any -// attribute, those already applied will not be rolled back. -func (d *driver) Setxattr(path string, attrMap map[string][]byte) error { - for attr, value := range attrMap { - if err := sysx.Setxattr(path, attr, value, 0); err != nil { - return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err) - } - } - - return nil -} - -// LGetxattr returns all of the extended attributes for the file at path p -// not following symbolic links. 
-func (d *driver) LGetxattr(p string) (map[string][]byte, error) { - xattrs, err := sysx.LListxattr(p) - if err != nil { - return nil, fmt.Errorf("listing %s xattrs: %v", p, err) - } - - sort.Strings(xattrs) - m := make(map[string][]byte, len(xattrs)) - - for _, attr := range xattrs { - value, err := sysx.LGetxattr(p, attr) - if err != nil { - return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err) - } - - // NOTE(stevvooe): This append/copy tricky relies on unique - // xattrs. Break this out into an alloc/copy if xattrs are no - // longer unique. - m[attr] = append(m[attr], value...) - } - - return m, nil -} - -// LSetxattr sets all of the extended attributes on file at path, not -// following any symbolic links. All attributes on the target are -// replaced by the values from attr. If the operation fails to set any -// attribute, those already applied will not be rolled back. -func (d *driver) LSetxattr(path string, attrMap map[string][]byte) error { - for attr, value := range attrMap { - if err := sysx.LSetxattr(path, attr, value, 0); err != nil { - return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err) - } - } - - return nil -} - -func (d *driver) DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) { - return devices.DeviceInfo(fi) -} diff --git a/vendor/github.com/containerd/continuity/driver/driver_windows.go b/vendor/github.com/containerd/continuity/driver/driver_windows.go deleted file mode 100644 index 9baea3b..0000000 --- a/vendor/github.com/containerd/continuity/driver/driver_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build go1.13 - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Go 1.13 is the minimally supported version for Windows. -// Earlier golang releases have bug in os.Readlink -// (see https://github.com/golang/go/issues/30463). - -package driver - -import ( - "os" -) - -func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error { - return &os.PathError{Op: "mknod", Path: path, Err: ErrNotSupported} -} - -func (d *driver) Mkfifo(path string, mode os.FileMode) error { - return &os.PathError{Op: "mkfifo", Path: path, Err: ErrNotSupported} -} - -// Lchmod changes the mode of an file not following symlinks. -func (d *driver) Lchmod(path string, mode os.FileMode) (err error) { - // TODO: Use Window's equivalent - return os.Chmod(path, mode) -} diff --git a/vendor/github.com/containerd/continuity/driver/lchmod_linux.go b/vendor/github.com/containerd/continuity/driver/lchmod_linux.go deleted file mode 100644 index 06be285..0000000 --- a/vendor/github.com/containerd/continuity/driver/lchmod_linux.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package driver - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// Lchmod changes the mode of a file not following symlinks. -func (d *driver) Lchmod(path string, mode os.FileMode) error { - // On Linux, file mode is not supported for symlinks, - // and fchmodat() does not support AT_SYMLINK_NOFOLLOW, - // so symlinks need to be skipped entirely. - if st, err := os.Stat(path); err == nil && st.Mode()&os.ModeSymlink != 0 { - return nil - } - - err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), 0) - if err != nil { - err = &os.PathError{Op: "lchmod", Path: path, Err: err} - } - return err -} diff --git a/vendor/github.com/containerd/continuity/driver/utils.go b/vendor/github.com/containerd/continuity/driver/utils.go deleted file mode 100644 index 0c688d1..0000000 --- a/vendor/github.com/containerd/continuity/driver/utils.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package driver - -import ( - "io" - "io/ioutil" - "os" - "sort" -) - -// ReadFile works the same as ioutil.ReadFile with the Driver abstraction -func ReadFile(r Driver, filename string) ([]byte, error) { - f, err := r.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - return data, nil -} - -// WriteFile works the same as ioutil.WriteFile with the Driver abstraction -func WriteFile(r Driver, filename string, data []byte, perm os.FileMode) error { - f, err := r.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - defer f.Close() - - n, err := f.Write(data) - if err != nil { - return err - } else if n != len(data) { - return io.ErrShortWrite - } - - return nil -} - -// ReadDir works the same as ioutil.ReadDir with the Driver abstraction -func ReadDir(r Driver, dirname string) ([]os.FileInfo, error) { - f, err := r.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - dirs, err := f.Readdir(-1) - if err != nil { - return nil, err - } - - sort.Sort(fileInfos(dirs)) - return dirs, nil -} - -// Simple implementation of the sort.Interface for os.FileInfo -type fileInfos []os.FileInfo - -func (fis fileInfos) Len() int { - return len(fis) -} - -func (fis fileInfos) Less(i, j int) bool { - return fis[i].Name() < fis[j].Name() -} - -func (fis fileInfos) Swap(i, j int) { - fis[i], fis[j] = fis[j], fis[i] -} diff --git a/vendor/github.com/containerd/continuity/fs/copy.go b/vendor/github.com/containerd/continuity/fs/copy.go deleted file mode 100644 index 2ee77d1..0000000 --- a/vendor/github.com/containerd/continuity/fs/copy.go +++ /dev/null @@ -1,191 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/pkg/errors" -) - -var bufferPool = &sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 32*1024) - return &buffer - }, -} - -// XAttrErrorHandlers transform a non-nil xattr error. -// Return nil to ignore an error. -// xattrKey can be empty for listxattr operation. -type XAttrErrorHandler func(dst, src, xattrKey string, err error) error - -type copyDirOpts struct { - xeh XAttrErrorHandler - // xex contains a set of xattrs to exclude when copying - xex map[string]struct{} -} - -type CopyDirOpt func(*copyDirOpts) error - -// WithXAttrErrorHandler allows specifying XAttrErrorHandler -// If nil XAttrErrorHandler is specified (default), CopyDir stops -// on a non-nil xattr error. -func WithXAttrErrorHandler(xeh XAttrErrorHandler) CopyDirOpt { - return func(o *copyDirOpts) error { - o.xeh = xeh - return nil - } -} - -// WithAllowXAttrErrors allows ignoring xattr errors. -func WithAllowXAttrErrors() CopyDirOpt { - xeh := func(dst, src, xattrKey string, err error) error { - return nil - } - return WithXAttrErrorHandler(xeh) -} - -// WithXAttrExclude allows for exclusion of specified xattr during CopyDir operation. -func WithXAttrExclude(keys ...string) CopyDirOpt { - return func(o *copyDirOpts) error { - if o.xex == nil { - o.xex = make(map[string]struct{}, len(keys)) - } - for _, key := range keys { - o.xex[key] = struct{}{} - } - return nil - } -} - -// CopyDir copies the directory from src to dst. -// Most efficient copy of files is attempted. 
-func CopyDir(dst, src string, opts ...CopyDirOpt) error { - var o copyDirOpts - for _, opt := range opts { - if err := opt(&o); err != nil { - return err - } - } - inodes := map[uint64]string{} - return copyDirectory(dst, src, inodes, &o) -} - -func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) error { - stat, err := os.Stat(src) - if err != nil { - return errors.Wrapf(err, "failed to stat %s", src) - } - if !stat.IsDir() { - return errors.Errorf("source %s is not directory", src) - } - - if st, err := os.Stat(dst); err != nil { - if err := os.Mkdir(dst, stat.Mode()); err != nil { - return errors.Wrapf(err, "failed to mkdir %s", dst) - } - } else if !st.IsDir() { - return errors.Errorf("cannot copy to non-directory: %s", dst) - } else { - if err := os.Chmod(dst, stat.Mode()); err != nil { - return errors.Wrapf(err, "failed to chmod on %s", dst) - } - } - - fis, err := ioutil.ReadDir(src) - if err != nil { - return errors.Wrapf(err, "failed to read %s", src) - } - - if err := copyFileInfo(stat, dst); err != nil { - return errors.Wrapf(err, "failed to copy file info for %s", dst) - } - - if err := copyXAttrs(dst, src, o.xex, o.xeh); err != nil { - return errors.Wrap(err, "failed to copy xattrs") - } - - for _, fi := range fis { - source := filepath.Join(src, fi.Name()) - target := filepath.Join(dst, fi.Name()) - - switch { - case fi.IsDir(): - if err := copyDirectory(target, source, inodes, o); err != nil { - return err - } - continue - case (fi.Mode() & os.ModeType) == 0: - link, err := getLinkSource(target, fi, inodes) - if err != nil { - return errors.Wrap(err, "failed to get hardlink") - } - if link != "" { - if err := os.Link(link, target); err != nil { - return errors.Wrap(err, "failed to create hard link") - } - } else if err := CopyFile(target, source); err != nil { - return errors.Wrap(err, "failed to copy files") - } - case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink: - link, err := os.Readlink(source) - if err != nil { - 
return errors.Wrapf(err, "failed to read link: %s", source) - } - if err := os.Symlink(link, target); err != nil { - return errors.Wrapf(err, "failed to create symlink: %s", target) - } - case (fi.Mode() & os.ModeDevice) == os.ModeDevice: - if err := copyDevice(target, fi); err != nil { - return errors.Wrapf(err, "failed to create device") - } - default: - // TODO: Support pipes and sockets - return errors.Wrapf(err, "unsupported mode %s", fi.Mode()) - } - if err := copyFileInfo(fi, target); err != nil { - return errors.Wrap(err, "failed to copy file info") - } - - if err := copyXAttrs(target, source, o.xex, o.xeh); err != nil { - return errors.Wrap(err, "failed to copy xattrs") - } - } - - return nil -} - -// CopyFile copies the source file to the target. -// The most efficient means of copying is used for the platform. -func CopyFile(target, source string) error { - src, err := os.Open(source) - if err != nil { - return errors.Wrapf(err, "failed to open source %s", source) - } - defer src.Close() - tgt, err := os.Create(target) - if err != nil { - return errors.Wrapf(err, "failed to open target %s", target) - } - defer tgt.Close() - - return copyFileContent(tgt, src) -} diff --git a/vendor/github.com/containerd/continuity/fs/copy_freebsd.go b/vendor/github.com/containerd/continuity/fs/copy_freebsd.go deleted file mode 100644 index 4b116c9..0000000 --- a/vendor/github.com/containerd/continuity/fs/copy_freebsd.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build freebsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "os" - "syscall" - - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), st.Rdev) -} - -func utimesNano(name string, atime, mtime syscall.Timespec) error { - at := unix.NsecToTimespec(atime.Nano()) - mt := unix.NsecToTimespec(mtime.Nano()) - utimes := [2]unix.Timespec{at, mt} - return unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW) -} diff --git a/vendor/github.com/containerd/continuity/fs/copy_linux.go b/vendor/github.com/containerd/continuity/fs/copy_linux.go deleted file mode 100644 index 85beaee..0000000 --- a/vendor/github.com/containerd/continuity/fs/copy_linux.go +++ /dev/null @@ -1,150 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package fs - -import ( - "io" - "os" - "syscall" - - "github.com/containerd/continuity/sysx" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func copyFileInfo(fi os.FileInfo, name string) error { - st := fi.Sys().(*syscall.Stat_t) - if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { - if os.IsPermission(err) { - // Normally if uid/gid are the same this would be a no-op, but some - // filesystems may still return EPERM... for instance NFS does this. - // In such a case, this is not an error. - if dstStat, err2 := os.Lstat(name); err2 == nil { - st2 := dstStat.Sys().(*syscall.Stat_t) - if st.Uid == st2.Uid && st.Gid == st2.Gid { - err = nil - } - } - } - if err != nil { - return errors.Wrapf(err, "failed to chown %s", name) - } - } - - if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { - if err := os.Chmod(name, fi.Mode()); err != nil { - return errors.Wrapf(err, "failed to chmod %s", name) - } - } - - timespec := []unix.Timespec{ - unix.NsecToTimespec(syscall.TimespecToNsec(StatAtime(st))), - unix.NsecToTimespec(syscall.TimespecToNsec(StatMtime(st))), - } - if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrapf(err, "failed to utime %s", name) - } - - return nil -} - -const maxSSizeT = int64(^uint(0) >> 1) - -func copyFileContent(dst, src *os.File) error { - st, err := src.Stat() - if err != nil { - return errors.Wrap(err, "unable to stat source") - } - - size := st.Size() - first := true - srcFd := int(src.Fd()) - dstFd := int(dst.Fd()) - - for size > 0 { - // Ensure that we are never trying to copy more than SSIZE_MAX at a - // time and at the same time avoids overflows when the file is larger - // than 4GB on 32-bit systems. 
- var copySize int - if size > maxSSizeT { - copySize = int(maxSSizeT) - } else { - copySize = int(size) - } - n, err := unix.CopyFileRange(srcFd, nil, dstFd, nil, copySize, 0) - if err != nil { - if (err != unix.ENOSYS && err != unix.EXDEV) || !first { - return errors.Wrap(err, "copy file range failed") - } - - buf := bufferPool.Get().(*[]byte) - _, err = io.CopyBuffer(dst, src, *buf) - bufferPool.Put(buf) - return errors.Wrap(err, "userspace copy failed") - } - - first = false - size -= int64(n) - } - - return nil -} - -func copyXAttrs(dst, src string, excludes map[string]struct{}, errorHandler XAttrErrorHandler) error { - xattrKeys, err := sysx.LListxattr(src) - if err != nil { - e := errors.Wrapf(err, "failed to list xattrs on %s", src) - if errorHandler != nil { - e = errorHandler(dst, src, "", e) - } - return e - } - for _, xattr := range xattrKeys { - if _, exclude := excludes[xattr]; exclude { - continue - } - data, err := sysx.LGetxattr(src, xattr) - if err != nil { - e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) - if errorHandler != nil { - if e = errorHandler(dst, src, xattr, e); e == nil { - continue - } - } - return e - } - if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { - e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) - if errorHandler != nil { - if e = errorHandler(dst, src, xattr, e); e == nil { - continue - } - } - return e - } - } - - return nil -} - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) -} diff --git a/vendor/github.com/containerd/continuity/fs/copy_unix.go b/vendor/github.com/containerd/continuity/fs/copy_unix.go deleted file mode 100644 index 923dd5a..0000000 --- a/vendor/github.com/containerd/continuity/fs/copy_unix.go +++ /dev/null @@ -1,105 +0,0 @@ -// +build darwin freebsd openbsd solaris - -/* - Copyright The 
containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "io" - "os" - "syscall" - - "github.com/containerd/continuity/sysx" - "github.com/pkg/errors" -) - -func copyFileInfo(fi os.FileInfo, name string) error { - st := fi.Sys().(*syscall.Stat_t) - if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { - if os.IsPermission(err) { - // Normally if uid/gid are the same this would be a no-op, but some - // filesystems may still return EPERM... for instance NFS does this. - // In such a case, this is not an error. 
- if dstStat, err2 := os.Lstat(name); err2 == nil { - st2 := dstStat.Sys().(*syscall.Stat_t) - if st.Uid == st2.Uid && st.Gid == st2.Gid { - err = nil - } - } - } - if err != nil { - return errors.Wrapf(err, "failed to chown %s", name) - } - } - - if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { - if err := os.Chmod(name, fi.Mode()); err != nil { - return errors.Wrapf(err, "failed to chmod %s", name) - } - } - - if err := utimesNano(name, StatAtime(st), StatMtime(st)); err != nil { - return errors.Wrapf(err, "failed to utime %s", name) - } - - return nil -} - -func copyFileContent(dst, src *os.File) error { - buf := bufferPool.Get().(*[]byte) - _, err := io.CopyBuffer(dst, src, *buf) - bufferPool.Put(buf) - - return err -} - -func copyXAttrs(dst, src string, excludes map[string]struct{}, errorHandler XAttrErrorHandler) error { - xattrKeys, err := sysx.LListxattr(src) - if err != nil { - e := errors.Wrapf(err, "failed to list xattrs on %s", src) - if errorHandler != nil { - e = errorHandler(dst, src, "", e) - } - return e - } - for _, xattr := range xattrKeys { - if _, exclude := excludes[xattr]; exclude { - continue - } - data, err := sysx.LGetxattr(src, xattr) - if err != nil { - e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) - if errorHandler != nil { - if e = errorHandler(dst, src, xattr, e); e == nil { - continue - } - } - return e - } - if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { - e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) - if errorHandler != nil { - if e = errorHandler(dst, src, xattr, e); e == nil { - continue - } - } - return e - } - } - - return nil -} diff --git a/vendor/github.com/containerd/continuity/fs/copy_windows.go b/vendor/github.com/containerd/continuity/fs/copy_windows.go deleted file mode 100644 index 0081583..0000000 --- a/vendor/github.com/containerd/continuity/fs/copy_windows.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "io" - "os" - - "github.com/pkg/errors" -) - -func copyFileInfo(fi os.FileInfo, name string) error { - if err := os.Chmod(name, fi.Mode()); err != nil { - return errors.Wrapf(err, "failed to chmod %s", name) - } - - // TODO: copy windows specific metadata - - return nil -} - -func copyFileContent(dst, src *os.File) error { - buf := bufferPool.Get().(*[]byte) - _, err := io.CopyBuffer(dst, src, *buf) - bufferPool.Put(buf) - return err -} - -func copyXAttrs(dst, src string, excludes map[string]struct{}, errorHandler XAttrErrorHandler) error { - return nil -} - -func copyDevice(dst string, fi os.FileInfo) error { - return errors.New("device copy not supported") -} diff --git a/vendor/github.com/containerd/continuity/fs/diff.go b/vendor/github.com/containerd/continuity/fs/diff.go deleted file mode 100644 index e64f9e7..0000000 --- a/vendor/github.com/containerd/continuity/fs/diff.go +++ /dev/null @@ -1,326 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "context" - "os" - "path/filepath" - "strings" - - "golang.org/x/sync/errgroup" - - "github.com/sirupsen/logrus" -) - -// ChangeKind is the type of modification that -// a change is making. -type ChangeKind int - -const ( - // ChangeKindUnmodified represents an unmodified - // file - ChangeKindUnmodified = iota - - // ChangeKindAdd represents an addition of - // a file - ChangeKindAdd - - // ChangeKindModify represents a change to - // an existing file - ChangeKindModify - - // ChangeKindDelete represents a delete of - // a file - ChangeKindDelete -) - -func (k ChangeKind) String() string { - switch k { - case ChangeKindUnmodified: - return "unmodified" - case ChangeKindAdd: - return "add" - case ChangeKindModify: - return "modify" - case ChangeKindDelete: - return "delete" - default: - return "" - } -} - -// Change represents single change between a diff and its parent. -type Change struct { - Kind ChangeKind - Path string -} - -// ChangeFunc is the type of function called for each change -// computed during a directory changes calculation. -type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error - -// Changes computes changes between two directories calling the -// given change function for each computed change. The first -// directory is intended to the base directory and second -// directory the changed directory. -// -// The change callback is called by the order of path names and -// should be appliable in that order. -// Due to this apply ordering, the following is true -// - Removed directory trees only create a single change for the root -// directory removed. Remaining changes are implied. -// - A directory which is modified to become a file will not have -// delete entries for sub-path items, their removal is implied -// by the removal of the parent directory. 
-// -// Opaque directories will not be treated specially and each file -// removed from the base directory will show up as a removal. -// -// File content comparisons will be done on files which have timestamps -// which may have been truncated. If either of the files being compared -// has a zero value nanosecond value, each byte will be compared for -// differences. If 2 files have the same seconds value but different -// nanosecond values where one of those values is zero, the files will -// be considered unchanged if the content is the same. This behavior -// is to account for timestamp truncation during archiving. -func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error { - if a == "" { - logrus.Debugf("Using single walk diff for %s", b) - return addDirChanges(ctx, changeFn, b) - } else if diffOptions := detectDirDiff(b, a); diffOptions != nil { - logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a) - return diffDirChanges(ctx, changeFn, a, diffOptions) - } - - logrus.Debugf("Using double walk diff for %s from %s", b, a) - return doubleWalkDiff(ctx, changeFn, a, b) -} - -func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error { - return filepath.Walk(root, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(root, path) - if err != nil { - return err - } - - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - return changeFn(ChangeKindAdd, path, f, nil) - }) -} - -// diffDirOptions is used when the diff can be directly calculated from -// a diff directory to its base, without walking both trees. -type diffDirOptions struct { - diffDir string - skipChange func(string) (bool, error) - deleteChange func(string, string, os.FileInfo) (string, error) -} - -// diffDirChanges walks the diff directory and compares changes against the base. 
-func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error { - changedDirs := make(map[string]struct{}) - return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(o.diffDir, path) - if err != nil { - return err - } - - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - // TODO: handle opaqueness, start new double walker at this - // location to get deletes, and skip tree in single walker - - if o.skipChange != nil { - if skip, err := o.skipChange(path); skip { - return err - } - } - - var kind ChangeKind - - deletedFile, err := o.deleteChange(o.diffDir, path, f) - if err != nil { - return err - } - - // Find out what kind of modification happened - if deletedFile != "" { - path = deletedFile - kind = ChangeKindDelete - f = nil - } else { - // Otherwise, the file was added - kind = ChangeKindAdd - - // ...Unless it already existed in a base, in which case, it's a modification - stat, err := os.Stat(filepath.Join(base, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the base, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. - // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - kind = ChangeKindModify - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directory in the rw and ro layers are all equal. 
- // Check https://github.com/docker/docker/pull/13590 for details. - if f.IsDir() { - changedDirs[path] = struct{}{} - } - if kind == ChangeKindAdd || kind == ChangeKindDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - pi, err := os.Stat(filepath.Join(o.diffDir, parent)) - if err := changeFn(ChangeKindModify, parent, pi, err); err != nil { - return err - } - changedDirs[parent] = struct{}{} - } - } - - return changeFn(kind, path, f, nil) - }) -} - -// doubleWalkDiff walks both directories to create a diff -func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) { - g, ctx := errgroup.WithContext(ctx) - - var ( - c1 = make(chan *currentPath) - c2 = make(chan *currentPath) - - f1, f2 *currentPath - rmdir string - ) - g.Go(func() error { - defer close(c1) - return pathWalk(ctx, a, c1) - }) - g.Go(func() error { - defer close(c2) - return pathWalk(ctx, b, c2) - }) - g.Go(func() error { - for c1 != nil || c2 != nil { - if f1 == nil && c1 != nil { - f1, err = nextPath(ctx, c1) - if err != nil { - return err - } - if f1 == nil { - c1 = nil - } - } - - if f2 == nil && c2 != nil { - f2, err = nextPath(ctx, c2) - if err != nil { - return err - } - if f2 == nil { - c2 = nil - } - } - if f1 == nil && f2 == nil { - continue - } - - var f os.FileInfo - k, p := pathChange(f1, f2) - switch k { - case ChangeKindAdd: - if rmdir != "" { - rmdir = "" - } - f = f2.f - f2 = nil - case ChangeKindDelete: - // Check if this file is already removed by being - // under of a removed directory - if rmdir != "" && strings.HasPrefix(f1.path, rmdir) { - f1 = nil - continue - } else if f1.f.IsDir() { - rmdir = f1.path + string(os.PathSeparator) - } else if rmdir != "" { - rmdir = "" - } - f1 = nil - case ChangeKindModify: - same, err := sameFile(f1, f2) - if err != nil { - return err - } - if f1.f.IsDir() && !f2.f.IsDir() { - rmdir = f1.path + string(os.PathSeparator) - } else if rmdir != "" { - rmdir = "" - } - f = 
f2.f - f1 = nil - f2 = nil - if same { - if !isLinked(f) { - continue - } - k = ChangeKindUnmodified - } - } - if err := changeFn(k, p, f, nil); err != nil { - return err - } - } - return nil - }) - - return g.Wait() -} diff --git a/vendor/github.com/containerd/continuity/fs/diff_unix.go b/vendor/github.com/containerd/continuity/fs/diff_unix.go deleted file mode 100644 index 7913af2..0000000 --- a/vendor/github.com/containerd/continuity/fs/diff_unix.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package fs - -import ( - "bytes" - "os" - "syscall" - - "github.com/containerd/continuity/sysx" - "github.com/pkg/errors" -) - -// detectDirDiff returns diff dir options if a directory could -// be found in the mount info for upper which is the direct -// diff with the provided lower directory -func detectDirDiff(upper, lower string) *diffDirOptions { - // TODO: get mount options for upper - // TODO: detect AUFS - // TODO: detect overlay - return nil -} - -// compareSysStat returns whether the stats are equivalent, -// whether the files are considered the same file, and -// an error -func compareSysStat(s1, s2 interface{}) (bool, error) { - ls1, ok := s1.(*syscall.Stat_t) - if !ok { - return false, nil - } - ls2, ok := s2.(*syscall.Stat_t) - if !ok { - return false, nil - } - - return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil -} - -func compareCapabilities(p1, p2 string) (bool, error) { - c1, err := sysx.LGetxattr(p1, "security.capability") - if err != nil && err != sysx.ENODATA { - return false, errors.Wrapf(err, "failed to get xattr for %s", p1) - } - c2, err := sysx.LGetxattr(p2, "security.capability") - if err != nil && err != sysx.ENODATA { - return false, errors.Wrapf(err, "failed to get xattr for %s", p2) - } - return bytes.Equal(c1, c2), nil -} - -func isLinked(f os.FileInfo) bool { - s, ok := f.Sys().(*syscall.Stat_t) - if !ok { - return false - } - return !f.IsDir() && s.Nlink > 1 -} diff --git a/vendor/github.com/containerd/continuity/fs/dtype_linux.go b/vendor/github.com/containerd/continuity/fs/dtype_linux.go deleted file mode 100644 index 10510d8..0000000 --- a/vendor/github.com/containerd/continuity/fs/dtype_linux.go +++ /dev/null @@ -1,103 +0,0 @@ -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "fmt" - "io/ioutil" - "os" - "syscall" - "unsafe" -) - -func locateDummyIfEmpty(path string) (string, error) { - children, err := ioutil.ReadDir(path) - if err != nil { - return "", err - } - if len(children) != 0 { - return "", nil - } - dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") - if err != nil { - return "", err - } - name := dummyFile.Name() - err = dummyFile.Close() - return name, err -} - -// SupportsDType returns whether the filesystem mounted on path supports d_type -func SupportsDType(path string) (bool, error) { - // locate dummy so that we have at least one dirent - dummy, err := locateDummyIfEmpty(path) - if err != nil { - return false, err - } - if dummy != "" { - defer os.Remove(dummy) - } - - visited := 0 - supportsDType := true - fn := func(ent *syscall.Dirent) bool { - visited++ - if ent.Type == syscall.DT_UNKNOWN { - supportsDType = false - // stop iteration - return true - } - // continue iteration - return false - } - if err = iterateReadDir(path, fn); err != nil { - return false, err - } - if visited == 0 { - return false, fmt.Errorf("did not hit any dirent during iteration %s", path) - } - return supportsDType, nil -} - -func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { - d, err := os.Open(path) - if err != nil { - return err - } - defer d.Close() - fd := int(d.Fd()) - buf := make([]byte, 4096) - for { - nbytes, err := syscall.ReadDirent(fd, buf) - if err != nil { - return err - } - if nbytes == 0 { - break - } - for off := 0; off < nbytes; { - ent := 
(*syscall.Dirent)(unsafe.Pointer(&buf[off])) - if stop := fn(ent); stop { - return nil - } - off += int(ent.Reclen) - } - } - return nil -} diff --git a/vendor/github.com/containerd/continuity/fs/du.go b/vendor/github.com/containerd/continuity/fs/du.go deleted file mode 100644 index fccc985..0000000 --- a/vendor/github.com/containerd/continuity/fs/du.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import "context" - -// Usage of disk information -type Usage struct { - Inodes int64 - Size int64 -} - -// DiskUsage counts the number of inodes and disk usage for the resources under -// path. -func DiskUsage(ctx context.Context, roots ...string) (Usage, error) { - return diskUsage(ctx, roots...) -} - -// DiffUsage counts the numbers of inodes and disk usage in the -// diff between the 2 directories. The first path is intended -// as the base directory and the second as the changed directory. -func DiffUsage(ctx context.Context, a, b string) (Usage, error) { - return diffUsage(ctx, a, b) -} diff --git a/vendor/github.com/containerd/continuity/fs/du_unix.go b/vendor/github.com/containerd/continuity/fs/du_unix.go deleted file mode 100644 index 9da43d1..0000000 --- a/vendor/github.com/containerd/continuity/fs/du_unix.go +++ /dev/null @@ -1,120 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "context" - "os" - "path/filepath" - "syscall" -) - -// blocksUnitSize is the unit used by `st_blocks` in `stat` in bytes. -// See https://man7.org/linux/man-pages/man2/stat.2.html -// st_blocks -// This field indicates the number of blocks allocated to the -// file, in 512-byte units. (This may be smaller than -// st_size/512 when the file has holes.) -const blocksUnitSize = 512 - -type inode struct { - // TODO(stevvooe): Can probably reduce memory usage by not tracking - // device, but we can leave this right for now. - dev, ino uint64 -} - -func newInode(stat *syscall.Stat_t) inode { - return inode{ - // Dev is uint32 on darwin/bsd, uint64 on linux/solaris/freebsd - dev: uint64(stat.Dev), // nolint: unconvert - // Ino is uint32 on bsd, uint64 on darwin/linux/solaris/freebsd - ino: uint64(stat.Ino), // nolint: unconvert - } -} - -func diskUsage(ctx context.Context, roots ...string) (Usage, error) { - - var ( - size int64 - inodes = map[inode]struct{}{} // expensive! 
- ) - - for _, root := range roots { - if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - stat := fi.Sys().(*syscall.Stat_t) - inoKey := newInode(stat) - if _, ok := inodes[inoKey]; !ok { - inodes[inoKey] = struct{}{} - size += stat.Blocks * blocksUnitSize - } - - return nil - }); err != nil { - return Usage{}, err - } - } - - return Usage{ - Inodes: int64(len(inodes)), - Size: size, - }, nil -} - -func diffUsage(ctx context.Context, a, b string) (Usage, error) { - var ( - size int64 - inodes = map[inode]struct{}{} // expensive! - ) - - if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - if kind == ChangeKindAdd || kind == ChangeKindModify { - stat := fi.Sys().(*syscall.Stat_t) - inoKey := newInode(stat) - if _, ok := inodes[inoKey]; !ok { - inodes[inoKey] = struct{}{} - size += stat.Blocks * blocksUnitSize - } - - return nil - - } - return nil - }); err != nil { - return Usage{}, err - } - - return Usage{ - Inodes: int64(len(inodes)), - Size: size, - }, nil -} diff --git a/vendor/github.com/containerd/continuity/fs/du_windows.go b/vendor/github.com/containerd/continuity/fs/du_windows.go deleted file mode 100644 index 8f25ec5..0000000 --- a/vendor/github.com/containerd/continuity/fs/du_windows.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "context" - "os" - "path/filepath" -) - -func diskUsage(ctx context.Context, roots ...string) (Usage, error) { - var ( - size int64 - ) - - // TODO(stevvooe): Support inodes (or equivalent) for windows. - - for _, root := range roots { - if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - size += fi.Size() - return nil - }); err != nil { - return Usage{}, err - } - } - - return Usage{ - Size: size, - }, nil -} - -func diffUsage(ctx context.Context, a, b string) (Usage, error) { - var ( - size int64 - ) - - if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - if kind == ChangeKindAdd || kind == ChangeKindModify { - size += fi.Size() - - return nil - - } - return nil - }); err != nil { - return Usage{}, err - } - - return Usage{ - Size: size, - }, nil -} diff --git a/vendor/github.com/containerd/continuity/fs/fstest/compare.go b/vendor/github.com/containerd/continuity/fs/fstest/compare.go deleted file mode 100644 index 0d100b6..0000000 --- a/vendor/github.com/containerd/continuity/fs/fstest/compare.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package fstest - -import ( - "io/ioutil" - "os" - - "github.com/containerd/continuity" - "github.com/pkg/errors" -) - -// CheckDirectoryEqual compares two directory paths to make sure that -// the content of the directories is the same. -func CheckDirectoryEqual(d1, d2 string) error { - c1, err := continuity.NewContext(d1) - if err != nil { - return errors.Wrap(err, "failed to build context") - } - - c2, err := continuity.NewContext(d2) - if err != nil { - return errors.Wrap(err, "failed to build context") - } - - m1, err := continuity.BuildManifest(c1) - if err != nil { - return errors.Wrap(err, "failed to build manifest") - } - - m2, err := continuity.BuildManifest(c2) - if err != nil { - return errors.Wrap(err, "failed to build manifest") - } - - diff := diffResourceList(m1.Resources, m2.Resources) - if diff.HasDiff() { - return errors.Errorf("directory diff between %s and %s\n%s", d1, d2, diff.String()) - } - - return nil -} - -// CheckDirectoryEqualWithApplier compares directory against applier -func CheckDirectoryEqualWithApplier(root string, a Applier) error { - applied, err := ioutil.TempDir("", "fstest") - if err != nil { - return err - } - defer os.RemoveAll(applied) - if err := a.Apply(applied); err != nil { - return err - } - return CheckDirectoryEqual(applied, root) -} diff --git a/vendor/github.com/containerd/continuity/fs/fstest/compare_unix.go b/vendor/github.com/containerd/continuity/fs/fstest/compare_unix.go deleted file mode 100644 index 21dcafd..0000000 --- a/vendor/github.com/containerd/continuity/fs/fstest/compare_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fstest - -var metadataFiles map[string]bool diff --git a/vendor/github.com/containerd/continuity/fs/fstest/compare_windows.go b/vendor/github.com/containerd/continuity/fs/fstest/compare_windows.go deleted file mode 100644 index a357819..0000000 --- a/vendor/github.com/containerd/continuity/fs/fstest/compare_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fstest - -// TODO: Any more metadata files generated by Windows layers? 
-// TODO: Also skip Recycle Bin contents in Windows layers which is used to store deleted files in some cases -var metadataFiles = map[string]bool{ - "\\System Volume Information": true, - "\\WcSandboxState": true, -} diff --git a/vendor/github.com/containerd/continuity/fs/fstest/continuity_util.go b/vendor/github.com/containerd/continuity/fs/fstest/continuity_util.go deleted file mode 100644 index 3b687a6..0000000 --- a/vendor/github.com/containerd/continuity/fs/fstest/continuity_util.go +++ /dev/null @@ -1,215 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package fstest - -import ( - "bytes" - "fmt" - - "github.com/containerd/continuity" -) - -type resourceUpdate struct { - Original continuity.Resource - Updated continuity.Resource -} - -func (u resourceUpdate) String() string { - return fmt.Sprintf("%s(mode: %o, uid: %d, gid: %d) -> %s(mode: %o, uid: %d, gid: %d)", - u.Original.Path(), u.Original.Mode(), u.Original.UID(), u.Original.GID(), - u.Updated.Path(), u.Updated.Mode(), u.Updated.UID(), u.Updated.GID(), - ) -} - -type resourceListDifference struct { - Additions []continuity.Resource - Deletions []continuity.Resource - Updates []resourceUpdate -} - -func (l resourceListDifference) HasDiff() bool { - if len(l.Deletions) > 0 || len(l.Updates) > 0 || (len(metadataFiles) == 0 && len(l.Additions) > 0) { - return true - } - - for _, add := range l.Additions { - if ok := metadataFiles[add.Path()]; !ok { - return true - } - } - - return false -} - -func (l resourceListDifference) String() string { - buf := bytes.NewBuffer(nil) - for _, add := range l.Additions { - fmt.Fprintf(buf, "+ %s\n", add.Path()) - } - for _, del := range l.Deletions { - fmt.Fprintf(buf, "- %s\n", del.Path()) - } - for _, upt := range l.Updates { - fmt.Fprintf(buf, "~ %s\n", upt.String()) - } - return buf.String() -} - -// diffManifest compares two resource lists and returns the list -// of adds updates and deletes, resource lists are not reordered -// before doing difference. 
-func diffResourceList(r1, r2 []continuity.Resource) resourceListDifference { - i1 := 0 - i2 := 0 - var d resourceListDifference - - for i1 < len(r1) && i2 < len(r2) { - p1 := r1[i1].Path() - p2 := r2[i2].Path() - switch { - case p1 < p2: - d.Deletions = append(d.Deletions, r1[i1]) - i1++ - case p1 == p2: - if !compareResource(r1[i1], r2[i2]) { - d.Updates = append(d.Updates, resourceUpdate{ - Original: r1[i1], - Updated: r2[i2], - }) - } - i1++ - i2++ - case p1 > p2: - d.Additions = append(d.Additions, r2[i2]) - i2++ - } - } - - for i1 < len(r1) { - d.Deletions = append(d.Deletions, r1[i1]) - i1++ - - } - for i2 < len(r2) { - d.Additions = append(d.Additions, r2[i2]) - i2++ - } - - return d -} - -func compareResource(r1, r2 continuity.Resource) bool { - if r1.Path() != r2.Path() { - return false - } - if r1.Mode() != r2.Mode() { - return false - } - if r1.UID() != r2.UID() { - return false - } - if r1.GID() != r2.GID() { - return false - } - - // TODO(dmcgowan): Check if is XAttrer - - return compareResourceTypes(r1, r2) - -} - -func compareResourceTypes(r1, r2 continuity.Resource) bool { - switch t1 := r1.(type) { - case continuity.RegularFile: - t2, ok := r2.(continuity.RegularFile) - if !ok { - return false - } - return compareRegularFile(t1, t2) - case continuity.Directory: - t2, ok := r2.(continuity.Directory) - if !ok { - return false - } - return compareDirectory(t1, t2) - case continuity.SymLink: - t2, ok := r2.(continuity.SymLink) - if !ok { - return false - } - return compareSymLink(t1, t2) - case continuity.NamedPipe: - t2, ok := r2.(continuity.NamedPipe) - if !ok { - return false - } - return compareNamedPipe(t1, t2) - case continuity.Device: - t2, ok := r2.(continuity.Device) - if !ok { - return false - } - return compareDevice(t1, t2) - default: - // TODO(dmcgowan): Should this panic? 
- return r1 == r2 - } -} - -func compareRegularFile(r1, r2 continuity.RegularFile) bool { - if r1.Size() != r2.Size() { - return false - } - p1 := r1.Paths() - p2 := r2.Paths() - if len(p1) != len(p2) { - return false - } - for i := range p1 { - if p1[i] != p2[i] { - return false - } - } - d1 := r1.Digests() - d2 := r2.Digests() - if len(d1) != len(d2) { - return false - } - for i := range d1 { - if d1[i] != d2[i] { - return false - } - } - - return true -} - -func compareSymLink(r1, r2 continuity.SymLink) bool { - return r1.Target() == r2.Target() -} - -func compareDirectory(r1, r2 continuity.Directory) bool { - return true -} - -func compareNamedPipe(r1, r2 continuity.NamedPipe) bool { - return true -} - -func compareDevice(r1, r2 continuity.Device) bool { - return r1.Major() == r2.Major() && r1.Minor() == r2.Minor() -} diff --git a/vendor/github.com/containerd/continuity/fs/fstest/file.go b/vendor/github.com/containerd/continuity/fs/fstest/file.go deleted file mode 100644 index 574b675..0000000 --- a/vendor/github.com/containerd/continuity/fs/fstest/file.go +++ /dev/null @@ -1,184 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package fstest - -import ( - "bytes" - "io" - "math/rand" - "os" - "path/filepath" - "syscall" - "time" -) - -// Applier applies single file changes -type Applier interface { - Apply(root string) error -} - -type applyFn func(root string) error - -func (a applyFn) Apply(root string) error { - return a(root) -} - -// CreateFile returns a file applier which creates a file as the -// provided name with the given content and permission. -func CreateFile(name string, content []byte, perm os.FileMode) Applier { - f := func() io.Reader { - return bytes.NewReader(content) - } - return writeFileStream(name, f, perm) -} - -// CreateRandomFile returns a file applier which creates a file with random -// content of the given size using the given seed and permission. -func CreateRandomFile(name string, seed, size int64, perm os.FileMode) Applier { - f := func() io.Reader { - return io.LimitReader(rand.New(rand.NewSource(seed)), size) - } - return writeFileStream(name, f, perm) -} - -// writeFileStream returns a file applier which creates a file as the -// provided name with the given content from the provided i/o stream and permission. 
-func writeFileStream(name string, stream func() io.Reader, perm os.FileMode) Applier { - return applyFn(func(root string) (retErr error) { - fullPath := filepath.Join(root, name) - f, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - defer func() { - err := f.Close() - if err != nil && retErr == nil { - retErr = err - } - }() - _, err = io.Copy(f, stream()) - if err != nil { - return err - } - return os.Chmod(fullPath, perm) - }) -} - -// Remove returns a file applier which removes the provided file name -func Remove(name string) Applier { - return applyFn(func(root string) error { - return os.Remove(filepath.Join(root, name)) - }) -} - -// RemoveAll returns a file applier which removes the provided file name -// as in os.RemoveAll -func RemoveAll(name string) Applier { - return applyFn(func(root string) error { - return os.RemoveAll(filepath.Join(root, name)) - }) -} - -// CreateDir returns a file applier to create the directory with -// the provided name and permission -func CreateDir(name string, perm os.FileMode) Applier { - return applyFn(func(root string) error { - fullPath := filepath.Join(root, name) - if err := os.MkdirAll(fullPath, perm); err != nil { - return err - } - return os.Chmod(fullPath, perm) - }) -} - -// Rename returns a file applier which renames a file -func Rename(old, new string) Applier { - return applyFn(func(root string) error { - return os.Rename(filepath.Join(root, old), filepath.Join(root, new)) - }) -} - -// Chown returns a file applier which changes the ownership of a file -func Chown(name string, uid, gid int) Applier { - return applyFn(func(root string) error { - return os.Chown(filepath.Join(root, name), uid, gid) - }) -} - -// Chtimes changes access and mod time of file. -// Use Lchtimes for symbolic links. 
-func Chtimes(name string, atime, mtime time.Time) Applier { - return applyFn(func(root string) error { - return os.Chtimes(filepath.Join(root, name), atime, mtime) - }) -} - -// Chmod returns a file applier which changes the file permission -func Chmod(name string, perm os.FileMode) Applier { - return applyFn(func(root string) error { - return os.Chmod(filepath.Join(root, name), perm) - }) -} - -// Symlink returns a file applier which creates a symbolic link -func Symlink(oldname, newname string) Applier { - return applyFn(func(root string) error { - return os.Symlink(oldname, filepath.Join(root, newname)) - }) -} - -// Link returns a file applier which creates a hard link -func Link(oldname, newname string) Applier { - return applyFn(func(root string) error { - return os.Link(filepath.Join(root, oldname), filepath.Join(root, newname)) - }) -} - -// TODO: Make platform specific, windows applier is always no-op -//func Mknod(name string, mode int32, dev int) Applier { -// return func(root string) error { -// return return syscall.Mknod(path, mode, dev) -// } -//} - -func CreateSocket(name string, perm os.FileMode) Applier { - return applyFn(func(root string) error { - fullPath := filepath.Join(root, name) - fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) - if err != nil { - return err - } - defer syscall.Close(fd) - sa := &syscall.SockaddrUnix{Name: fullPath} - if err := syscall.Bind(fd, sa); err != nil { - return err - } - return os.Chmod(fullPath, perm) - }) -} - -// Apply returns a new applier from the given appliers -func Apply(appliers ...Applier) Applier { - return applyFn(func(root string) error { - for _, a := range appliers { - if err := a.Apply(root); err != nil { - return err - } - } - return nil - }) -} diff --git a/vendor/github.com/containerd/continuity/fs/fstest/file_unix.go b/vendor/github.com/containerd/continuity/fs/fstest/file_unix.go deleted file mode 100644 index 9451198..0000000 --- 
a/vendor/github.com/containerd/continuity/fs/fstest/file_unix.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fstest - -import ( - "path/filepath" - "time" - - "github.com/containerd/continuity/sysx" - "golang.org/x/sys/unix" -) - -// SetXAttr sets the xatter for the file -func SetXAttr(name, key, value string) Applier { - return applyFn(func(root string) error { - path := filepath.Join(root, name) - return sysx.LSetxattr(path, key, []byte(value), 0) - }) -} - -// Lchtimes changes access and mod time of file without following symlink -func Lchtimes(name string, atime, mtime time.Time) Applier { - return applyFn(func(root string) error { - path := filepath.Join(root, name) - at := unix.NsecToTimespec(atime.UnixNano()) - mt := unix.NsecToTimespec(mtime.UnixNano()) - utimes := [2]unix.Timespec{at, mt} - return unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW) - }) -} - -func Base() Applier { - return applyFn(func(root string) error { - // do nothing, as the base is not special - return nil - }) -} diff --git a/vendor/github.com/containerd/continuity/fs/fstest/file_windows.go b/vendor/github.com/containerd/continuity/fs/fstest/file_windows.go deleted file mode 100644 index 1fab035..0000000 --- a/vendor/github.com/containerd/continuity/fs/fstest/file_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fstest - -import ( - "time" - - "github.com/pkg/errors" -) - -// Lchtimes changes access and mod time of file without following symlink -func Lchtimes(name string, atime, mtime time.Time) Applier { - return applyFn(func(root string) error { - return errors.New("Not implemented") - }) -} - -// Base applies the files required to make a valid Windows container layer -// that the filter will mount. It is used for testing the snapshotter -func Base() Applier { - return Apply( - CreateDir("Windows", 0755), - CreateDir("Windows/System32", 0755), - CreateDir("Windows/System32/Config", 0755), - CreateFile("Windows/System32/Config/SYSTEM", []byte("foo\n"), 0777), - CreateFile("Windows/System32/Config/SOFTWARE", []byte("foo\n"), 0777), - CreateFile("Windows/System32/Config/SAM", []byte("foo\n"), 0777), - CreateFile("Windows/System32/Config/SECURITY", []byte("foo\n"), 0777), - CreateFile("Windows/System32/Config/DEFAULT", []byte("foo\n"), 0777), - ) -} diff --git a/vendor/github.com/containerd/continuity/fs/fstest/testsuite.go b/vendor/github.com/containerd/continuity/fs/fstest/testsuite.go deleted file mode 100644 index 360ef55..0000000 --- a/vendor/github.com/containerd/continuity/fs/fstest/testsuite.go +++ /dev/null @@ -1,237 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fstest - -import ( - "context" - "io/ioutil" - "os" - "testing" -) - -// TestApplier applies the test context -type TestApplier interface { - TestContext(context.Context) (context.Context, func(), error) - Apply(context.Context, Applier) (string, func(), error) -} - -// FSSuite runs the path test suite -func FSSuite(t *testing.T, a TestApplier) { - t.Run("Basic", makeTest(t, a, basicTest)) - t.Run("Deletion", makeTest(t, a, deletionTest)) - t.Run("Update", makeTest(t, a, updateTest)) - t.Run("DirectoryPermission", makeTest(t, a, directoryPermissionsTest)) - t.Run("ParentDirectoryPermission", makeTest(t, a, parentDirectoryPermissionsTest)) - t.Run("HardlinkUnmodified", makeTest(t, a, hardlinkUnmodified)) - t.Run("HardlinkBeforeUnmodified", makeTest(t, a, hardlinkBeforeUnmodified)) - t.Run("HardlinkBeforeModified", makeTest(t, a, hardlinkBeforeModified)) -} - -func makeTest(t *testing.T, ta TestApplier, as []Applier) func(t *testing.T) { - return func(t *testing.T) { - ctx, cleanup, err := ta.TestContext(context.Background()) - if err != nil { - t.Fatalf("Unable to get test context: %+v", err) - } - defer cleanup() - - applyDir, err := ioutil.TempDir("", "test-expected-") - if err != nil { - t.Fatalf("Unable to make temp directory: %+v", err) - } - defer os.RemoveAll(applyDir) - - for i, a := range as { - testDir, c, err := ta.Apply(ctx, a) - if err != nil { - t.Fatalf("Apply failed at %d: %+v", i, err) - } - if err := a.Apply(applyDir); err != nil { - if c != nil { - c() - } - t.Fatalf("Error applying change to apply directory: %+v", err) - } - - err = 
CheckDirectoryEqual(applyDir, testDir) - if c != nil { - c() - } - if err != nil { - t.Fatalf("Directories not equal at %d (expected <> tested): %+v", i, err) - } - } - } -} - -var ( - // baseApplier creates a basic filesystem layout - // with multiple types of files for basic tests. - baseApplier = Apply( - CreateDir("/etc/", 0755), - CreateFile("/etc/hosts", []byte("127.0.0.1 localhost"), 0644), - Link("/etc/hosts", "/etc/hosts.allow"), - CreateDir("/usr/local/lib", 0755), - CreateFile("/usr/local/lib/libnothing.so", []byte{0x00, 0x00}, 0755), - Symlink("libnothing.so", "/usr/local/lib/libnothing.so.2"), - CreateDir("/home", 0755), - CreateDir("/home/derek", 0700), - // TODO: CreateSocket: how should Sockets be handled in continuity? - ) - - // basicTest covers basic operations - basicTest = []Applier{ - baseApplier, - Apply( - CreateFile("/etc/hosts", []byte("127.0.0.1 localhost.localdomain"), 0644), - CreateFile("/etc/fstab", []byte("/dev/sda1\t/\text4\tdefaults 1 1\n"), 0600), - CreateFile("/etc/badfile", []byte(""), 0666), - CreateFile("/home/derek/.zshrc", []byte("#ZSH is just better\n"), 0640), - ), - Apply( - Remove("/etc/badfile"), - Rename("/home/derek", "/home/notderek"), - ), - Apply( - RemoveAll("/usr"), - Remove("/etc/hosts.allow"), - ), - Apply( - RemoveAll("/home"), - CreateDir("/home/derek", 0700), - CreateFile("/home/derek/.bashrc", []byte("#not going away\n"), 0640), - Link("/etc/hosts", "/etc/hosts.allow"), - ), - } - - // deletionTest covers various deletion scenarios to ensure - // deletions are properly picked up and applied - deletionTest = []Applier{ - Apply( - CreateDir("/test/somedir", 0755), - CreateDir("/lib", 0700), - CreateFile("/lib/hidden", []byte{}, 0644), - ), - Apply( - CreateFile("/test/a", []byte{}, 0644), - CreateFile("/test/b", []byte{}, 0644), - CreateDir("/test/otherdir", 0755), - CreateFile("/test/otherdir/.empty", []byte{}, 0644), - RemoveAll("/lib"), - CreateDir("/lib", 0700), - CreateFile("/lib/not-hidden", []byte{}, 
0644), - ), - Apply( - Remove("/test/a"), - Remove("/test/b"), - RemoveAll("/test/otherdir"), - CreateFile("/lib/newfile", []byte{}, 0644), - ), - } - - // updateTest covers file updates for content and permission - updateTest = []Applier{ - Apply( - CreateDir("/d1", 0755), - CreateDir("/d2", 0700), - CreateFile("/d1/f1", []byte("something..."), 0644), - CreateFile("/d1/f2", []byte("else..."), 0644), - CreateFile("/d1/f3", []byte("entirely..."), 0644), - ), - Apply( - CreateFile("/d1/f1", []byte("file content of a different length"), 0664), - Remove("/d1/f3"), - CreateFile("/d1/f3", []byte("updated content"), 0664), - Chmod("/d1/f2", 0766), - Chmod("/d2", 0777), - ), - } - - // directoryPermissionsTest covers directory permissions on update - directoryPermissionsTest = []Applier{ - Apply( - CreateDir("/d1", 0700), - CreateDir("/d2", 0751), - CreateDir("/d3", 0777), - ), - Apply( - CreateFile("/d1/f", []byte("irrelevant"), 0644), - CreateDir("/d1/d", 0700), - CreateFile("/d1/d/f", []byte("irrelevant"), 0644), - CreateFile("/d2/f", []byte("irrelevant"), 0644), - CreateFile("/d3/f", []byte("irrelevant"), 0644), - ), - } - - // parentDirectoryPermissionsTest covers directory permissions for updated - // files - parentDirectoryPermissionsTest = []Applier{ - Apply( - CreateDir("/d1", 0700), - CreateDir("/d1/a", 0700), - CreateDir("/d1/a/b", 0700), - CreateDir("/d1/a/b/c", 0700), - CreateFile("/d1/a/b/f", []byte("content1"), 0644), - CreateDir("/d2", 0751), - CreateDir("/d2/a/b", 0751), - CreateDir("/d2/a/b/c", 0751), - CreateFile("/d2/a/b/f", []byte("content1"), 0644), - ), - Apply( - CreateFile("/d1/a/b/f", []byte("content1"), 0644), - Chmod("/d1/a/b/c", 0700), - CreateFile("/d2/a/b/f", []byte("content2"), 0644), - Chmod("/d2/a/b/c", 0751), - ), - } - - hardlinkUnmodified = []Applier{ - baseApplier, - Apply( - CreateFile("/etc/hosts", []byte("127.0.0.1 localhost.localdomain"), 0644), - ), - Apply( - Link("/etc/hosts", "/etc/hosts.deny"), - ), - } - - // Hardlink name 
before with modification - // Tests link is created for unmodified files when a new hard linked file is seen first - hardlinkBeforeUnmodified = []Applier{ - baseApplier, - Apply( - CreateFile("/etc/hosts", []byte("127.0.0.1 localhost.localdomain"), 0644), - ), - Apply( - Link("/etc/hosts", "/etc/before-hosts"), - ), - } - - // Hardlink name after without modification - // tests link is created for modified file with new hardlink - hardlinkBeforeModified = []Applier{ - baseApplier, - Apply( - CreateFile("/etc/hosts", []byte("127.0.0.1 localhost.localdomain"), 0644), - ), - Apply( - Remove("/etc/hosts"), - CreateFile("/etc/hosts", []byte("127.0.0.1 localhost"), 0644), - Link("/etc/hosts", "/etc/before-hosts"), - ), - } -) diff --git a/vendor/github.com/containerd/continuity/fs/hardlink.go b/vendor/github.com/containerd/continuity/fs/hardlink.go deleted file mode 100644 index 762aa45..0000000 --- a/vendor/github.com/containerd/continuity/fs/hardlink.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import "os" - -// GetLinkInfo returns an identifier representing the node a hardlink is pointing -// to. If the file is not hard linked then 0 will be returned. -func GetLinkInfo(fi os.FileInfo) (uint64, bool) { - return getLinkInfo(fi) -} - -// getLinkSource returns a path for the given name and -// file info to its link source in the provided inode -// map. 
If the given file name is not in the map and -// has other links, it is added to the inode map -// to be a source for other link locations. -func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) { - inode, isHardlink := getLinkInfo(fi) - if !isHardlink { - return "", nil - } - - path, ok := inodes[inode] - if !ok { - inodes[inode] = name - } - return path, nil -} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_windows.go b/vendor/github.com/containerd/continuity/fs/hardlink_windows.go deleted file mode 100644 index 7485547..0000000 --- a/vendor/github.com/containerd/continuity/fs/hardlink_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import "os" - -func getLinkInfo(fi os.FileInfo) (uint64, bool) { - return 0, false -} diff --git a/vendor/github.com/containerd/continuity/fs/path.go b/vendor/github.com/containerd/continuity/fs/path.go deleted file mode 100644 index c26be79..0000000 --- a/vendor/github.com/containerd/continuity/fs/path.go +++ /dev/null @@ -1,311 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "bytes" - "context" - "io" - "os" - "path/filepath" - - "github.com/pkg/errors" -) - -var ( - errTooManyLinks = errors.New("too many links") -) - -type currentPath struct { - path string - f os.FileInfo - fullPath string -} - -func pathChange(lower, upper *currentPath) (ChangeKind, string) { - if lower == nil { - if upper == nil { - panic("cannot compare nil paths") - } - return ChangeKindAdd, upper.path - } - if upper == nil { - return ChangeKindDelete, lower.path - } - - switch i := directoryCompare(lower.path, upper.path); { - case i < 0: - // File in lower that is not in upper - return ChangeKindDelete, lower.path - case i > 0: - // File in upper that is not in lower - return ChangeKindAdd, upper.path - default: - return ChangeKindModify, upper.path - } -} - -func directoryCompare(a, b string) int { - l := len(a) - if len(b) < l { - l = len(b) - } - for i := 0; i < l; i++ { - c1, c2 := a[i], b[i] - if c1 == filepath.Separator { - c1 = byte(0) - } - if c2 == filepath.Separator { - c2 = byte(0) - } - if c1 < c2 { - return -1 - } - if c1 > c2 { - return +1 - } - } - if len(a) < len(b) { - return -1 - } - if len(a) > len(b) { - return +1 - } - return 0 -} - -func sameFile(f1, f2 *currentPath) (bool, error) { - if os.SameFile(f1.f, f2.f) { - return true, nil - } - - equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys()) - if err != nil || !equalStat { - return equalStat, err - } - - if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq { - return eq, err - } - - // If not a directory also check size, modtime, 
and content - if !f1.f.IsDir() { - if f1.f.Size() != f2.f.Size() { - return false, nil - } - t1 := f1.f.ModTime() - t2 := f2.f.ModTime() - - if t1.Unix() != t2.Unix() { - return false, nil - } - - // If the timestamp may have been truncated in both of the - // files, check content of file to determine difference - if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 { - if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink { - return compareSymlinkTarget(f1.fullPath, f2.fullPath) - } - if f1.f.Size() == 0 { // if file sizes are zero length, the files are the same by definition - return true, nil - } - return compareFileContent(f1.fullPath, f2.fullPath) - } else if t1.Nanosecond() != t2.Nanosecond() { - return false, nil - } - } - - return true, nil -} - -func compareSymlinkTarget(p1, p2 string) (bool, error) { - t1, err := os.Readlink(p1) - if err != nil { - return false, err - } - t2, err := os.Readlink(p2) - if err != nil { - return false, err - } - return t1 == t2, nil -} - -const compareChuckSize = 32 * 1024 - -// compareFileContent compares the content of 2 same sized files -// by comparing each byte. 
-func compareFileContent(p1, p2 string) (bool, error) { - f1, err := os.Open(p1) - if err != nil { - return false, err - } - defer f1.Close() - f2, err := os.Open(p2) - if err != nil { - return false, err - } - defer f2.Close() - - b1 := make([]byte, compareChuckSize) - b2 := make([]byte, compareChuckSize) - for { - n1, err1 := f1.Read(b1) - if err1 != nil && err1 != io.EOF { - return false, err1 - } - n2, err2 := f2.Read(b2) - if err2 != nil && err2 != io.EOF { - return false, err2 - } - if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) { - return false, nil - } - if err1 == io.EOF && err2 == io.EOF { - return true, nil - } - } -} - -func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error { - return filepath.Walk(root, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(root, path) - if err != nil { - return err - } - - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - p := ¤tPath{ - path: path, - f: f, - fullPath: filepath.Join(root, path), - } - - select { - case <-ctx.Done(): - return ctx.Err() - case pathC <- p: - return nil - } - }) -} - -func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case p := <-pathC: - return p, nil - } -} - -// RootPath joins a path with a root, evaluating and bounding any -// symlink to the root directory. 
-func RootPath(root, path string) (string, error) { - if path == "" { - return root, nil - } - var linksWalked int // to protect against cycles - for { - i := linksWalked - newpath, err := walkLinks(root, path, &linksWalked) - if err != nil { - return "", err - } - path = newpath - if i == linksWalked { - newpath = filepath.Join("/", newpath) - if path == newpath { - return filepath.Join(root, newpath), nil - } - path = newpath - } - } -} - -func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) { - if *linksWalked > 255 { - return "", false, errTooManyLinks - } - - path = filepath.Join("/", path) - if path == "/" { - return path, false, nil - } - realPath := filepath.Join(root, path) - - fi, err := os.Lstat(realPath) - if err != nil { - // If path does not yet exist, treat as non-symlink - if os.IsNotExist(err) { - return path, false, nil - } - return "", false, err - } - if fi.Mode()&os.ModeSymlink == 0 { - return path, false, nil - } - newpath, err = os.Readlink(realPath) - if err != nil { - return "", false, err - } - *linksWalked++ - return newpath, true, nil -} - -func walkLinks(root, path string, linksWalked *int) (string, error) { - switch dir, file := filepath.Split(path); { - case dir == "": - newpath, _, err := walkLink(root, file, linksWalked) - return newpath, err - case file == "": - if os.IsPathSeparator(dir[len(dir)-1]) { - if dir == "/" { - return dir, nil - } - return walkLinks(root, dir[:len(dir)-1], linksWalked) - } - newpath, _, err := walkLink(root, dir, linksWalked) - return newpath, err - default: - newdir, err := walkLinks(root, dir, linksWalked) - if err != nil { - return "", err - } - newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked) - if err != nil { - return "", err - } - if !islink { - return newpath, nil - } - if filepath.IsAbs(newpath) { - return newpath, nil - } - return filepath.Join(newdir, newpath), nil - } -} diff --git 
a/vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go b/vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go deleted file mode 100644 index cb7400a..0000000 --- a/vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build darwin freebsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "syscall" - "time" -) - -// StatAtime returns the access time from a stat struct -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atimespec -} - -// StatCtime returns the created time from a stat struct -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctimespec -} - -// StatMtime returns the modified time from a stat struct -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtimespec -} - -// StatATimeAsTime returns the access time as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go b/vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go deleted file mode 100644 index c68df6e..0000000 --- a/vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build linux openbsd - -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - // The int64 conversions ensure the line compiles for 32-bit systems as well. - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/continuity/fs/time.go b/vendor/github.com/containerd/continuity/fs/time.go deleted file mode 100644 index cde4561..0000000 --- a/vendor/github.com/containerd/continuity/fs/time.go +++ /dev/null @@ -1,29 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import "time" - -// Gnu tar and the go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files, we handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a == b || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} diff --git a/vendor/github.com/containerd/continuity/groups_unix.go b/vendor/github.com/containerd/continuity/groups_unix.go deleted file mode 100644 index 7b86767..0000000 --- a/vendor/github.com/containerd/continuity/groups_unix.go +++ /dev/null @@ -1,130 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -//nolint:unused,deadcode -package continuity - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// TODO(stevvooe): This needs a lot of work before we can call it useful. 
- -type groupIndex struct { - byName map[string]*group - byGID map[int]*group -} - -func getGroupIndex() (*groupIndex, error) { - f, err := os.Open("/etc/group") - if err != nil { - return nil, err - } - defer f.Close() - - groups, err := parseGroups(f) - if err != nil { - return nil, err - } - - return newGroupIndex(groups), nil -} - -func newGroupIndex(groups []group) *groupIndex { - gi := &groupIndex{ - byName: make(map[string]*group), - byGID: make(map[int]*group), - } - - for i, group := range groups { - gi.byGID[group.gid] = &groups[i] - gi.byName[group.name] = &groups[i] - } - - return gi -} - -type group struct { - name string - gid int - members []string -} - -func getGroupName(gid int) (string, error) { - f, err := os.Open("/etc/group") - if err != nil { - return "", err - } - defer f.Close() - - groups, err := parseGroups(f) - if err != nil { - return "", err - } - - for _, group := range groups { - if group.gid == gid { - return group.name, nil - } - } - - return "", fmt.Errorf("no group for gid") -} - -// parseGroups parses an /etc/group file for group names, ids and membership. -// This is unix specific. 
-func parseGroups(rd io.Reader) ([]group, error) { - var groups []group - scanner := bufio.NewScanner(rd) - - for scanner.Scan() { - if strings.HasPrefix(scanner.Text(), "#") { - continue // skip comment - } - - parts := strings.SplitN(scanner.Text(), ":", 4) - - if len(parts) != 4 { - return nil, fmt.Errorf("bad entry: %q", scanner.Text()) - } - - name, _, sgid, smembers := parts[0], parts[1], parts[2], parts[3] - - gid, err := strconv.Atoi(sgid) - if err != nil { - return nil, fmt.Errorf("bad gid: %q", gid) - } - - members := strings.Split(smembers, ",") - - groups = append(groups, group{ - name: name, - gid: gid, - members: members, - }) - } - - if scanner.Err() != nil { - return nil, scanner.Err() - } - - return groups, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks.go b/vendor/github.com/containerd/continuity/hardlinks.go deleted file mode 100644 index e72c0e7..0000000 --- a/vendor/github.com/containerd/continuity/hardlinks.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "os" -) - -var ( - errNotAHardLink = fmt.Errorf("invalid hardlink") -) - -type hardlinkManager struct { - hardlinks map[hardlinkKey][]Resource -} - -func newHardlinkManager() *hardlinkManager { - return &hardlinkManager{ - hardlinks: map[hardlinkKey][]Resource{}, - } -} - -// Add attempts to add the resource to the hardlink manager. 
If the resource -// cannot be considered as a hardlink candidate, errNotAHardLink is returned. -func (hlm *hardlinkManager) Add(fi os.FileInfo, resource Resource) error { - if _, ok := resource.(Hardlinkable); !ok { - return errNotAHardLink - } - - key, err := newHardlinkKey(fi) - if err != nil { - return err - } - - hlm.hardlinks[key] = append(hlm.hardlinks[key], resource) - - return nil -} - -// Merge processes the current state of the hardlink manager and merges any -// shared nodes into hard linked resources. -func (hlm *hardlinkManager) Merge() ([]Resource, error) { - var resources []Resource - for key, linked := range hlm.hardlinks { - if len(linked) < 1 { - return nil, fmt.Errorf("no hardlink entrys for dev, inode pair: %#v", key) - } - - merged, err := Merge(linked...) - if err != nil { - return nil, fmt.Errorf("error merging hardlink: %v", err) - } - - resources = append(resources, merged) - } - - return resources, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks_unix.go b/vendor/github.com/containerd/continuity/hardlinks_unix.go deleted file mode 100644 index 7105a7c..0000000 --- a/vendor/github.com/containerd/continuity/hardlinks_unix.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build linux darwin freebsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "os" - "syscall" -) - -// hardlinkKey provides a tuple-key for managing hardlinks. 
This is system- -// specific. -type hardlinkKey struct { - dev uint64 - inode uint64 -} - -// newHardlinkKey returns a hardlink key for the provided file info. If the -// resource does not represent a possible hardlink, errNotAHardLink will be -// returned. -func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return hardlinkKey{}, fmt.Errorf("cannot resolve (*syscall.Stat_t) from os.FileInfo") - } - - if sys.Nlink < 2 { - // NOTE(stevvooe): This is not always true for all filesystems. We - // should somehow detect this and provided a slow "polyfill" that - // leverages os.SameFile if we detect a filesystem where link counts - // is not really supported. - return hardlinkKey{}, errNotAHardLink - } - - //nolint:unconvert - return hardlinkKey{dev: uint64(sys.Dev), inode: uint64(sys.Ino)}, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks_windows.go b/vendor/github.com/containerd/continuity/hardlinks_windows.go deleted file mode 100644 index 5893f4e..0000000 --- a/vendor/github.com/containerd/continuity/hardlinks_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import "os" - -type hardlinkKey struct{} - -func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) { - // NOTE(stevvooe): Obviously, this is not yet implemented. 
However, the - // makings of an implementation are available in src/os/types_windows.go. More - // investigation needs to be done to figure out exactly how to do this. - return hardlinkKey{}, errNotAHardLink -} diff --git a/vendor/github.com/containerd/continuity/ioutils.go b/vendor/github.com/containerd/continuity/ioutils.go deleted file mode 100644 index 503640e..0000000 --- a/vendor/github.com/containerd/continuity/ioutils.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// AtomicWriteFile atomically writes data to a file by first writing to a -// temp file and calling rename. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - buf := bytes.NewBuffer(data) - return atomicWriteFile(filename, buf, int64(len(data)), perm) -} - -// atomicWriteFile writes data to a file by first writing to a temp -// file and calling rename. 
-func atomicWriteFile(filename string, r io.Reader, dataSize int64, perm os.FileMode) error { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return err - } - err = os.Chmod(f.Name(), perm) - if err != nil { - f.Close() - return err - } - n, err := io.Copy(f, r) - if err == nil && n < dataSize { - f.Close() - return io.ErrShortWrite - } - if err != nil { - f.Close() - return err - } - if err := f.Sync(); err != nil { - f.Close() - return err - } - if err := f.Close(); err != nil { - return err - } - return os.Rename(f.Name(), filename) -} diff --git a/vendor/github.com/containerd/continuity/manifest.go b/vendor/github.com/containerd/continuity/manifest.go deleted file mode 100644 index 299fbcc..0000000 --- a/vendor/github.com/containerd/continuity/manifest.go +++ /dev/null @@ -1,162 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "io" - "log" - "os" - "sort" - - pb "github.com/containerd/continuity/proto" - "github.com/golang/protobuf/proto" -) - -// Manifest provides the contents of a manifest. Users of this struct should -// not typically modify any fields directly. -type Manifest struct { - // Resources specifies all the resources for a manifest in order by path. 
- Resources []Resource -} - -func Unmarshal(p []byte) (*Manifest, error) { - var bm pb.Manifest - - if err := proto.Unmarshal(p, &bm); err != nil { - return nil, err - } - - var m Manifest - for _, b := range bm.Resource { - r, err := fromProto(b) - if err != nil { - return nil, err - } - - m.Resources = append(m.Resources, r) - } - - return &m, nil -} - -func Marshal(m *Manifest) ([]byte, error) { - var bm pb.Manifest - for _, resource := range m.Resources { - bm.Resource = append(bm.Resource, toProto(resource)) - } - - return proto.Marshal(&bm) -} - -func MarshalText(w io.Writer, m *Manifest) error { - var bm pb.Manifest - for _, resource := range m.Resources { - bm.Resource = append(bm.Resource, toProto(resource)) - } - - return proto.MarshalText(w, &bm) -} - -// BuildManifest creates the manifest for the given context -func BuildManifest(ctx Context) (*Manifest, error) { - resourcesByPath := map[string]Resource{} - hardlinks := newHardlinkManager() - - if err := ctx.Walk(func(p string, fi os.FileInfo, err error) error { - if err != nil { - return fmt.Errorf("error walking %s: %v", p, err) - } - - if p == string(os.PathSeparator) { - // skip root - return nil - } - - resource, err := ctx.Resource(p, fi) - if err != nil { - if err == ErrNotFound { - return nil - } - log.Printf("error getting resource %q: %v", p, err) - return err - } - - // add to the hardlink manager - if err := hardlinks.Add(fi, resource); err == nil { - // Resource has been accepted by hardlink manager so we don't add - // it to the resourcesByPath until we merge at the end. - return nil - } else if err != errNotAHardLink { - // handle any other case where we have a proper error. - return fmt.Errorf("adding hardlink %s: %v", p, err) - } - - resourcesByPath[p] = resource - - return nil - }); err != nil { - return nil, err - } - - // merge and post-process the hardlinks. 
- // nolint:misspell - hardlinked, err := hardlinks.Merge() - if err != nil { - return nil, err - } - - // nolint:misspell - for _, resource := range hardlinked { - resourcesByPath[resource.Path()] = resource - } - - var resources []Resource - for _, resource := range resourcesByPath { - resources = append(resources, resource) - } - - sort.Stable(ByPath(resources)) - - return &Manifest{ - Resources: resources, - }, nil -} - -// VerifyManifest verifies all the resources in a manifest -// against files from the given context. -func VerifyManifest(ctx Context, manifest *Manifest) error { - for _, resource := range manifest.Resources { - if err := ctx.Verify(resource); err != nil { - return err - } - } - - return nil -} - -// ApplyManifest applies on the resources in a manifest to -// the given context. -func ApplyManifest(ctx Context, manifest *Manifest) error { - for _, resource := range manifest.Resources { - if err := ctx.Apply(resource); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go deleted file mode 100644 index b0d5a6b..0000000 --- a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go +++ /dev/null @@ -1,101 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package pathdriver - -import ( - "path/filepath" -) - -// PathDriver provides all of the path manipulation functions in a common -// interface. The context should call these and never use the `filepath` -// package or any other package to manipulate paths. -type PathDriver interface { - Join(paths ...string) string - IsAbs(path string) bool - Rel(base, target string) (string, error) - Base(path string) string - Dir(path string) string - Clean(path string) string - Split(path string) (dir, file string) - Separator() byte - Abs(path string) (string, error) - Walk(string, filepath.WalkFunc) error - FromSlash(path string) string - ToSlash(path string) string - Match(pattern, name string) (matched bool, err error) -} - -// pathDriver is a simple default implementation calls the filepath package. -type pathDriver struct{} - -// LocalPathDriver is the exported pathDriver struct for convenience. -var LocalPathDriver PathDriver = &pathDriver{} - -func (*pathDriver) Join(paths ...string) string { - return filepath.Join(paths...) -} - -func (*pathDriver) IsAbs(path string) bool { - return filepath.IsAbs(path) -} - -func (*pathDriver) Rel(base, target string) (string, error) { - return filepath.Rel(base, target) -} - -func (*pathDriver) Base(path string) string { - return filepath.Base(path) -} - -func (*pathDriver) Dir(path string) string { - return filepath.Dir(path) -} - -func (*pathDriver) Clean(path string) string { - return filepath.Clean(path) -} - -func (*pathDriver) Split(path string) (dir, file string) { - return filepath.Split(path) -} - -func (*pathDriver) Separator() byte { - return filepath.Separator -} - -func (*pathDriver) Abs(path string) (string, error) { - return filepath.Abs(path) -} - -// Note that filepath.Walk calls os.Stat, so if the context wants to -// to call Driver.Stat() for Walk, they need to create a new struct that -// overrides this method. 
-func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error { - return filepath.Walk(root, walkFn) -} - -func (*pathDriver) FromSlash(path string) string { - return filepath.FromSlash(path) -} - -func (*pathDriver) ToSlash(path string) string { - return filepath.ToSlash(path) -} - -func (*pathDriver) Match(pattern, name string) (bool, error) { - return filepath.Match(pattern, name) -} diff --git a/vendor/github.com/containerd/continuity/proto/manifest.pb.go b/vendor/github.com/containerd/continuity/proto/manifest.pb.go deleted file mode 100644 index c83a9e7..0000000 --- a/vendor/github.com/containerd/continuity/proto/manifest.pb.go +++ /dev/null @@ -1,292 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: manifest.proto - -/* -Package proto is a generated protocol buffer package. - -It is generated from these files: - manifest.proto - -It has these top-level messages: - Manifest - Resource - XAttr - ADSEntry -*/ -package proto - -import proto1 "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto1.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto1.ProtoPackageIsVersion2 // please upgrade the proto package - -// Manifest specifies the entries in a container bundle, keyed and sorted by -// path. 
-type Manifest struct { - Resource []*Resource `protobuf:"bytes,1,rep,name=resource" json:"resource,omitempty"` -} - -func (m *Manifest) Reset() { *m = Manifest{} } -func (m *Manifest) String() string { return proto1.CompactTextString(m) } -func (*Manifest) ProtoMessage() {} -func (*Manifest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Manifest) GetResource() []*Resource { - if m != nil { - return m.Resource - } - return nil -} - -type Resource struct { - // Path specifies the path from the bundle root. If more than one - // path is present, the entry may represent a hardlink, rather than using - // a link target. The path format is operating system specific. - Path []string `protobuf:"bytes,1,rep,name=path" json:"path,omitempty"` - // Uid specifies the user id for the resource. - Uid int64 `protobuf:"varint,2,opt,name=uid" json:"uid,omitempty"` - // Gid specifies the group id for the resource. - Gid int64 `protobuf:"varint,3,opt,name=gid" json:"gid,omitempty"` - // user and group are not currently used but their field numbers have been - // reserved for future use. As such, they are marked as deprecated. - User string `protobuf:"bytes,4,opt,name=user" json:"user,omitempty"` - Group string `protobuf:"bytes,5,opt,name=group" json:"group,omitempty"` - // Mode defines the file mode and permissions. We've used the same - // bit-packing from Go's os package, - // http://golang.org/pkg/os/#FileMode, since they've done the work of - // creating a cross-platform layout. - Mode uint32 `protobuf:"varint,6,opt,name=mode" json:"mode,omitempty"` - // Size specifies the size in bytes of the resource. This is only valid - // for regular files. - Size uint64 `protobuf:"varint,7,opt,name=size" json:"size,omitempty"` - // Digest specifies the content digest of the target file. Only valid for - // regular files. The strings are formatted in OCI style, i.e. :. 
- // For detailed information about the format, please refer to OCI Image Spec: - // https://github.com/opencontainers/image-spec/blob/master/descriptor.md#digests-and-verification - // The digests are sorted in lexical order and implementations may choose - // which algorithms they prefer. - Digest []string `protobuf:"bytes,8,rep,name=digest" json:"digest,omitempty"` - // Target defines the target of a hard or soft link. Absolute links start - // with a slash and specify the resource relative to the bundle root. - // Relative links do not start with a slash and are relative to the - // resource path. - Target string `protobuf:"bytes,9,opt,name=target" json:"target,omitempty"` - // Major specifies the major device number for character and block devices. - Major uint64 `protobuf:"varint,10,opt,name=major" json:"major,omitempty"` - // Minor specifies the minor device number for character and block devices. - Minor uint64 `protobuf:"varint,11,opt,name=minor" json:"minor,omitempty"` - // Xattr provides storage for extended attributes for the target resource. - Xattr []*XAttr `protobuf:"bytes,12,rep,name=xattr" json:"xattr,omitempty"` - // Ads stores one or more alternate data streams for the target resource. 
- Ads []*ADSEntry `protobuf:"bytes,13,rep,name=ads" json:"ads,omitempty"` -} - -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto1.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *Resource) GetPath() []string { - if m != nil { - return m.Path - } - return nil -} - -func (m *Resource) GetUid() int64 { - if m != nil { - return m.Uid - } - return 0 -} - -func (m *Resource) GetGid() int64 { - if m != nil { - return m.Gid - } - return 0 -} - -func (m *Resource) GetUser() string { - if m != nil { - return m.User - } - return "" -} - -func (m *Resource) GetGroup() string { - if m != nil { - return m.Group - } - return "" -} - -func (m *Resource) GetMode() uint32 { - if m != nil { - return m.Mode - } - return 0 -} - -func (m *Resource) GetSize() uint64 { - if m != nil { - return m.Size - } - return 0 -} - -func (m *Resource) GetDigest() []string { - if m != nil { - return m.Digest - } - return nil -} - -func (m *Resource) GetTarget() string { - if m != nil { - return m.Target - } - return "" -} - -func (m *Resource) GetMajor() uint64 { - if m != nil { - return m.Major - } - return 0 -} - -func (m *Resource) GetMinor() uint64 { - if m != nil { - return m.Minor - } - return 0 -} - -func (m *Resource) GetXattr() []*XAttr { - if m != nil { - return m.Xattr - } - return nil -} - -func (m *Resource) GetAds() []*ADSEntry { - if m != nil { - return m.Ads - } - return nil -} - -// XAttr encodes extended attributes for a resource. -type XAttr struct { - // Name specifies the attribute name. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Data specifies the associated data for the attribute. 
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *XAttr) Reset() { *m = XAttr{} } -func (m *XAttr) String() string { return proto1.CompactTextString(m) } -func (*XAttr) ProtoMessage() {} -func (*XAttr) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *XAttr) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *XAttr) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -// ADSEntry encodes information for a Windows Alternate Data Stream. -type ADSEntry struct { - // Name specifices the stream name. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Data specifies the stream data. - // See also the description about the digest below. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - // Digest is a CAS representation of the stream data. - // - // At least one of data or digest MUST be specified, and either one of them - // SHOULD be specified. - // - // How to access the actual data using the digest is implementation-specific, - // and implementations can choose not to implement digest. - // So, digest SHOULD be used only when the stream data is large. 
- Digest string `protobuf:"bytes,3,opt,name=digest" json:"digest,omitempty"` -} - -func (m *ADSEntry) Reset() { *m = ADSEntry{} } -func (m *ADSEntry) String() string { return proto1.CompactTextString(m) } -func (*ADSEntry) ProtoMessage() {} -func (*ADSEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *ADSEntry) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *ADSEntry) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *ADSEntry) GetDigest() string { - if m != nil { - return m.Digest - } - return "" -} - -func init() { - proto1.RegisterType((*Manifest)(nil), "proto.Manifest") - proto1.RegisterType((*Resource)(nil), "proto.Resource") - proto1.RegisterType((*XAttr)(nil), "proto.XAttr") - proto1.RegisterType((*ADSEntry)(nil), "proto.ADSEntry") -} - -func init() { proto1.RegisterFile("manifest.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 317 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0x4f, 0x4b, 0xf3, 0x40, - 0x10, 0xc6, 0x49, 0x93, 0xf4, 0x4d, 0xa7, 0xed, 0xab, 0x2c, 0x52, 0xe6, 0x18, 0x73, 0x0a, 0x08, - 0x15, 0xf4, 0xe0, 0xb9, 0xa2, 0x17, 0xc1, 0xcb, 0x7a, 0xf1, 0xba, 0xba, 0x6b, 0x5c, 0x21, 0xd9, - 0xb0, 0xd9, 0x80, 0xfa, 0xe5, 0xfc, 0x6a, 0x32, 0xb3, 0x69, 0xd1, 0x9b, 0xa7, 0x3c, 0xcf, 0x6f, - 0xfe, 0x64, 0xf6, 0x81, 0xff, 0xad, 0xea, 0xec, 0x8b, 0x19, 0xc2, 0xb6, 0xf7, 0x2e, 0x38, 0x91, - 0xf3, 0xa7, 0xba, 0x82, 0xe2, 0x7e, 0x2a, 0x88, 0x33, 0x28, 0xbc, 0x19, 0xdc, 0xe8, 0x9f, 0x0d, - 0x26, 0x65, 0x5a, 0x2f, 0x2f, 0x8e, 0x62, 0xf3, 0x56, 0x4e, 0x58, 0x1e, 0x1a, 0xaa, 0xaf, 0x19, - 0x14, 0x7b, 0x2c, 0x04, 0x64, 0xbd, 0x0a, 0xaf, 0x3c, 0xb5, 0x90, 0xac, 0xc5, 0x31, 0xa4, 0xa3, - 0xd5, 0x38, 0x2b, 0x93, 0x3a, 0x95, 0x24, 0x89, 0x34, 0x56, 0x63, 0x1a, 0x49, 0x63, 0xb5, 0xd8, - 0x40, 0x36, 0x0e, 0xc6, 0x63, 0x56, 0x26, 0xf5, 0xe2, 0x7a, 0x86, 0x89, 0x64, 0x2f, 0x10, 0xf2, - 0xc6, 
0xbb, 0xb1, 0xc7, 0xfc, 0x50, 0x88, 0x80, 0xfe, 0xd4, 0x3a, 0x6d, 0x70, 0x5e, 0x26, 0xf5, - 0x5a, 0xb2, 0x26, 0x36, 0xd8, 0x4f, 0x83, 0xff, 0xca, 0xa4, 0xce, 0x24, 0x6b, 0xb1, 0x81, 0xb9, - 0xb6, 0x8d, 0x19, 0x02, 0x16, 0x7c, 0xd3, 0xe4, 0x88, 0x07, 0xe5, 0x1b, 0x13, 0x70, 0x41, 0xab, - 0xe5, 0xe4, 0xc4, 0x09, 0xe4, 0xad, 0x7a, 0x73, 0x1e, 0x81, 0x97, 0x44, 0xc3, 0xd4, 0x76, 0xce, - 0xe3, 0x72, 0xa2, 0x64, 0x44, 0x05, 0xf9, 0xbb, 0x0a, 0xc1, 0xe3, 0x8a, 0x43, 0x5a, 0x4d, 0x21, - 0x3d, 0xee, 0x42, 0xf0, 0x32, 0x96, 0xc4, 0x29, 0xa4, 0x4a, 0x0f, 0xb8, 0xfe, 0x15, 0xe3, 0xee, - 0xe6, 0xe1, 0xb6, 0x0b, 0xfe, 0x43, 0x52, 0xad, 0x3a, 0x87, 0x9c, 0x47, 0xe8, 0xfe, 0x4e, 0xb5, - 0x94, 0x39, 0x5d, 0xc4, 0x9a, 0x98, 0x56, 0x41, 0x71, 0x7c, 0x2b, 0xc9, 0xba, 0xba, 0x83, 0x62, - 0xbf, 0xe1, 0xaf, 0x33, 0x3f, 0x72, 0x48, 0xe3, 0x7b, 0xa3, 0x7b, 0x9a, 0xf3, 0x45, 0x97, 0xdf, - 0x01, 0x00, 0x00, 0xff, 0xff, 0xef, 0x27, 0x99, 0xf7, 0x17, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/containerd/continuity/proto/manifest.proto b/vendor/github.com/containerd/continuity/proto/manifest.proto deleted file mode 100644 index 66ef80f..0000000 --- a/vendor/github.com/containerd/continuity/proto/manifest.proto +++ /dev/null @@ -1,97 +0,0 @@ -syntax = "proto3"; - -package proto; - -// Manifest specifies the entries in a container bundle, keyed and sorted by -// path. -message Manifest { - repeated Resource resource = 1; -} - -message Resource { - // Path specifies the path from the bundle root. If more than one - // path is present, the entry may represent a hardlink, rather than using - // a link target. The path format is operating system specific. - repeated string path = 1; - - // NOTE(stevvooe): Need to define clear precedence for user/group/uid/gid precedence. - - // Uid specifies the user id for the resource. - int64 uid = 2; - - // Gid specifies the group id for the resource. 
- int64 gid = 3; - - // user and group are not currently used but their field numbers have been - // reserved for future use. As such, they are marked as deprecated. - string user = 4 [deprecated=true]; // "deprecated" stands for "reserved" here - string group = 5 [deprecated=true]; // "deprecated" stands for "reserved" here - - // Mode defines the file mode and permissions. We've used the same - // bit-packing from Go's os package, - // http://golang.org/pkg/os/#FileMode, since they've done the work of - // creating a cross-platform layout. - uint32 mode = 6; - - // NOTE(stevvooe): Beyond here, we start defining type specific fields. - - // Size specifies the size in bytes of the resource. This is only valid - // for regular files. - uint64 size = 7; - - // Digest specifies the content digest of the target file. Only valid for - // regular files. The strings are formatted in OCI style, i.e. :. - // For detailed information about the format, please refer to OCI Image Spec: - // https://github.com/opencontainers/image-spec/blob/master/descriptor.md#digests-and-verification - // The digests are sorted in lexical order and implementations may choose - // which algorithms they prefer. - repeated string digest = 8; - - // Target defines the target of a hard or soft link. Absolute links start - // with a slash and specify the resource relative to the bundle root. - // Relative links do not start with a slash and are relative to the - // resource path. - string target = 9; - - // Major specifies the major device number for character and block devices. - uint64 major = 10; - - // Minor specifies the minor device number for character and block devices. - uint64 minor = 11; - - // Xattr provides storage for extended attributes for the target resource. - repeated XAttr xattr = 12; - - // Ads stores one or more alternate data streams for the target resource. - repeated ADSEntry ads = 13; - -} - -// XAttr encodes extended attributes for a resource. 
-message XAttr { - // Name specifies the attribute name. - string name = 1; - - // Data specifies the associated data for the attribute. - bytes data = 2; -} - -// ADSEntry encodes information for a Windows Alternate Data Stream. -message ADSEntry { - // Name specifices the stream name. - string name = 1; - - // Data specifies the stream data. - // See also the description about the digest below. - bytes data = 2; - - // Digest is a CAS representation of the stream data. - // - // At least one of data or digest MUST be specified, and either one of them - // SHOULD be specified. - // - // How to access the actual data using the digest is implementation-specific, - // and implementations can choose not to implement digest. - // So, digest SHOULD be used only when the stream data is large. - string digest = 3; -} diff --git a/vendor/github.com/containerd/continuity/resource.go b/vendor/github.com/containerd/continuity/resource.go deleted file mode 100644 index d2f52bd..0000000 --- a/vendor/github.com/containerd/continuity/resource.go +++ /dev/null @@ -1,590 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "errors" - "fmt" - "os" - "reflect" - "sort" - - pb "github.com/containerd/continuity/proto" - "github.com/opencontainers/go-digest" -) - -// TODO(stevvooe): A record based model, somewhat sketched out at the bottom -// of this file, will be more flexible. 
Another possibly is to tie the package -// interface directly to the protobuf type. This will have efficiency -// advantages at the cost coupling the nasty codegen types to the exported -// interface. - -type Resource interface { - // Path provides the primary resource path relative to the bundle root. In - // cases where resources have more than one path, such as with hard links, - // this will return the primary path, which is often just the first entry. - Path() string - - // Mode returns the - Mode() os.FileMode - - UID() int64 - GID() int64 -} - -// ByPath provides the canonical sort order for a set of resources. Use with -// sort.Stable for deterministic sorting. -type ByPath []Resource - -func (bp ByPath) Len() int { return len(bp) } -func (bp ByPath) Swap(i, j int) { bp[i], bp[j] = bp[j], bp[i] } -func (bp ByPath) Less(i, j int) bool { return bp[i].Path() < bp[j].Path() } - -type XAttrer interface { - XAttrs() map[string][]byte -} - -// Hardlinkable is an interface that a resource type satisfies if it can be a -// hardlink target. -type Hardlinkable interface { - // Paths returns all paths of the resource, including the primary path - // returned by Resource.Path. If len(Paths()) > 1, the resource is a hard - // link. - Paths() []string -} - -type RegularFile interface { - Resource - XAttrer - Hardlinkable - - Size() int64 - Digests() []digest.Digest -} - -// Merge two or more Resources into new file. Typically, this should be -// used to merge regular files as hardlinks. If the files are not identical, -// other than Paths and Digests, the merge will fail and an error will be -// returned. 
-func Merge(fs ...Resource) (Resource, error) { - if len(fs) < 1 { - return nil, fmt.Errorf("please provide a resource to merge") - } - - if len(fs) == 1 { - return fs[0], nil - } - - var paths []string - var digests []digest.Digest - bypath := map[string][]Resource{} - - // The attributes are all compared against the first to make sure they - // agree before adding to the above collections. If any of these don't - // correctly validate, the merge fails. - prototype := fs[0] - xattrs := make(map[string][]byte) - - // initialize xattrs for use below. All files must have same xattrs. - if prototypeXAttrer, ok := prototype.(XAttrer); ok { - for attr, value := range prototypeXAttrer.XAttrs() { - xattrs[attr] = value - } - } - - for _, f := range fs { - h, isHardlinkable := f.(Hardlinkable) - if !isHardlinkable { - return nil, errNotAHardLink - } - - if f.Mode() != prototype.Mode() { - return nil, fmt.Errorf("modes do not match: %v != %v", f.Mode(), prototype.Mode()) - } - - if f.UID() != prototype.UID() { - return nil, fmt.Errorf("uid does not match: %v != %v", f.UID(), prototype.UID()) - } - - if f.GID() != prototype.GID() { - return nil, fmt.Errorf("gid does not match: %v != %v", f.GID(), prototype.GID()) - } - - if xattrer, ok := f.(XAttrer); ok { - fxattrs := xattrer.XAttrs() - if !reflect.DeepEqual(fxattrs, xattrs) { - return nil, fmt.Errorf("resource %q xattrs do not match: %v != %v", f, fxattrs, xattrs) - } - } - - for _, p := range h.Paths() { - pfs, ok := bypath[p] - if !ok { - // ensure paths are unique by only appending on a new path. 
- paths = append(paths, p) - } - - bypath[p] = append(pfs, f) - } - - if regFile, isRegFile := f.(RegularFile); isRegFile { - prototypeRegFile, prototypeIsRegFile := prototype.(RegularFile) - if !prototypeIsRegFile { - return nil, errors.New("prototype is not a regular file") - } - - if regFile.Size() != prototypeRegFile.Size() { - return nil, fmt.Errorf("size does not match: %v != %v", regFile.Size(), prototypeRegFile.Size()) - } - - digests = append(digests, regFile.Digests()...) - } else if device, isDevice := f.(Device); isDevice { - prototypeDevice, prototypeIsDevice := prototype.(Device) - if !prototypeIsDevice { - return nil, errors.New("prototype is not a device") - } - - if device.Major() != prototypeDevice.Major() { - return nil, fmt.Errorf("major number does not match: %v != %v", device.Major(), prototypeDevice.Major()) - } - if device.Minor() != prototypeDevice.Minor() { - return nil, fmt.Errorf("minor number does not match: %v != %v", device.Minor(), prototypeDevice.Minor()) - } - } else if _, isNamedPipe := f.(NamedPipe); isNamedPipe { - _, prototypeIsNamedPipe := prototype.(NamedPipe) - if !prototypeIsNamedPipe { - return nil, errors.New("prototype is not a named pipe") - } - } else { - return nil, errNotAHardLink - } - } - - sort.Stable(sort.StringSlice(paths)) - - // Choose a "canonical" file. Really, it is just the first file to sort - // against. We also effectively select the very first digest as the - // "canonical" one for this file. - first := bypath[paths[0]][0] - - resource := resource{ - paths: paths, - mode: first.Mode(), - uid: first.UID(), - gid: first.GID(), - xattrs: xattrs, - } - - switch typedF := first.(type) { - case RegularFile: - var err error - digests, err = uniqifyDigests(digests...) 
- if err != nil { - return nil, err - } - - return ®ularFile{ - resource: resource, - size: typedF.Size(), - digests: digests, - }, nil - case Device: - return &device{ - resource: resource, - major: typedF.Major(), - minor: typedF.Minor(), - }, nil - - case NamedPipe: - return &namedPipe{ - resource: resource, - }, nil - - default: - return nil, errNotAHardLink - } -} - -type Directory interface { - Resource - XAttrer - - // Directory is a no-op method to identify directory objects by interface. - Directory() -} - -type SymLink interface { - Resource - - // Target returns the target of the symlink contained in the . - Target() string -} - -type NamedPipe interface { - Resource - Hardlinkable - XAttrer - - // Pipe is a no-op method to allow consistent resolution of NamedPipe - // interface. - Pipe() -} - -type Device interface { - Resource - Hardlinkable - XAttrer - - Major() uint64 - Minor() uint64 -} - -type resource struct { - paths []string - mode os.FileMode - uid, gid int64 - xattrs map[string][]byte -} - -var _ Resource = &resource{} - -func (r *resource) Path() string { - if len(r.paths) < 1 { - return "" - } - - return r.paths[0] -} - -func (r *resource) Mode() os.FileMode { - return r.mode -} - -func (r *resource) UID() int64 { - return r.uid -} - -func (r *resource) GID() int64 { - return r.gid -} - -type regularFile struct { - resource - size int64 - digests []digest.Digest -} - -var _ RegularFile = ®ularFile{} - -// newRegularFile returns the RegularFile, using the populated base resource -// and one or more digests of the content. 
-func newRegularFile(base resource, paths []string, size int64, dgsts ...digest.Digest) (RegularFile, error) { - if !base.Mode().IsRegular() { - return nil, fmt.Errorf("not a regular file") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - // make our own copy of digests - ds := make([]digest.Digest, len(dgsts)) - copy(ds, dgsts) - - return ®ularFile{ - resource: base, - size: size, - digests: ds, - }, nil -} - -func (rf *regularFile) Paths() []string { - paths := make([]string, len(rf.paths)) - copy(paths, rf.paths) - return paths -} - -func (rf *regularFile) Size() int64 { - return rf.size -} - -func (rf *regularFile) Digests() []digest.Digest { - digests := make([]digest.Digest, len(rf.digests)) - copy(digests, rf.digests) - return digests -} - -func (rf *regularFile) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(rf.xattrs)) - - for attr, value := range rf.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -type directory struct { - resource -} - -var _ Directory = &directory{} - -func newDirectory(base resource) (Directory, error) { - if !base.Mode().IsDir() { - return nil, fmt.Errorf("not a directory") - } - - return &directory{ - resource: base, - }, nil -} - -func (d *directory) Directory() {} - -func (d *directory) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(d.xattrs)) - - for attr, value := range d.xattrs { - xattrs[attr] = append(xattrs[attr], value...) 
- } - - return xattrs -} - -type symLink struct { - resource - target string -} - -var _ SymLink = &symLink{} - -func newSymLink(base resource, target string) (SymLink, error) { - if base.Mode()&os.ModeSymlink == 0 { - return nil, fmt.Errorf("not a symlink") - } - - return &symLink{ - resource: base, - target: target, - }, nil -} - -func (l *symLink) Target() string { - return l.target -} - -type namedPipe struct { - resource -} - -var _ NamedPipe = &namedPipe{} - -func newNamedPipe(base resource, paths []string) (NamedPipe, error) { - if base.Mode()&os.ModeNamedPipe == 0 { - return nil, fmt.Errorf("not a namedpipe") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - return &namedPipe{ - resource: base, - }, nil -} - -func (np *namedPipe) Pipe() {} - -func (np *namedPipe) Paths() []string { - paths := make([]string, len(np.paths)) - copy(paths, np.paths) - return paths -} - -func (np *namedPipe) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(np.xattrs)) - - for attr, value := range np.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -type device struct { - resource - major, minor uint64 -} - -var _ Device = &device{} - -func newDevice(base resource, paths []string, major, minor uint64) (Device, error) { - if base.Mode()&os.ModeDevice == 0 { - return nil, fmt.Errorf("not a device") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - return &device{ - resource: base, - major: major, - minor: minor, - }, nil -} - -func (d *device) Paths() []string { - paths := make([]string, len(d.paths)) - copy(paths, d.paths) - return paths -} - -func (d *device) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(d.xattrs)) - - for attr, value := range d.xattrs { - xattrs[attr] = append(xattrs[attr], value...) 
- } - - return xattrs -} - -func (d device) Major() uint64 { - return d.major -} - -func (d device) Minor() uint64 { - return d.minor -} - -// toProto converts a resource to a protobuf record. We'd like to push this -// the individual types but we want to keep this all together during -// prototyping. -func toProto(resource Resource) *pb.Resource { - b := &pb.Resource{ - Path: []string{resource.Path()}, - Mode: uint32(resource.Mode()), - Uid: resource.UID(), - Gid: resource.GID(), - } - - if xattrer, ok := resource.(XAttrer); ok { - // Sorts the XAttrs by name for consistent ordering. - keys := []string{} - xattrs := xattrer.XAttrs() - for k := range xattrs { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - b.Xattr = append(b.Xattr, &pb.XAttr{Name: k, Data: xattrs[k]}) - } - } - - switch r := resource.(type) { - case RegularFile: - b.Path = r.Paths() - b.Size = uint64(r.Size()) - - for _, dgst := range r.Digests() { - b.Digest = append(b.Digest, dgst.String()) - } - case SymLink: - b.Target = r.Target() - case Device: - b.Major, b.Minor = r.Major(), r.Minor() - b.Path = r.Paths() - case NamedPipe: - b.Path = r.Paths() - } - - // enforce a few stability guarantees that may not be provided by the - // resource implementation. - sort.Strings(b.Path) - - return b -} - -// fromProto converts from a protobuf Resource to a Resource interface. -func fromProto(b *pb.Resource) (Resource, error) { - base := &resource{ - paths: b.Path, - mode: os.FileMode(b.Mode), - uid: b.Uid, - gid: b.Gid, - } - - base.xattrs = make(map[string][]byte, len(b.Xattr)) - - for _, attr := range b.Xattr { - base.xattrs[attr.Name] = attr.Data - } - - switch { - case base.Mode().IsRegular(): - dgsts := make([]digest.Digest, len(b.Digest)) - for i, dgst := range b.Digest { - // TODO(stevvooe): Should we be validating at this point? - dgsts[i] = digest.Digest(dgst) - } - - return newRegularFile(*base, b.Path, int64(b.Size), dgsts...) 
- case base.Mode().IsDir(): - return newDirectory(*base) - case base.Mode()&os.ModeSymlink != 0: - return newSymLink(*base, b.Target) - case base.Mode()&os.ModeNamedPipe != 0: - return newNamedPipe(*base, b.Path) - case base.Mode()&os.ModeDevice != 0: - return newDevice(*base, b.Path, b.Major, b.Minor) - } - - return nil, fmt.Errorf("unknown resource record (%#v): %s", b, base.Mode()) -} - -// NOTE(stevvooe): An alternative model that supports inline declaration. -// Convenient for unit testing where inline declarations may be desirable but -// creates an awkward API for the standard use case. - -// type ResourceKind int - -// const ( -// ResourceRegularFile = iota + 1 -// ResourceDirectory -// ResourceSymLink -// Resource -// ) - -// type Resource struct { -// Kind ResourceKind -// Paths []string -// Mode os.FileMode -// UID string -// GID string -// Size int64 -// Digests []digest.Digest -// Target string -// Major, Minor int -// XAttrs map[string][]byte -// } - -// type RegularFile struct { -// Paths []string -// Size int64 -// Digests []digest.Digest -// Perm os.FileMode // os.ModePerm + sticky, setuid, setgid -// } diff --git a/vendor/github.com/containerd/continuity/resource_unix.go b/vendor/github.com/containerd/continuity/resource_unix.go deleted file mode 100644 index 0e103cc..0000000 --- a/vendor/github.com/containerd/continuity/resource_unix.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build linux darwin freebsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "os" - "syscall" -) - -// newBaseResource returns a *resource, populated with data from p and fi, -// where p will be populated directly. -func newBaseResource(p string, fi os.FileInfo) (*resource, error) { - // TODO(stevvooe): This need to be resolved for the container's root, - // where here we are really getting the host OS's value. We need to allow - // this be passed in and fixed up to make these uid/gid mappings portable. - // Either this can be part of the driver or we can achieve it through some - // other mechanism. - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - // TODO(stevvooe): This may not be a hard error for all platforms. We - // may want to move this to the driver. - return nil, fmt.Errorf("unable to resolve syscall.Stat_t from (os.FileInfo).Sys(): %#v", fi) - } - - return &resource{ - paths: []string{p}, - mode: fi.Mode(), - - uid: int64(sys.Uid), - gid: int64(sys.Gid), - - // NOTE(stevvooe): Population of shared xattrs field is deferred to - // the resource types that populate it. Since they are a property of - // the context, they must set there. - }, nil -} diff --git a/vendor/github.com/containerd/continuity/resource_windows.go b/vendor/github.com/containerd/continuity/resource_windows.go deleted file mode 100644 index f980180..0000000 --- a/vendor/github.com/containerd/continuity/resource_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import "os" - -// newBaseResource returns a *resource, populated with data from p and fi, -// where p will be populated directly. -func newBaseResource(p string, fi os.FileInfo) (*resource, error) { - return &resource{ - paths: []string{p}, - mode: fi.Mode(), - }, nil -} diff --git a/vendor/github.com/containerd/continuity/sysx/README.md b/vendor/github.com/containerd/continuity/sysx/README.md deleted file mode 100644 index ad7aee5..0000000 --- a/vendor/github.com/containerd/continuity/sysx/README.md +++ /dev/null @@ -1,3 +0,0 @@ -This package is for internal use only. It is intended to only have -temporary changes before they are upstreamed to golang.org/x/sys/ -(a.k.a. https://github.com/golang/sys). diff --git a/vendor/github.com/containerd/continuity/sysx/generate.sh b/vendor/github.com/containerd/continuity/sysx/generate.sh deleted file mode 100644 index 87d708d..0000000 --- a/vendor/github.com/containerd/continuity/sysx/generate.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -# Copyright The containerd Authors. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -set -e - -mksyscall="$(go env GOROOT)/src/syscall/mksyscall.pl" - -fix() { - sed 's,^package syscall$,package sysx,' \ - | sed 's,^import "unsafe"$,import (\n\t"syscall"\n\t"unsafe"\n),' \ - | gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \ - | gofmt -r='Syscall6 -> syscall.Syscall6' \ - | gofmt -r='Syscall -> syscall.Syscall' \ - | gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \ - | gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \ - | gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \ - | gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \ - | gofmt -r='SYS_LGETXATTR -> syscall.SYS_LGETXATTR' \ - | gofmt -r='SYS_LLISTXATTR -> syscall.SYS_LLISTXATTR' \ - | gofmt -r='SYS_LSETXATTR -> syscall.SYS_LSETXATTR' \ - | gofmt -r='SYS_LREMOVEXATTR -> syscall.SYS_LREMOVEXATTR' -} - -if [ "$GOARCH" == "" ] || [ "$GOOS" == "" ]; then - echo "Must specify \$GOARCH and \$GOOS" - exit 1 -fi - -mkargs="" - -if [ "$GOARCH" == "386" ] || [ "$GOARCH" == "arm" ]; then - mkargs="-l32" -fi - -for f in "$@"; do - $mksyscall $mkargs "${f}_${GOOS}.go" | fix > "${f}_${GOOS}_${GOARCH}.go" -done - diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go deleted file mode 100644 index e0575f4..0000000 --- a/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sysx - -import ( - "syscall" -) - -// This should actually be a set that contains ENOENT and EPERM -const ENODATA = syscall.ENOENT diff --git a/vendor/github.com/containerd/continuity/sysx/xattr.go b/vendor/github.com/containerd/continuity/sysx/xattr.go deleted file mode 100644 index db6fe70..0000000 --- a/vendor/github.com/containerd/continuity/sysx/xattr.go +++ /dev/null @@ -1,117 +0,0 @@ -// +build linux darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package sysx - -import ( - "bytes" - - "golang.org/x/sys/unix" -) - -// Listxattr calls syscall listxattr and reads all content -// and returns a string array -func Listxattr(path string) ([]string, error) { - return listxattrAll(path, unix.Listxattr) -} - -// Removexattr calls syscall removexattr -func Removexattr(path string, attr string) (err error) { - return unix.Removexattr(path, attr) -} - -// Setxattr calls syscall setxattr -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - return unix.Setxattr(path, attr, data, flags) -} - -// Getxattr calls syscall getxattr -func Getxattr(path, attr string) ([]byte, error) { - return getxattrAll(path, attr, unix.Getxattr) -} - -// LListxattr lists xattrs, not following symlinks -func LListxattr(path string) ([]string, error) { - return listxattrAll(path, unix.Llistxattr) -} - -// LRemovexattr removes an xattr, not following symlinks -func LRemovexattr(path string, attr string) (err error) { - return unix.Lremovexattr(path, attr) -} - -// LSetxattr sets an xattr, not following symlinks -func LSetxattr(path string, attr string, data []byte, flags int) (err error) { - return unix.Lsetxattr(path, attr, data, flags) -} - -// LGetxattr gets an xattr, not following symlinks -func LGetxattr(path, attr string) ([]byte, error) { - return getxattrAll(path, attr, unix.Lgetxattr) -} - -const defaultXattrBufferSize = 128 - -type listxattrFunc func(path string, dest []byte) (int, error) - -func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) { - buf := make([]byte, defaultXattrBufferSize) - n, err := listFunc(path, buf) - for err == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - n, err = listFunc(path, []byte{}) - if err != nil { - return nil, err - } - buf = make([]byte, n) - n, err = listFunc(path, buf) - } - if err != nil { - return nil, err - } - - ps := bytes.Split(bytes.TrimSuffix(buf[:n], []byte{0}), []byte{0}) - var entries []string - 
for _, p := range ps { - if len(p) > 0 { - entries = append(entries, string(p)) - } - } - - return entries, nil -} - -type getxattrFunc func(string, string, []byte) (int, error) - -func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) { - buf := make([]byte, defaultXattrBufferSize) - n, err := getFunc(path, attr, buf) - for err == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - n, err = getFunc(path, attr, []byte{}) - if err != nil { - return nil, err - } - buf = make([]byte, n) - n, err = getFunc(path, attr, buf) - } - if err != nil { - return nil, err - } - return buf[:n], nil -} diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go deleted file mode 100644 index f8fa8c6..0000000 --- a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go +++ /dev/null @@ -1,67 +0,0 @@ -// +build !linux,!darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package sysx - -import ( - "errors" - "runtime" -) - -var errUnsupported = errors.New("extended attributes unsupported on " + runtime.GOOS) - -// Listxattr calls syscall listxattr and reads all content -// and returns a string array -func Listxattr(path string) ([]string, error) { - return []string{}, nil -} - -// Removexattr calls syscall removexattr -func Removexattr(path string, attr string) (err error) { - return errUnsupported -} - -// Setxattr calls syscall setxattr -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - return errUnsupported -} - -// Getxattr calls syscall getxattr -func Getxattr(path, attr string) ([]byte, error) { - return []byte{}, errUnsupported -} - -// LListxattr lists xattrs, not following symlinks -func LListxattr(path string) ([]string, error) { - return []string{}, nil -} - -// LRemovexattr removes an xattr, not following symlinks -func LRemovexattr(path string, attr string) (err error) { - return errUnsupported -} - -// LSetxattr sets an xattr, not following symlinks -func LSetxattr(path string, attr string, data []byte, flags int) (err error) { - return errUnsupported -} - -// LGetxattr gets an xattr, not following symlinks -func LGetxattr(path, attr string) ([]byte, error) { - return []byte{}, nil -} diff --git a/vendor/github.com/containerd/continuity/testutil/helpers_unix.go b/vendor/github.com/containerd/continuity/testutil/helpers_unix.go deleted file mode 100644 index 35568a4..0000000 --- a/vendor/github.com/containerd/continuity/testutil/helpers_unix.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package testutil - -import ( - "os" - "testing" - - "golang.org/x/sys/unix" -) - -// Unmount unmounts a given mountPoint and sets t.Error if it fails -func Unmount(t *testing.T, mountPoint string) { - t.Log("unmount", mountPoint) - if err := unmountAll(mountPoint); err != nil { - t.Error("Could not umount", mountPoint, err) - } -} - -// RequiresRoot skips tests that require root, unless the test.root flag has -// been set -func RequiresRoot(t testing.TB) { - if !rootEnabled { - t.Skip("skipping test that requires root") - return - } - if os.Getuid() != 0 { - t.Error("This test must be run as root.") - } -} - -func unmountAll(mountpoint string) error { - for { - if err := unix.Unmount(mountpoint, unmountFlags); err != nil { - if err == unix.EINVAL { - return nil - } - return err - } - } -} diff --git a/vendor/github.com/containerd/continuity/testutil/helpers_windows.go b/vendor/github.com/containerd/continuity/testutil/helpers_windows.go deleted file mode 100644 index 203d987..0000000 --- a/vendor/github.com/containerd/continuity/testutil/helpers_windows.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package testutil - -import "testing" - -// RequiresRoot does nothing on Windows -func RequiresRoot(t testing.TB) { -} - -// RequiresRootM is similar to RequiresRoot but intended to be called from *testing.M. -func RequiresRootM() { -} - -// Unmount unmounts a given mountPoint and sets t.Error if it fails -// Does nothing on Windows -func Unmount(t *testing.T, mountPoint string) { -} diff --git a/vendor/github.com/containerd/continuity/testutil/loopback/loopback_linux.go b/vendor/github.com/containerd/continuity/testutil/loopback/loopback_linux.go deleted file mode 100644 index c175004..0000000 --- a/vendor/github.com/containerd/continuity/testutil/loopback/loopback_linux.go +++ /dev/null @@ -1,115 +0,0 @@ -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package loopback - -import ( - "io/ioutil" - "os" - "os/exec" - "strings" - "syscall" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// New creates a loopback device -func New(size int64) (*Loopback, error) { - // create temporary file for the disk image - file, err := ioutil.TempFile("", "containerd-test-loopback") - if err != nil { - return nil, errors.Wrap(err, "could not create temporary file for loopback") - } - - if err := file.Truncate(size); err != nil { - file.Close() - os.Remove(file.Name()) - return nil, errors.Wrap(err, "failed to resize temp file") - } - file.Close() - - // create device - losetup := exec.Command("losetup", "--find", "--show", file.Name()) - p, err := losetup.Output() - if err != nil { - os.Remove(file.Name()) - return nil, errors.Wrap(err, "loopback setup failed") - } - - deviceName := strings.TrimSpace(string(p)) - logrus.Debugf("Created loop device %s (using %s)", deviceName, file.Name()) - - cleanup := func() error { - // detach device - logrus.Debugf("Removing loop device %s", deviceName) - losetup := exec.Command("losetup", "--detach", deviceName) - err := losetup.Run() - if err != nil { - return errors.Wrapf(err, "Could not remove loop device %s", deviceName) - } - - // remove file - logrus.Debugf("Removing temporary file %s", file.Name()) - return os.Remove(file.Name()) - } - - l := Loopback{ - File: file.Name(), - Device: deviceName, - close: cleanup, - } - return &l, nil -} - -// Loopback device -type Loopback struct { - // File is the underlying sparse file - File string - // Device is /dev/loopX - Device string - close func() error -} - -// SoftSize returns st_size -func (l *Loopback) SoftSize() (int64, error) { - st, err := os.Stat(l.File) - if err != nil { - return 0, err - } - return st.Size(), nil -} - -// HardSize returns st_blocks * 512; see stat(2) -func (l *Loopback) HardSize() (int64, error) { - st, err := os.Stat(l.File) - if err != nil { - return 0, err - } - st2, ok := 
st.Sys().(*syscall.Stat_t) - if !ok { - return 0, errors.New("st.Sys() is not a *syscall.Stat_t") - } - // NOTE: st_blocks has nothing to do with st_blksize; see stat(2) - return st2.Blocks * 512, nil -} - -// Close detaches the device and removes the underlying file -func (l *Loopback) Close() error { - return l.close() -} diff --git a/vendor/github.com/containerd/continuity/testutil/mount_linux.go b/vendor/github.com/containerd/continuity/testutil/mount_linux.go deleted file mode 100644 index 3c453cf..0000000 --- a/vendor/github.com/containerd/continuity/testutil/mount_linux.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package testutil - -import "golang.org/x/sys/unix" - -const unmountFlags int = unix.MNT_DETACH diff --git a/vendor/github.com/containerd/continuity/testutil/mount_other.go b/vendor/github.com/containerd/continuity/testutil/mount_other.go deleted file mode 100644 index 0c7fde5..0000000 --- a/vendor/github.com/containerd/continuity/testutil/mount_other.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !linux,!windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package testutil - -const unmountFlags int = 0 diff --git a/vendor/github.com/containerd/fifo/.gitattributes b/vendor/github.com/containerd/fifo/.gitattributes deleted file mode 100644 index d207b18..0000000 --- a/vendor/github.com/containerd/fifo/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.go text eol=lf diff --git a/vendor/github.com/containerd/fifo/.gitignore b/vendor/github.com/containerd/fifo/.gitignore deleted file mode 100644 index 7f7bd6a..0000000 --- a/vendor/github.com/containerd/fifo/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -coverage.txt -vendor/ diff --git a/vendor/github.com/containerd/fifo/.golangci.yml b/vendor/github.com/containerd/fifo/.golangci.yml deleted file mode 100644 index fcba5e8..0000000 --- a/vendor/github.com/containerd/fifo/.golangci.yml +++ /dev/null @@ -1,20 +0,0 @@ -linters: - enable: - - structcheck - - varcheck - - staticcheck - - unconvert - - gofmt - - goimports - - golint - - ineffassign - - vet - - unused - - misspell - disable: - - errcheck - -run: - timeout: 3m - skip-dirs: - - vendor diff --git a/vendor/github.com/containerd/fifo/LICENSE b/vendor/github.com/containerd/fifo/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/containerd/fifo/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/fifo/Makefile b/vendor/github.com/containerd/fifo/Makefile deleted file mode 100644 index 40c5046..0000000 --- a/vendor/github.com/containerd/fifo/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright The containerd Authors. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -.PHONY: check test deps - -test: deps - go test -v -race ./... - -deps: - go mod vendor - -check: - GOGC=75 golangci-lint run diff --git a/vendor/github.com/containerd/fifo/errors.go b/vendor/github.com/containerd/fifo/errors.go deleted file mode 100644 index 50f73b2..0000000 --- a/vendor/github.com/containerd/fifo/errors.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fifo - -import "errors" - -var ( - ErrClosed = errors.New("fifo closed") - ErrCtrlClosed = errors.New("control of closed fifo") - ErrRdFrmWRONLY = errors.New("reading from write-only fifo") - ErrReadClosed = errors.New("reading from a closed fifo") - ErrWrToRDONLY = errors.New("writing to read-only fifo") - ErrWriteClosed = errors.New("writing to a closed fifo") -) diff --git a/vendor/github.com/containerd/fifo/fifo.go b/vendor/github.com/containerd/fifo/fifo.go deleted file mode 100644 index 45a9b38..0000000 --- a/vendor/github.com/containerd/fifo/fifo.go +++ /dev/null @@ -1,258 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fifo - -import ( - "context" - "io" - "os" - "runtime" - "sync" - "syscall" - - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -type fifo struct { - flag int - opened chan struct{} - closed chan struct{} - closing chan struct{} - err error - file *os.File - closingOnce sync.Once // close has been called - closedOnce sync.Once // fifo is closed - handle *handle -} - -var leakCheckWg *sync.WaitGroup - -// OpenFifoDup2 is same as OpenFifo, but additionally creates a copy of the FIFO file descriptor with dup2 syscall. -func OpenFifoDup2(ctx context.Context, fn string, flag int, perm os.FileMode, fd int) (io.ReadWriteCloser, error) { - f, err := openFifo(ctx, fn, flag, perm) - if err != nil { - return nil, errors.Wrap(err, "fifo error") - } - - if err := unix.Dup2(int(f.file.Fd()), fd); err != nil { - _ = f.Close() - return nil, errors.Wrap(err, "dup2 error") - } - - return f, nil -} - -// OpenFifo opens a fifo. Returns io.ReadWriteCloser. -// Context can be used to cancel this function until open(2) has not returned. -// Accepted flags: -// - syscall.O_CREAT - create new fifo if one doesn't exist -// - syscall.O_RDONLY - open fifo only from reader side -// - syscall.O_WRONLY - open fifo only from writer side -// - syscall.O_RDWR - open fifo from both sides, never block on syscall level -// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the -// fifo isn't open. read/write will be connected after the actual fifo is -// open or after fifo is closed. 
-func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) { - return openFifo(ctx, fn, flag, perm) -} - -func openFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (*fifo, error) { - if _, err := os.Stat(fn); err != nil { - if os.IsNotExist(err) && flag&syscall.O_CREAT != 0 { - if err := syscall.Mkfifo(fn, uint32(perm&os.ModePerm)); err != nil && !os.IsExist(err) { - return nil, errors.Wrapf(err, "error creating fifo %v", fn) - } - } else { - return nil, err - } - } - - block := flag&syscall.O_NONBLOCK == 0 || flag&syscall.O_RDWR != 0 - - flag &= ^syscall.O_CREAT - flag &= ^syscall.O_NONBLOCK - - h, err := getHandle(fn) - if err != nil { - return nil, err - } - - f := &fifo{ - handle: h, - flag: flag, - opened: make(chan struct{}), - closed: make(chan struct{}), - closing: make(chan struct{}), - } - - wg := leakCheckWg - if wg != nil { - wg.Add(2) - } - - go func() { - if wg != nil { - defer wg.Done() - } - select { - case <-ctx.Done(): - select { - case <-f.opened: - default: - f.Close() - } - case <-f.opened: - case <-f.closed: - } - }() - go func() { - if wg != nil { - defer wg.Done() - } - var file *os.File - fn, err := h.Path() - if err == nil { - file, err = os.OpenFile(fn, flag, 0) - } - select { - case <-f.closing: - if err == nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - err = errors.Errorf("fifo %v was closed before opening", h.Name()) - } - if file != nil { - file.Close() - } - } - default: - } - if err != nil { - f.closedOnce.Do(func() { - f.err = err - close(f.closed) - }) - return - } - f.file = file - close(f.opened) - }() - if block { - select { - case <-f.opened: - case <-f.closed: - return nil, f.err - } - } - return f, nil -} - -// Read from a fifo to a byte array. 
-func (f *fifo) Read(b []byte) (int, error) { - if f.flag&syscall.O_WRONLY > 0 { - return 0, ErrRdFrmWRONLY - } - select { - case <-f.opened: - return f.file.Read(b) - default: - } - select { - case <-f.opened: - return f.file.Read(b) - case <-f.closed: - return 0, ErrReadClosed - } -} - -// Write from byte array to a fifo. -func (f *fifo) Write(b []byte) (int, error) { - if f.flag&(syscall.O_WRONLY|syscall.O_RDWR) == 0 { - return 0, ErrWrToRDONLY - } - select { - case <-f.opened: - return f.file.Write(b) - default: - } - select { - case <-f.opened: - return f.file.Write(b) - case <-f.closed: - return 0, ErrWriteClosed - } -} - -// Close the fifo. Next reads/writes will error. This method can also be used -// before open(2) has returned and fifo was never opened. -func (f *fifo) Close() (retErr error) { - for { - select { - case <-f.closed: - f.handle.Close() - return - default: - select { - case <-f.opened: - f.closedOnce.Do(func() { - retErr = f.file.Close() - f.err = retErr - close(f.closed) - }) - default: - if f.flag&syscall.O_RDWR != 0 { - runtime.Gosched() - break - } - f.closingOnce.Do(func() { - close(f.closing) - }) - reverseMode := syscall.O_WRONLY - if f.flag&syscall.O_WRONLY > 0 { - reverseMode = syscall.O_RDONLY - } - fn, err := f.handle.Path() - // if Close() is called concurrently(shouldn't) it may cause error - // because handle is closed - select { - case <-f.closed: - default: - if err != nil { - // Path has become invalid. We will leak a goroutine. - // This case should not happen in linux. 
- f.closedOnce.Do(func() { - f.err = err - close(f.closed) - }) - <-f.closed - break - } - f, err := os.OpenFile(fn, reverseMode|syscall.O_NONBLOCK, 0) - if err == nil { - f.Close() - } - runtime.Gosched() - } - } - } - } -} diff --git a/vendor/github.com/containerd/fifo/handle_linux.go b/vendor/github.com/containerd/fifo/handle_linux.go deleted file mode 100644 index 0ee2c9f..0000000 --- a/vendor/github.com/containerd/fifo/handle_linux.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package fifo - -import ( - "fmt" - "os" - "sync" - "syscall" - - "github.com/pkg/errors" -) - -//nolint:golint -const O_PATH = 010000000 - -type handle struct { - f *os.File - fd uintptr - dev uint64 - ino uint64 - closeOnce sync.Once - name string -} - -func getHandle(fn string) (*handle, error) { - f, err := os.OpenFile(fn, O_PATH, 0) - if err != nil { - return nil, errors.Wrapf(err, "failed to open %v with O_PATH", fn) - } - - var ( - stat syscall.Stat_t - fd = f.Fd() - ) - if err := syscall.Fstat(int(fd), &stat); err != nil { - f.Close() - return nil, errors.Wrapf(err, "failed to stat handle %v", fd) - } - - h := &handle{ - f: f, - name: fn, - //nolint:unconvert - dev: uint64(stat.Dev), - ino: stat.Ino, - fd: fd, - } - - // check /proc just in case - if _, err := os.Stat(h.procPath()); err != nil { - f.Close() - return nil, errors.Wrapf(err, "couldn't stat %v", h.procPath()) - } - - return h, nil -} - -func (h *handle) procPath() string { - return fmt.Sprintf("/proc/self/fd/%d", h.fd) -} - -func (h *handle) Name() string { - return h.name -} - -func (h *handle) Path() (string, error) { - var stat syscall.Stat_t - if err := syscall.Stat(h.procPath(), &stat); err != nil { - return "", errors.Wrapf(err, "path %v could not be statted", h.procPath()) - } - //nolint:unconvert - if uint64(stat.Dev) != h.dev || stat.Ino != h.ino { - return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino) - } - return h.procPath(), nil -} - -func (h *handle) Close() error { - h.closeOnce.Do(func() { - h.f.Close() - }) - return nil -} diff --git a/vendor/github.com/containerd/fifo/handle_nolinux.go b/vendor/github.com/containerd/fifo/handle_nolinux.go deleted file mode 100644 index 81ca308..0000000 --- a/vendor/github.com/containerd/fifo/handle_nolinux.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build !linux,!windows - -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fifo - -import ( - "syscall" - - "github.com/pkg/errors" -) - -type handle struct { - fn string - dev uint64 - ino uint64 -} - -func getHandle(fn string) (*handle, error) { - var stat syscall.Stat_t - if err := syscall.Stat(fn, &stat); err != nil { - return nil, errors.Wrapf(err, "failed to stat %v", fn) - } - - h := &handle{ - fn: fn, - dev: uint64(stat.Dev), //nolint: unconvert - ino: uint64(stat.Ino), //nolint: unconvert - } - - return h, nil -} - -func (h *handle) Path() (string, error) { - var stat syscall.Stat_t - if err := syscall.Stat(h.fn, &stat); err != nil { - return "", errors.Wrapf(err, "path %v could not be statted", h.fn) - } - if uint64(stat.Dev) != h.dev || uint64(stat.Ino) != h.ino { //nolint: unconvert - return "", errors.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn) - } - return h.fn, nil -} - -func (h *handle) Name() string { - return h.fn -} - -func (h *handle) Close() error { - return nil -} diff --git a/vendor/github.com/containerd/fifo/raw.go b/vendor/github.com/containerd/fifo/raw.go deleted file mode 100644 index cead94c..0000000 --- a/vendor/github.com/containerd/fifo/raw.go +++ /dev/null @@ -1,114 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fifo - -import ( - "syscall" -) - -// SyscallConn provides raw access to the fifo's underlying filedescrptor. -// See syscall.Conn for guarantees provided by this interface. -func (f *fifo) SyscallConn() (syscall.RawConn, error) { - // deterministic check for closed - select { - case <-f.closed: - return nil, ErrClosed - default: - } - - select { - case <-f.closed: - return nil, ErrClosed - case <-f.opened: - return f.file.SyscallConn() - default: - } - - // Not opened and not closed, this means open is non-blocking AND it's not open yet - // Use rawConn to deal with non-blocking open. 
- rc := &rawConn{f: f, ready: make(chan struct{})} - go func() { - select { - case <-f.closed: - return - case <-f.opened: - rc.raw, rc.err = f.file.SyscallConn() - close(rc.ready) - } - }() - - return rc, nil -} - -type rawConn struct { - f *fifo - ready chan struct{} - raw syscall.RawConn - err error -} - -func (r *rawConn) Control(f func(fd uintptr)) error { - select { - case <-r.f.closed: - return ErrCtrlClosed - case <-r.ready: - } - - if r.err != nil { - return r.err - } - - return r.raw.Control(f) -} - -func (r *rawConn) Read(f func(fd uintptr) (done bool)) error { - if r.f.flag&syscall.O_WRONLY > 0 { - return ErrRdFrmWRONLY - } - - select { - case <-r.f.closed: - return ErrReadClosed - case <-r.ready: - } - - if r.err != nil { - return r.err - } - - return r.raw.Read(f) -} - -func (r *rawConn) Write(f func(fd uintptr) (done bool)) error { - if r.f.flag&(syscall.O_WRONLY|syscall.O_RDWR) == 0 { - return ErrWrToRDONLY - } - - select { - case <-r.f.closed: - return ErrWriteClosed - case <-r.ready: - } - - if r.err != nil { - return r.err - } - - return r.raw.Write(f) -} diff --git a/vendor/github.com/containerd/fifo/readme.md b/vendor/github.com/containerd/fifo/readme.md deleted file mode 100644 index ad4727d..0000000 --- a/vendor/github.com/containerd/fifo/readme.md +++ /dev/null @@ -1,46 +0,0 @@ -### fifo - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/fifo)](https://pkg.go.dev/github.com/containerd/fifo) -[![Build Status](https://github.com/containerd/fifo/workflows/CI/badge.svg)](https://github.com/containerd/fifo/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/fifo/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/fifo) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/fifo)](https://goreportcard.com/report/github.com/containerd/fifo) - -Go package for handling fifos in a sane way. - -``` -// OpenFifo opens a fifo. Returns io.ReadWriteCloser. 
-// Context can be used to cancel this function until open(2) has not returned. -// Accepted flags: -// - syscall.O_CREAT - create new fifo if one doesn't exist -// - syscall.O_RDONLY - open fifo only from reader side -// - syscall.O_WRONLY - open fifo only from writer side -// - syscall.O_RDWR - open fifo from both sides, never block on syscall level -// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the -// fifo isn't open. read/write will be connected after the actual fifo is -// open or after fifo is closed. -func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) - - -// Read from a fifo to a byte array. -func (f *fifo) Read(b []byte) (int, error) - - -// Write from byte array to a fifo. -func (f *fifo) Write(b []byte) (int, error) - - -// Close the fifo. Next reads/writes will error. This method can also be used -// before open(2) has returned and fifo was never opened. -func (f *fifo) Close() error -``` - -## Project details - -The fifo is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. 
diff --git a/vendor/github.com/containerd/go-cni/.golangci.yml b/vendor/github.com/containerd/go-cni/.golangci.yml deleted file mode 100644 index 75f462e..0000000 --- a/vendor/github.com/containerd/go-cni/.golangci.yml +++ /dev/null @@ -1,23 +0,0 @@ -linters: - enable: - - structcheck - - varcheck - - staticcheck - - unconvert - - gofmt - - goimports - - golint - - ineffassign - - vet - - unused - - misspell - disable: - - errcheck - -# FIXME: re-enable after fixing GoDoc in this repository -#issues: -# include: -# - EXC0002 - -run: - timeout: 2m diff --git a/vendor/github.com/containerd/go-cni/LICENSE b/vendor/github.com/containerd/go-cni/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/containerd/go-cni/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/go-cni/README.md b/vendor/github.com/containerd/go-cni/README.md deleted file mode 100644 index b2652c4..0000000 --- a/vendor/github.com/containerd/go-cni/README.md +++ /dev/null @@ -1,95 +0,0 @@ -# go-cni - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/go-cni)](https://pkg.go.dev/github.com/containerd/go-cni) -[![Build Status](https://github.com/containerd/go-cni/workflows/CI/badge.svg)](https://github.com/containerd/go-cni/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/go-cni/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/go-cni) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/go-cni)](https://goreportcard.com/report/github.com/containerd/go-cni) - -A generic CNI library to provide APIs for CNI plugin interactions. 
The library provides APIs to: - -- Load CNI network config from different sources -- Setup networks for container namespace -- Remove networks from container namespace -- Query status of CNI network plugin initialization - -go-cni aims to support plugins that implement [Container Network Interface](https://github.com/containernetworking/cni) - -## Usage -```go -package main - -import ( - "context" - "fmt" - "log" - - gocni "github.com/containerd/go-cni" -) - -func main() { - id := "example" - netns := "/var/run/netns/example-ns-1" - - // CNI allows multiple CNI configurations and the network interface - // will be named by eth0, eth1, ..., ethN. - ifPrefixName := "eth" - defaultIfName := "eth0" - - // Initializes library - l, err := gocni.New( - // one for loopback network interface - gocni.WithMinNetworkCount(2), - gocni.WithPluginConfDir("/etc/cni/net.d"), - gocni.WithPluginDir([]string{"/opt/cni/bin"}), - // Sets the prefix for network interfaces, eth by default - gocni.WithInterfacePrefix(ifPrefixName)) - if err != nil { - log.Fatalf("failed to initialize cni library: %v", err) - } - - // Load the cni configuration - if err := l.Load(gocni.WithLoNetwork, gocni.WithDefaultConf); err != nil { - log.Fatalf("failed to load cni configuration: %v", err) - } - - // Setup network for namespace. - labels := map[string]string{ - "K8S_POD_NAMESPACE": "namespace1", - "K8S_POD_NAME": "pod1", - "K8S_POD_INFRA_CONTAINER_ID": id, - // Plugin tolerates all Args embedded by unknown labels, like - // K8S_POD_NAMESPACE/NAME/INFRA_CONTAINER_ID... 
- "IgnoreUnknown": "1", - } - - ctx := context.Background() - - // Teardown network - defer func() { - if err := l.Remove(ctx, id, netns, gocni.WithLabels(labels)); err != nil { - log.Fatalf("failed to teardown network: %v", err) - } - }() - - // Setup network - result, err := l.Setup(ctx, id, netns, gocni.WithLabels(labels)) - if err != nil { - log.Fatalf("failed to setup network for namespace: %v", err) - } - - // Get IP of the default interface - IP := result.Interfaces[defaultIfName].IPConfigs[0].IP.String() - fmt.Printf("IP of the default interface %s:%s", defaultIfName, IP) -} -``` - -## Project details - -The go-cni is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/go-cni/cni.go b/vendor/github.com/containerd/go-cni/cni.go deleted file mode 100644 index 2806caf..0000000 --- a/vendor/github.com/containerd/go-cni/cni.go +++ /dev/null @@ -1,228 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cni - -import ( - "context" - "fmt" - "strings" - "sync" - - cnilibrary "github.com/containernetworking/cni/libcni" - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/types/current" - "github.com/pkg/errors" -) - -type CNI interface { - // Setup setup the network for the namespace - Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) - // Remove tears down the network of the namespace. - Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error - // Load loads the cni network config - Load(opts ...Opt) error - // Status checks the status of the cni initialization - Status() error - // GetConfig returns a copy of the CNI plugin configurations as parsed by CNI - GetConfig() *ConfigResult -} - -type ConfigResult struct { - PluginDirs []string - PluginConfDir string - PluginMaxConfNum int - Prefix string - Networks []*ConfNetwork -} - -type ConfNetwork struct { - Config *NetworkConfList - IFName string -} - -// NetworkConfList is a source bytes to string version of cnilibrary.NetworkConfigList -type NetworkConfList struct { - Name string - CNIVersion string - Plugins []*NetworkConf - Source string -} - -// NetworkConf is a source bytes to string conversion of cnilibrary.NetworkConfig -type NetworkConf struct { - Network *types.NetConf - Source string -} - -type libcni struct { - config - - cniConfig cnilibrary.CNI - networkCount int // minimum network plugin configurations needed to initialize cni - networks []*Network - sync.RWMutex -} - -func defaultCNIConfig() *libcni { - return &libcni{ - config: config{ - pluginDirs: []string{DefaultCNIDir}, - pluginConfDir: DefaultNetDir, - pluginMaxConfNum: DefaultMaxConfNum, - prefix: DefaultPrefix, - }, - cniConfig: &cnilibrary.CNIConfig{ - Path: []string{DefaultCNIDir}, - }, - networkCount: 1, - } -} - -// New creates a new libcni instance. 
-func New(config ...Opt) (CNI, error) { - cni := defaultCNIConfig() - var err error - for _, c := range config { - if err = c(cni); err != nil { - return nil, err - } - } - return cni, nil -} - -// Load loads the latest config from cni config files. -func (c *libcni) Load(opts ...Opt) error { - var err error - c.Lock() - defer c.Unlock() - // Reset the networks on a load operation to ensure - // config happens on a clean slate - c.reset() - - for _, o := range opts { - if err = o(c); err != nil { - return errors.Wrapf(ErrLoad, fmt.Sprintf("cni config load failed: %v", err)) - } - } - return nil -} - -// Status returns the status of CNI initialization. -func (c *libcni) Status() error { - c.RLock() - defer c.RUnlock() - if len(c.networks) < c.networkCount { - return ErrCNINotInitialized - } - return nil -} - -// Networks returns all the configured networks. -// NOTE: Caller MUST NOT modify anything in the returned array. -func (c *libcni) Networks() []*Network { - c.RLock() - defer c.RUnlock() - return append([]*Network{}, c.networks...) -} - -// Setup setups the network in the namespace and returns a Result -func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) { - if err := c.Status(); err != nil { - return nil, err - } - ns, err := newNamespace(id, path, opts...) 
- if err != nil { - return nil, err - } - result, err := c.attachNetworks(ctx, ns) - if err != nil { - return nil, err - } - return c.createResult(result) -} - -func (c *libcni) attachNetworks(ctx context.Context, ns *Namespace) ([]*current.Result, error) { - var results []*current.Result - for _, network := range c.Networks() { - r, err := network.Attach(ctx, ns) - if err != nil { - return nil, err - } - results = append(results, r) - } - return results, nil -} - -// Remove removes the network config from the namespace -func (c *libcni) Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error { - if err := c.Status(); err != nil { - return err - } - ns, err := newNamespace(id, path, opts...) - if err != nil { - return err - } - for _, network := range c.Networks() { - if err := network.Remove(ctx, ns); err != nil { - // Based on CNI spec v0.7.0, empty network namespace is allowed to - // do best effort cleanup. However, it is not handled consistently - // right now: - // https://github.com/containernetworking/plugins/issues/210 - // TODO(random-liu): Remove the error handling when the issue is - // fixed and the CNI spec v0.6.0 support is deprecated. 
- if path == "" && strings.Contains(err.Error(), "no such file or directory") { - continue - } - return err - } - } - return nil -} - -// GetConfig returns a copy of the CNI plugin configurations as parsed by CNI -func (c *libcni) GetConfig() *ConfigResult { - c.RLock() - defer c.RUnlock() - r := &ConfigResult{ - PluginDirs: c.config.pluginDirs, - PluginConfDir: c.config.pluginConfDir, - PluginMaxConfNum: c.config.pluginMaxConfNum, - Prefix: c.config.prefix, - } - for _, network := range c.networks { - conf := &NetworkConfList{ - Name: network.config.Name, - CNIVersion: network.config.CNIVersion, - Source: string(network.config.Bytes), - } - for _, plugin := range network.config.Plugins { - conf.Plugins = append(conf.Plugins, &NetworkConf{ - Network: plugin.Network, - Source: string(plugin.Bytes), - }) - } - r.Networks = append(r.Networks, &ConfNetwork{ - Config: conf, - IFName: network.ifName, - }) - } - return r -} - -func (c *libcni) reset() { - c.networks = nil -} diff --git a/vendor/github.com/containerd/go-cni/deprecated.go b/vendor/github.com/containerd/go-cni/deprecated.go deleted file mode 100644 index 158793b..0000000 --- a/vendor/github.com/containerd/go-cni/deprecated.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cni - -import "github.com/containernetworking/cni/pkg/types/current" - -// Deprecated: use cni.Opt instead -type CNIOpt = Opt //nolint: golint // type name will be used as cni.CNIOpt by other packages, and that stutters - -// Deprecated: use cni.Result instead -type CNIResult = Result //nolint: golint // type name will be used as cni.CNIResult by other packages, and that stutters - -// GetCNIResultFromResults creates a Result from the given slice of current.Result, -// adding structured data containing the interface configuration for each of the -// interfaces created in the namespace. It returns an error if validation of -// results fails, or if a network could not be found. -// Deprecated: do not use -func (c *libcni) GetCNIResultFromResults(results []*current.Result) (*Result, error) { - return c.createResult(results) -} diff --git a/vendor/github.com/containerd/go-cni/errors.go b/vendor/github.com/containerd/go-cni/errors.go deleted file mode 100644 index 3fbdf77..0000000 --- a/vendor/github.com/containerd/go-cni/errors.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cni - -import ( - "github.com/pkg/errors" -) - -var ( - ErrCNINotInitialized = errors.New("cni plugin not initialized") - ErrInvalidConfig = errors.New("invalid cni config") - ErrNotFound = errors.New("not found") - ErrRead = errors.New("failed to read config file") - ErrInvalidResult = errors.New("invalid result") - ErrLoad = errors.New("failed to load cni config") -) - -// IsCNINotInitialized returns true if the error is due to cni config not being initialized -func IsCNINotInitialized(err error) bool { - return errors.Is(err, ErrCNINotInitialized) -} - -// IsInvalidConfig returns true if the error is invalid cni config -func IsInvalidConfig(err error) bool { - return errors.Is(err, ErrInvalidConfig) -} - -// IsNotFound returns true if the error is due to a missing config or result -func IsNotFound(err error) bool { - return errors.Is(err, ErrNotFound) -} - -// IsReadFailure return true if the error is a config read failure -func IsReadFailure(err error) bool { - return errors.Is(err, ErrRead) -} - -// IsInvalidResult return true if the error is due to invalid cni result -func IsInvalidResult(err error) bool { - return errors.Is(err, ErrInvalidResult) -} diff --git a/vendor/github.com/containerd/go-cni/helper.go b/vendor/github.com/containerd/go-cni/helper.go deleted file mode 100644 index 088cb9b..0000000 --- a/vendor/github.com/containerd/go-cni/helper.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cni - -import ( - "fmt" - - "github.com/containernetworking/cni/pkg/types/current" -) - -func validateInterfaceConfig(ipConf *current.IPConfig, ifs int) error { - if ipConf == nil { - return fmt.Errorf("invalid IP configuration (nil)") - } - if ipConf.Interface != nil && *ipConf.Interface > ifs { - return fmt.Errorf("invalid IP configuration (interface number %d is > number of interfaces %d)", *ipConf.Interface, ifs) - } - return nil -} - -func getIfName(prefix string, i int) string { - return fmt.Sprintf("%s%d", prefix, i) -} - -func defaultInterface(prefix string) string { - return getIfName(prefix, 0) -} diff --git a/vendor/github.com/containerd/go-cni/namespace.go b/vendor/github.com/containerd/go-cni/namespace.go deleted file mode 100644 index ff14b01..0000000 --- a/vendor/github.com/containerd/go-cni/namespace.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cni - -import ( - "context" - - cnilibrary "github.com/containernetworking/cni/libcni" - "github.com/containernetworking/cni/pkg/types/current" -) - -type Network struct { - cni cnilibrary.CNI - config *cnilibrary.NetworkConfigList - ifName string -} - -func (n *Network) Attach(ctx context.Context, ns *Namespace) (*current.Result, error) { - r, err := n.cni.AddNetworkList(ctx, n.config, ns.config(n.ifName)) - if err != nil { - return nil, err - } - return current.NewResultFromResult(r) -} - -func (n *Network) Remove(ctx context.Context, ns *Namespace) error { - return n.cni.DelNetworkList(ctx, n.config, ns.config(n.ifName)) -} - -type Namespace struct { - id string - path string - capabilityArgs map[string]interface{} - args map[string]string -} - -func newNamespace(id, path string, opts ...NamespaceOpts) (*Namespace, error) { - ns := &Namespace{ - id: id, - path: path, - capabilityArgs: make(map[string]interface{}), - args: make(map[string]string), - } - for _, o := range opts { - if err := o(ns); err != nil { - return nil, err - } - } - return ns, nil -} - -func (ns *Namespace) config(ifName string) *cnilibrary.RuntimeConf { - c := &cnilibrary.RuntimeConf{ - ContainerID: ns.id, - NetNS: ns.path, - IfName: ifName, - } - for k, v := range ns.args { - c.Args = append(c.Args, [2]string{k, v}) - } - c.CapabilityArgs = ns.capabilityArgs - return c -} diff --git a/vendor/github.com/containerd/go-cni/namespace_opts.go b/vendor/github.com/containerd/go-cni/namespace_opts.go deleted file mode 100644 index 1fad5f6..0000000 --- a/vendor/github.com/containerd/go-cni/namespace_opts.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cni - -type NamespaceOpts func(s *Namespace) error - -// Capabilities -func WithCapabilityPortMap(portMapping []PortMapping) NamespaceOpts { - return func(c *Namespace) error { - c.capabilityArgs["portMappings"] = portMapping - return nil - } -} - -func WithCapabilityIPRanges(ipRanges []IPRanges) NamespaceOpts { - return func(c *Namespace) error { - c.capabilityArgs["ipRanges"] = ipRanges - return nil - } -} - -// WithCapabilityBandWitdh adds support for traffic shaping: -// https://github.com/heptio/cni-plugins/tree/master/plugins/meta/bandwidth -func WithCapabilityBandWidth(bandWidth BandWidth) NamespaceOpts { - return func(c *Namespace) error { - c.capabilityArgs["bandwidth"] = bandWidth - return nil - } -} - -// WithCapabilityDNS adds support for dns -func WithCapabilityDNS(dns DNS) NamespaceOpts { - return func(c *Namespace) error { - c.capabilityArgs["dns"] = dns - return nil - } -} - -func WithCapability(name string, capability interface{}) NamespaceOpts { - return func(c *Namespace) error { - c.capabilityArgs[name] = capability - return nil - } -} - -// Args -func WithLabels(labels map[string]string) NamespaceOpts { - return func(c *Namespace) error { - for k, v := range labels { - c.args[k] = v - } - return nil - } -} - -func WithArgs(k, v string) NamespaceOpts { - return func(c *Namespace) error { - c.args[k] = v - return nil - } -} diff --git a/vendor/github.com/containerd/go-cni/opts.go b/vendor/github.com/containerd/go-cni/opts.go deleted file mode 100644 index 9ced1d7..0000000 --- a/vendor/github.com/containerd/go-cni/opts.go +++ 
/dev/null @@ -1,264 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cni - -import ( - "sort" - "strings" - - cnilibrary "github.com/containernetworking/cni/libcni" - "github.com/pkg/errors" -) - -// Opt sets options for a CNI instance -type Opt func(c *libcni) error - -// WithInterfacePrefix sets the prefix for network interfaces -// e.g. eth or wlan -func WithInterfacePrefix(prefix string) Opt { - return func(c *libcni) error { - c.prefix = prefix - return nil - } -} - -// WithPluginDir can be used to set the locations of -// the cni plugin binaries -func WithPluginDir(dirs []string) Opt { - return func(c *libcni) error { - c.pluginDirs = dirs - c.cniConfig = &cnilibrary.CNIConfig{Path: dirs} - return nil - } -} - -// WithPluginConfDir can be used to configure the -// cni configuration directory. -func WithPluginConfDir(dir string) Opt { - return func(c *libcni) error { - c.pluginConfDir = dir - return nil - } -} - -// WithPluginMaxConfNum can be used to configure the -// max cni plugin config file num. -func WithPluginMaxConfNum(max int) Opt { - return func(c *libcni) error { - c.pluginMaxConfNum = max - return nil - } -} - -// WithMinNetworkCount can be used to configure the -// minimum networks to be configured and initialized -// for the status to report success. By default its 1. 
-func WithMinNetworkCount(count int) Opt { - return func(c *libcni) error { - c.networkCount = count - return nil - } -} - -// WithLoNetwork can be used to load the loopback -// network config. -func WithLoNetwork(c *libcni) error { - loConfig, _ := cnilibrary.ConfListFromBytes([]byte(`{ -"cniVersion": "0.3.1", -"name": "cni-loopback", -"plugins": [{ - "type": "loopback" -}] -}`)) - - c.networks = append(c.networks, &Network{ - cni: c.cniConfig, - config: loConfig, - ifName: "lo", - }) - return nil -} - -// WithConf can be used to load config directly -// from byte. -func WithConf(bytes []byte) Opt { - return WithConfIndex(bytes, 0) -} - -// WithConfIndex can be used to load config directly -// from byte and set the interface name's index. -func WithConfIndex(bytes []byte, index int) Opt { - return func(c *libcni) error { - conf, err := cnilibrary.ConfFromBytes(bytes) - if err != nil { - return err - } - confList, err := cnilibrary.ConfListFromConf(conf) - if err != nil { - return err - } - c.networks = append(c.networks, &Network{ - cni: c.cniConfig, - config: confList, - ifName: getIfName(c.prefix, index), - }) - return nil - } -} - -// WithConfFile can be used to load network config -// from an .conf file. Supported with absolute fileName -// with path only. 
-func WithConfFile(fileName string) Opt { - return func(c *libcni) error { - conf, err := cnilibrary.ConfFromFile(fileName) - if err != nil { - return err - } - // upconvert to conf list - confList, err := cnilibrary.ConfListFromConf(conf) - if err != nil { - return err - } - c.networks = append(c.networks, &Network{ - cni: c.cniConfig, - config: confList, - ifName: getIfName(c.prefix, 0), - }) - return nil - } -} - -// WithConfListBytes can be used to load network config list directly -// from byte -func WithConfListBytes(bytes []byte) Opt { - return func(c *libcni) error { - confList, err := cnilibrary.ConfListFromBytes(bytes) - if err != nil { - return err - } - i := len(c.networks) - c.networks = append(c.networks, &Network{ - cni: c.cniConfig, - config: confList, - ifName: getIfName(c.prefix, i), - }) - return nil - } -} - -// WithConfListFile can be used to load network config -// from an .conflist file. Supported with absolute fileName -// with path only. -func WithConfListFile(fileName string) Opt { - return func(c *libcni) error { - confList, err := cnilibrary.ConfListFromFile(fileName) - if err != nil { - return err - } - i := len(c.networks) - c.networks = append(c.networks, &Network{ - cni: c.cniConfig, - config: confList, - ifName: getIfName(c.prefix, i), - }) - return nil - } -} - -// WithDefaultConf can be used to detect the default network -// config file from the configured cni config directory and load -// it. -// Since the CNI spec does not specify a way to detect default networks, -// the convention chosen is - the first network configuration in the sorted -// list of network conf files as the default network. -func WithDefaultConf(c *libcni) error { - return loadFromConfDir(c, c.pluginMaxConfNum) -} - -// WithAllConf can be used to detect all network config -// files from the configured cni config directory and load -// them. 
-func WithAllConf(c *libcni) error { - return loadFromConfDir(c, 0) -} - -// loadFromConfDir detects network config files from the -// configured cni config directory and load them. max is -// the maximum network config to load (max i<= 0 means no limit). -func loadFromConfDir(c *libcni, max int) error { - files, err := cnilibrary.ConfFiles(c.pluginConfDir, []string{".conf", ".conflist", ".json"}) - switch { - case err != nil: - return errors.Wrapf(ErrRead, "failed to read config file: %v", err) - case len(files) == 0: - return errors.Wrapf(ErrCNINotInitialized, "no network config found in %s", c.pluginConfDir) - } - - // files contains the network config files associated with cni network. - // Use lexicographical way as a defined order for network config files. - sort.Strings(files) - // Since the CNI spec does not specify a way to detect default networks, - // the convention chosen is - the first network configuration in the sorted - // list of network conf files as the default network and choose the default - // interface provided during init as the network interface for this default - // network. For every other network use a generated interface id. - i := 0 - var networks []*Network - for _, confFile := range files { - var confList *cnilibrary.NetworkConfigList - if strings.HasSuffix(confFile, ".conflist") { - confList, err = cnilibrary.ConfListFromFile(confFile) - if err != nil { - return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config list file %s: %v", confFile, err) - } - } else { - conf, err := cnilibrary.ConfFromFile(confFile) - if err != nil { - return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config file %s: %v", confFile, err) - } - // Ensure the config has a "type" so we know what plugin to run. - // Also catches the case where somebody put a conflist into a conf file. 
- if conf.Network.Type == "" { - return errors.Wrapf(ErrInvalidConfig, "network type not found in %s", confFile) - } - - confList, err = cnilibrary.ConfListFromConf(conf) - if err != nil { - return errors.Wrapf(ErrInvalidConfig, "failed to convert CNI config file %s to CNI config list: %v", confFile, err) - } - } - if len(confList.Plugins) == 0 { - return errors.Wrapf(ErrInvalidConfig, "CNI config list in config file %s has no networks, skipping", confFile) - - } - networks = append(networks, &Network{ - cni: c.cniConfig, - config: confList, - ifName: getIfName(c.prefix, i), - }) - i++ - if i == max { - break - } - } - if len(networks) == 0 { - return errors.Wrapf(ErrCNINotInitialized, "no valid networks found in %s", c.pluginDirs) - } - c.networks = append(c.networks, networks...) - return nil -} diff --git a/vendor/github.com/containerd/go-cni/result.go b/vendor/github.com/containerd/go-cni/result.go deleted file mode 100644 index 3d21d70..0000000 --- a/vendor/github.com/containerd/go-cni/result.go +++ /dev/null @@ -1,107 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cni - -import ( - "net" - - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/types/current" - "github.com/pkg/errors" -) - -type IPConfig struct { - IP net.IP - Gateway net.IP -} - -// Result contains the network information returned by CNI.Setup -// -// a) Interfaces list. 
Depending on the plugin, this can include the sandbox -// (eg, container or hypervisor) interface name and/or the host interface -// name, the hardware addresses of each interface, and details about the -// sandbox (if any) the interface is in. -// b) IP configuration assigned to each interface. The IPv4 and/or IPv6 addresses, -// gateways, and routes assigned to sandbox and/or host interfaces. -// c) DNS information. Dictionary that includes DNS information for nameservers, -// domain, search domains and options. -type Result struct { - Interfaces map[string]*Config - DNS []types.DNS - Routes []*types.Route -} - -type Config struct { - IPConfigs []*IPConfig - Mac string - Sandbox string -} - -// createResult creates a Result from the given slice of current.Result, adding -// structured data containing the interface configuration for each of the -// interfaces created in the namespace. It returns an error if validation of -// results fails, or if a network could not be found. -func (c *libcni) createResult(results []*current.Result) (*Result, error) { - c.RLock() - defer c.RUnlock() - r := &Result{ - Interfaces: make(map[string]*Config), - } - - // Plugins may not need to return Interfaces in result if - // if there are no multiple interfaces created. 
In that case - // all configs should be applied against default interface - r.Interfaces[defaultInterface(c.prefix)] = &Config{} - - // Walk through all the results - for _, result := range results { - // Walk through all the interface in each result - for _, intf := range result.Interfaces { - r.Interfaces[intf.Name] = &Config{ - Mac: intf.Mac, - Sandbox: intf.Sandbox, - } - } - // Walk through all the IPs in the result and attach it to corresponding - // interfaces - for _, ipConf := range result.IPs { - if err := validateInterfaceConfig(ipConf, len(result.Interfaces)); err != nil { - return nil, errors.Wrapf(ErrInvalidResult, "invalid interface config: %v", err) - } - name := c.getInterfaceName(result.Interfaces, ipConf) - r.Interfaces[name].IPConfigs = append(r.Interfaces[name].IPConfigs, - &IPConfig{IP: ipConf.Address.IP, Gateway: ipConf.Gateway}) - } - r.DNS = append(r.DNS, result.DNS) - r.Routes = append(r.Routes, result.Routes...) - } - if _, ok := r.Interfaces[defaultInterface(c.prefix)]; !ok { - return nil, errors.Wrapf(ErrNotFound, "default network not found for: %s", defaultInterface(c.prefix)) - } - return r, nil -} - -// getInterfaceName returns the interface name if the plugins -// return the result with associated interfaces. If interface -// is not present then default interface name is used -func (c *libcni) getInterfaceName(interfaces []*current.Interface, - ipConf *current.IPConfig) string { - if ipConf.Interface != nil { - return interfaces[*ipConf.Interface].Name - } - return defaultInterface(c.prefix) -} diff --git a/vendor/github.com/containerd/go-cni/testutils.go b/vendor/github.com/containerd/go-cni/testutils.go deleted file mode 100644 index d9453c8..0000000 --- a/vendor/github.com/containerd/go-cni/testutils.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cni - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "testing" -) - -func makeTmpDir(prefix string) (string, error) { - tmpDir, err := ioutil.TempDir(os.TempDir(), prefix) - if err != nil { - return "", err - } - return tmpDir, nil -} - -func makeFakeCNIConfig(t *testing.T) (string, string) { - cniDir, err := makeTmpDir("fakecni") - if err != nil { - t.Fatalf("Failed to create plugin config dir: %v", err) - } - - cniConfDir := path.Join(cniDir, "net.d") - err = os.MkdirAll(cniConfDir, 0777) - if err != nil { - t.Fatalf("Failed to create network config dir: %v", err) - } - - networkConfig1 := path.Join(cniConfDir, "mocknetwork1.conf") - f1, err := os.Create(networkConfig1) - if err != nil { - t.Fatalf("Failed to create network config %v: %v", f1, err) - } - networkConfig2 := path.Join(cniConfDir, "mocknetwork2.conf") - f2, err := os.Create(networkConfig2) - if err != nil { - t.Fatalf("Failed to create network config %v: %v", f2, err) - } - - cfg1 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true} }`, "plugin1", "fakecni") - _, err = f1.WriteString(cfg1) - if err != nil { - t.Fatalf("Failed to write network config file %v: %v", f1, err) - } - f1.Close() - cfg2 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true} }`, "plugin2", "fakecni") - _, err = f2.WriteString(cfg2) - if err != nil { - t.Fatalf("Failed to write network config file %v: %v", f2, err) - } - f2.Close() - return cniDir, cniConfDir -} - -func tearDownCNIConfig(t *testing.T, confDir string) { - err := os.RemoveAll(confDir) 
- if err != nil { - t.Fatalf("Failed to cleanup CNI configs: %v", err) - } -} diff --git a/vendor/github.com/containerd/go-cni/types.go b/vendor/github.com/containerd/go-cni/types.go deleted file mode 100644 index 0b7db1e..0000000 --- a/vendor/github.com/containerd/go-cni/types.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cni - -const ( - CNIPluginName = "cni" - DefaultNetDir = "/etc/cni/net.d" - DefaultCNIDir = "/opt/cni/bin" - DefaultMaxConfNum = 1 - VendorCNIDirTemplate = "%s/opt/%s/bin" - DefaultPrefix = "eth" -) - -type config struct { - pluginDirs []string - pluginConfDir string - pluginMaxConfNum int - prefix string -} - -type PortMapping struct { - HostPort int32 - ContainerPort int32 - Protocol string - HostIP string -} - -type IPRanges struct { - Subnet string - RangeStart string - RangeEnd string - Gateway string -} - -// BandWidth defines the ingress/egress rate and burst limits -type BandWidth struct { - IngressRate uint64 - IngressBurst uint64 - EgressRate uint64 - EgressBurst uint64 -} - -// DNS defines the dns config -type DNS struct { - // List of DNS servers of the cluster. - Servers []string - // List of DNS search domains of the cluster. - Searches []string - // List of DNS options. 
- Options []string -} diff --git a/vendor/github.com/containerd/go-runc/.travis.yml b/vendor/github.com/containerd/go-runc/.travis.yml deleted file mode 100644 index 724ee09..0000000 --- a/vendor/github.com/containerd/go-runc/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -go: - - 1.13.x - - 1.14.x - - 1.15.x - -install: - - go get -t ./... - - go get -u github.com/vbatts/git-validation - - go get -u github.com/kunalkushwaha/ltag - -before_script: - - pushd ..; git clone https://github.com/containerd/project; popd - -script: - - DCO_VERBOSITY=-q ../project/script/validate/dco - - ../project/script/validate/fileheader ../project/ - - go test -v -race -covermode=atomic -coverprofile=coverage.txt ./... - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/containerd/go-runc/LICENSE b/vendor/github.com/containerd/go-runc/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/containerd/go-runc/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/go-runc/README.md b/vendor/github.com/containerd/go-runc/README.md deleted file mode 100644 index c899bdd..0000000 --- a/vendor/github.com/containerd/go-runc/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# go-runc - -[![Build Status](https://travis-ci.org/containerd/go-runc.svg?branch=master)](https://travis-ci.org/containerd/go-runc) -[![codecov](https://codecov.io/gh/containerd/go-runc/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/go-runc) - -This is a package for consuming the [runc](https://github.com/opencontainers/runc) binary in your Go applications. -It tries to expose all the settings and features of the runc CLI. If there is something missing then add it, its opensource! - -This needs runc @ [a9610f2c0](https://github.com/opencontainers/runc/commit/a9610f2c0237d2636d05a031ec8659a70e75ffeb) -or greater. - -## Docs - -Docs can be found at [godoc.org](https://godoc.org/github.com/containerd/go-runc). - -## Project details - -The go-runc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). 
-As a containerd sub-project, you will find the: - - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/go-runc/command_linux.go b/vendor/github.com/containerd/go-runc/command_linux.go deleted file mode 100644 index 8a30f67..0000000 --- a/vendor/github.com/containerd/go-runc/command_linux.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import ( - "context" - "os" - "os/exec" - "strings" - "syscall" -) - -func (r *Runc) command(context context.Context, args ...string) *exec.Cmd { - command := r.Command - if command == "" { - command = DefaultCommand - } - cmd := exec.CommandContext(context, command, append(r.args(), args...)...) 
- cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: r.Setpgid, - } - cmd.Env = filterEnv(os.Environ(), "NOTIFY_SOCKET") // NOTIFY_SOCKET introduces a special behavior in runc but should only be set if invoked from systemd - if r.PdeathSignal != 0 { - cmd.SysProcAttr.Pdeathsig = r.PdeathSignal - } - - return cmd -} - -func filterEnv(in []string, names ...string) []string { - out := make([]string, 0, len(in)) -loop0: - for _, v := range in { - for _, k := range names { - if strings.HasPrefix(v, k+"=") { - continue loop0 - } - } - out = append(out, v) - } - return out -} diff --git a/vendor/github.com/containerd/go-runc/console.go b/vendor/github.com/containerd/go-runc/console.go deleted file mode 100644 index ff223e4..0000000 --- a/vendor/github.com/containerd/go-runc/console.go +++ /dev/null @@ -1,165 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package runc - -import ( - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - - "github.com/containerd/console" - "golang.org/x/sys/unix" -) - -// NewConsoleSocket creates a new unix socket at the provided path to accept a -// pty master created by runc for use by the container -func NewConsoleSocket(path string) (*Socket, error) { - abs, err := filepath.Abs(path) - if err != nil { - return nil, err - } - addr, err := net.ResolveUnixAddr("unix", abs) - if err != nil { - return nil, err - } - l, err := net.ListenUnix("unix", addr) - if err != nil { - return nil, err - } - return &Socket{ - l: l, - }, nil -} - -// NewTempConsoleSocket returns a temp console socket for use with a container -// On Close(), the socket is deleted -func NewTempConsoleSocket() (*Socket, error) { - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - dir, err := ioutil.TempDir(runtimeDir, "pty") - if err != nil { - return nil, err - } - abs, err := filepath.Abs(filepath.Join(dir, "pty.sock")) - if err != nil { - return nil, err - } - addr, err := net.ResolveUnixAddr("unix", abs) - if err != nil { - return nil, err - } - l, err := net.ListenUnix("unix", addr) - if err != nil { - return nil, err - } - if runtimeDir != "" { - if err := os.Chmod(abs, 0755|os.ModeSticky); err != nil { - return nil, err - } - } - return &Socket{ - l: l, - rmdir: true, - }, nil -} - -// Socket is a unix socket that accepts the pty master created by runc -type Socket struct { - rmdir bool - l *net.UnixListener -} - -// Path returns the path to the unix socket on disk -func (c *Socket) Path() string { - return c.l.Addr().String() -} - -// recvFd waits for a file descriptor to be sent over the given AF_UNIX -// socket. The file name of the remote file descriptor will be recreated -// locally (it is sent as non-auxiliary data in the same payload). 
-func recvFd(socket *net.UnixConn) (*os.File, error) { - const MaxNameLen = 4096 - var oobSpace = unix.CmsgSpace(4) - - name := make([]byte, MaxNameLen) - oob := make([]byte, oobSpace) - - n, oobn, _, _, err := socket.ReadMsgUnix(name, oob) - if err != nil { - return nil, err - } - - if n >= MaxNameLen || oobn != oobSpace { - return nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn) - } - - // Truncate. - name = name[:n] - oob = oob[:oobn] - - scms, err := unix.ParseSocketControlMessage(oob) - if err != nil { - return nil, err - } - if len(scms) != 1 { - return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms)) - } - scm := scms[0] - - fds, err := unix.ParseUnixRights(&scm) - if err != nil { - return nil, err - } - if len(fds) != 1 { - return nil, fmt.Errorf("recvfd: number of fds is not 1: %d", len(fds)) - } - fd := uintptr(fds[0]) - - return os.NewFile(fd, string(name)), nil -} - -// ReceiveMaster blocks until the socket receives the pty master -func (c *Socket) ReceiveMaster() (console.Console, error) { - conn, err := c.l.Accept() - if err != nil { - return nil, err - } - defer conn.Close() - uc, ok := conn.(*net.UnixConn) - if !ok { - return nil, fmt.Errorf("received connection which was not a unix socket") - } - f, err := recvFd(uc) - if err != nil { - return nil, err - } - return console.ConsoleFromFile(f) -} - -// Close closes the unix socket -func (c *Socket) Close() error { - err := c.l.Close() - if c.rmdir { - if rerr := os.RemoveAll(filepath.Dir(c.Path())); err == nil { - err = rerr - } - } - return err -} diff --git a/vendor/github.com/containerd/go-runc/container.go b/vendor/github.com/containerd/go-runc/container.go deleted file mode 100644 index 107381a..0000000 --- a/vendor/github.com/containerd/go-runc/container.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import "time" - -// Container hold information for a runc container -type Container struct { - ID string `json:"id"` - Pid int `json:"pid"` - Status string `json:"status"` - Bundle string `json:"bundle"` - Rootfs string `json:"rootfs"` - Created time.Time `json:"created"` - Annotations map[string]string `json:"annotations"` -} diff --git a/vendor/github.com/containerd/go-runc/events.go b/vendor/github.com/containerd/go-runc/events.go deleted file mode 100644 index d610aeb..0000000 --- a/vendor/github.com/containerd/go-runc/events.go +++ /dev/null @@ -1,100 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package runc - -type Event struct { - // Type are the event type generated by runc - // If the type is "error" then check the Err field on the event for - // the actual error - Type string `json:"type"` - ID string `json:"id"` - Stats *Stats `json:"data,omitempty"` - // Err has a read error if we were unable to decode the event from runc - Err error `json:"-"` -} - -type Stats struct { - Cpu Cpu `json:"cpu"` - Memory Memory `json:"memory"` - Pids Pids `json:"pids"` - Blkio Blkio `json:"blkio"` - Hugetlb map[string]Hugetlb `json:"hugetlb"` -} - -type Hugetlb struct { - Usage uint64 `json:"usage,omitempty"` - Max uint64 `json:"max,omitempty"` - Failcnt uint64 `json:"failcnt"` -} - -type BlkioEntry struct { - Major uint64 `json:"major,omitempty"` - Minor uint64 `json:"minor,omitempty"` - Op string `json:"op,omitempty"` - Value uint64 `json:"value,omitempty"` -} - -type Blkio struct { - IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"` - IoServicedRecursive []BlkioEntry `json:"ioServicedRecursive,omitempty"` - IoQueuedRecursive []BlkioEntry `json:"ioQueueRecursive,omitempty"` - IoServiceTimeRecursive []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"` - IoWaitTimeRecursive []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"` - IoMergedRecursive []BlkioEntry `json:"ioMergedRecursive,omitempty"` - IoTimeRecursive []BlkioEntry `json:"ioTimeRecursive,omitempty"` - SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"` -} - -type Pids struct { - Current uint64 `json:"current,omitempty"` - Limit uint64 `json:"limit,omitempty"` -} - -type Throttling struct { - Periods uint64 `json:"periods,omitempty"` - ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"` - ThrottledTime uint64 `json:"throttledTime,omitempty"` -} - -type CpuUsage struct { - // Units: nanoseconds. 
- Total uint64 `json:"total,omitempty"` - Percpu []uint64 `json:"percpu,omitempty"` - Kernel uint64 `json:"kernel"` - User uint64 `json:"user"` -} - -type Cpu struct { - Usage CpuUsage `json:"usage,omitempty"` - Throttling Throttling `json:"throttling,omitempty"` -} - -type MemoryEntry struct { - Limit uint64 `json:"limit"` - Usage uint64 `json:"usage,omitempty"` - Max uint64 `json:"max,omitempty"` - Failcnt uint64 `json:"failcnt"` -} - -type Memory struct { - Cache uint64 `json:"cache,omitempty"` - Usage MemoryEntry `json:"usage,omitempty"` - Swap MemoryEntry `json:"swap,omitempty"` - Kernel MemoryEntry `json:"kernel,omitempty"` - KernelTCP MemoryEntry `json:"kernelTCP,omitempty"` - Raw map[string]uint64 `json:"raw,omitempty"` -} diff --git a/vendor/github.com/containerd/go-runc/io.go b/vendor/github.com/containerd/go-runc/io.go deleted file mode 100644 index 6cf0410..0000000 --- a/vendor/github.com/containerd/go-runc/io.go +++ /dev/null @@ -1,218 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package runc - -import ( - "io" - "os" - "os/exec" -) - -type IO interface { - io.Closer - Stdin() io.WriteCloser - Stdout() io.ReadCloser - Stderr() io.ReadCloser - Set(*exec.Cmd) -} - -type StartCloser interface { - CloseAfterStart() error -} - -// IOOpt sets I/O creation options -type IOOpt func(*IOOption) - -// IOOption holds I/O creation options -type IOOption struct { - OpenStdin bool - OpenStdout bool - OpenStderr bool -} - -func defaultIOOption() *IOOption { - return &IOOption{ - OpenStdin: true, - OpenStdout: true, - OpenStderr: true, - } -} - -func newPipe() (*pipe, error) { - r, w, err := os.Pipe() - if err != nil { - return nil, err - } - return &pipe{ - r: r, - w: w, - }, nil -} - -type pipe struct { - r *os.File - w *os.File -} - -func (p *pipe) Close() error { - err := p.w.Close() - if rerr := p.r.Close(); err == nil { - err = rerr - } - return err -} - -type pipeIO struct { - in *pipe - out *pipe - err *pipe -} - -func (i *pipeIO) Stdin() io.WriteCloser { - if i.in == nil { - return nil - } - return i.in.w -} - -func (i *pipeIO) Stdout() io.ReadCloser { - if i.out == nil { - return nil - } - return i.out.r -} - -func (i *pipeIO) Stderr() io.ReadCloser { - if i.err == nil { - return nil - } - return i.err.r -} - -func (i *pipeIO) Close() error { - var err error - for _, v := range []*pipe{ - i.in, - i.out, - i.err, - } { - if v != nil { - if cerr := v.Close(); err == nil { - err = cerr - } - } - } - return err -} - -func (i *pipeIO) CloseAfterStart() error { - for _, f := range []*pipe{ - i.out, - i.err, - } { - if f != nil { - f.w.Close() - } - } - return nil -} - -// Set sets the io to the exec.Cmd -func (i *pipeIO) Set(cmd *exec.Cmd) { - if i.in != nil { - cmd.Stdin = i.in.r - } - if i.out != nil { - cmd.Stdout = i.out.w - } - if i.err != nil { - cmd.Stderr = i.err.w - } -} - -func NewSTDIO() (IO, error) { - return &stdio{}, nil -} - -type stdio struct { -} - -func (s *stdio) Close() error { - return nil -} - -func (s *stdio) Set(cmd 
*exec.Cmd) { - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr -} - -func (s *stdio) Stdin() io.WriteCloser { - return os.Stdin -} - -func (s *stdio) Stdout() io.ReadCloser { - return os.Stdout -} - -func (s *stdio) Stderr() io.ReadCloser { - return os.Stderr -} - -// NewNullIO returns IO setup for /dev/null use with runc -func NewNullIO() (IO, error) { - f, err := os.Open(os.DevNull) - if err != nil { - return nil, err - } - return &nullIO{ - devNull: f, - }, nil -} - -type nullIO struct { - devNull *os.File -} - -func (n *nullIO) Close() error { - // this should be closed after start but if not - // make sure we close the file but don't return the error - n.devNull.Close() - return nil -} - -func (n *nullIO) Stdin() io.WriteCloser { - return nil -} - -func (n *nullIO) Stdout() io.ReadCloser { - return nil -} - -func (n *nullIO) Stderr() io.ReadCloser { - return nil -} - -func (n *nullIO) Set(c *exec.Cmd) { - // don't set STDIN here - c.Stdout = n.devNull - c.Stderr = n.devNull -} - -func (n *nullIO) CloseAfterStart() error { - return n.devNull.Close() -} diff --git a/vendor/github.com/containerd/go-runc/io_unix.go b/vendor/github.com/containerd/go-runc/io_unix.go deleted file mode 100644 index ccf1dd4..0000000 --- a/vendor/github.com/containerd/go-runc/io_unix.go +++ /dev/null @@ -1,96 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package runc - -import ( - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" - "runtime" -) - -// NewPipeIO creates pipe pairs to be used with runc -func NewPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) { - option := defaultIOOption() - for _, o := range opts { - o(option) - } - var ( - pipes []*pipe - stdin, stdout, stderr *pipe - ) - // cleanup in case of an error - defer func() { - if err != nil { - for _, p := range pipes { - p.Close() - } - } - }() - if option.OpenStdin { - if stdin, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stdin) - if err = unix.Fchown(int(stdin.r.Fd()), uid, gid); err != nil { - // TODO: revert with proper darwin solution, skipping for now - // as darwin chown is returning EINVAL on anonymous pipe - if runtime.GOOS == "darwin" { - logrus.WithError(err).Debug("failed to chown stdin, ignored") - } else { - return nil, errors.Wrap(err, "failed to chown stdin") - } - } - } - if option.OpenStdout { - if stdout, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stdout) - if err = unix.Fchown(int(stdout.w.Fd()), uid, gid); err != nil { - // TODO: revert with proper darwin solution, skipping for now - // as darwin chown is returning EINVAL on anonymous pipe - if runtime.GOOS == "darwin" { - logrus.WithError(err).Debug("failed to chown stdout, ignored") - } else { - return nil, errors.Wrap(err, "failed to chown stdout") - } - } - } - if option.OpenStderr { - if stderr, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stderr) - if err = unix.Fchown(int(stderr.w.Fd()), uid, gid); err != nil { - // TODO: revert with proper darwin solution, skipping for now - // as darwin chown is returning EINVAL on anonymous pipe - if runtime.GOOS == "darwin" { - logrus.WithError(err).Debug("failed to chown stderr, ignored") - } else { - return nil, errors.Wrap(err, "failed to chown stderr") - } - } - } - return &pipeIO{ - in: stdin, - 
out: stdout, - err: stderr, - }, nil -} diff --git a/vendor/github.com/containerd/go-runc/io_windows.go b/vendor/github.com/containerd/go-runc/io_windows.go deleted file mode 100644 index fc56ac4..0000000 --- a/vendor/github.com/containerd/go-runc/io_windows.go +++ /dev/null @@ -1,62 +0,0 @@ -// +build windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -// NewPipeIO creates pipe pairs to be used with runc -func NewPipeIO(opts ...IOOpt) (i IO, err error) { - option := defaultIOOption() - for _, o := range opts { - o(option) - } - var ( - pipes []*pipe - stdin, stdout, stderr *pipe - ) - // cleanup in case of an error - defer func() { - if err != nil { - for _, p := range pipes { - p.Close() - } - } - }() - if option.OpenStdin { - if stdin, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stdin) - } - if option.OpenStdout { - if stdout, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stdout) - } - if option.OpenStderr { - if stderr, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stderr) - } - return &pipeIO{ - in: stdin, - out: stdout, - err: stderr, - }, nil -} diff --git a/vendor/github.com/containerd/go-runc/monitor.go b/vendor/github.com/containerd/go-runc/monitor.go deleted file mode 100644 index ff06a3f..0000000 --- a/vendor/github.com/containerd/go-runc/monitor.go +++ /dev/null @@ -1,76 +0,0 @@ -/* - Copyright The containerd 
Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import ( - "os/exec" - "syscall" - "time" -) - -var Monitor ProcessMonitor = &defaultMonitor{} - -type Exit struct { - Timestamp time.Time - Pid int - Status int -} - -// ProcessMonitor is an interface for process monitoring -// -// It allows daemons using go-runc to have a SIGCHLD handler -// to handle exits without introducing races between the handler -// and go's exec.Cmd -// These methods should match the methods exposed by exec.Cmd to provide -// a consistent experience for the caller -type ProcessMonitor interface { - Start(*exec.Cmd) (chan Exit, error) - Wait(*exec.Cmd, chan Exit) (int, error) -} - -type defaultMonitor struct { -} - -func (m *defaultMonitor) Start(c *exec.Cmd) (chan Exit, error) { - if err := c.Start(); err != nil { - return nil, err - } - ec := make(chan Exit, 1) - go func() { - var status int - if err := c.Wait(); err != nil { - status = 255 - if exitErr, ok := err.(*exec.ExitError); ok { - if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok { - status = ws.ExitStatus() - } - } - } - ec <- Exit{ - Timestamp: time.Now(), - Pid: c.Process.Pid, - Status: status, - } - close(ec) - }() - return ec, nil -} - -func (m *defaultMonitor) Wait(c *exec.Cmd, ec chan Exit) (int, error) { - e := <-ec - return e.Status, nil -} diff --git a/vendor/github.com/containerd/go-runc/runc.go b/vendor/github.com/containerd/go-runc/runc.go deleted file mode 100644 index f5f03ae..0000000 --- 
a/vendor/github.com/containerd/go-runc/runc.go +++ /dev/null @@ -1,741 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "time" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// Format is the type of log formatting options avaliable -type Format string - -// TopBody represents the structured data of the full ps output -type TopResults struct { - // Processes running in the container, where each is process is an array of values corresponding to the headers - Processes [][]string `json:"Processes"` - - // Headers are the names of the columns - Headers []string `json:"Headers"` -} - -const ( - none Format = "" - JSON Format = "json" - Text Format = "text" - // DefaultCommand is the default command for Runc - DefaultCommand = "runc" -) - -// List returns all containers created inside the provided runc root directory -func (r *Runc) List(context context.Context) ([]*Container, error) { - data, err := cmdOutput(r.command(context, "list", "--format=json"), false, nil) - defer putBuf(data) - if err != nil { - return nil, err - } - var out []*Container - if err := json.Unmarshal(data.Bytes(), &out); err != nil { - return nil, err - } - return out, nil -} - -// State returns the state for the container provided by id -func (r *Runc) State(context 
context.Context, id string) (*Container, error) { - data, err := cmdOutput(r.command(context, "state", id), true, nil) - defer putBuf(data) - if err != nil { - return nil, fmt.Errorf("%s: %s", err, data.String()) - } - var c Container - if err := json.Unmarshal(data.Bytes(), &c); err != nil { - return nil, err - } - return &c, nil -} - -type ConsoleSocket interface { - Path() string -} - -type CreateOpts struct { - IO - // PidFile is a path to where a pid file should be created - PidFile string - ConsoleSocket ConsoleSocket - Detach bool - NoPivot bool - NoNewKeyring bool - ExtraFiles []*os.File - Started chan<- int -} - -func (o *CreateOpts) args() (out []string, err error) { - if o.PidFile != "" { - abs, err := filepath.Abs(o.PidFile) - if err != nil { - return nil, err - } - out = append(out, "--pid-file", abs) - } - if o.ConsoleSocket != nil { - out = append(out, "--console-socket", o.ConsoleSocket.Path()) - } - if o.NoPivot { - out = append(out, "--no-pivot") - } - if o.NoNewKeyring { - out = append(out, "--no-new-keyring") - } - if o.Detach { - out = append(out, "--detach") - } - if o.ExtraFiles != nil { - out = append(out, "--preserve-fds", strconv.Itoa(len(o.ExtraFiles))) - } - return out, nil -} - -// Create creates a new container and returns its pid if it was created successfully -func (r *Runc) Create(context context.Context, id, bundle string, opts *CreateOpts) error { - args := []string{"create", "--bundle", bundle} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) - } - cmd := r.command(context, append(args, id)...) 
- if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - cmd.ExtraFiles = opts.ExtraFiles - - if cmd.Stdout == nil && cmd.Stderr == nil { - data, err := cmdOutput(cmd, true, nil) - defer putBuf(data) - if err != nil { - return fmt.Errorf("%s: %s", err, data.String()) - } - return nil - } - ec, err := Monitor.Start(cmd) - if err != nil { - return err - } - if opts != nil && opts.IO != nil { - if c, ok := opts.IO.(StartCloser); ok { - if err := c.CloseAfterStart(); err != nil { - return err - } - } - } - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) - } - return err -} - -// Start will start an already created container -func (r *Runc) Start(context context.Context, id string) error { - return r.runOrError(r.command(context, "start", id)) -} - -type ExecOpts struct { - IO - PidFile string - ConsoleSocket ConsoleSocket - Detach bool - Started chan<- int -} - -func (o *ExecOpts) args() (out []string, err error) { - if o.ConsoleSocket != nil { - out = append(out, "--console-socket", o.ConsoleSocket.Path()) - } - if o.Detach { - out = append(out, "--detach") - } - if o.PidFile != "" { - abs, err := filepath.Abs(o.PidFile) - if err != nil { - return nil, err - } - out = append(out, "--pid-file", abs) - } - return out, nil -} - -// Exec executes an additional process inside the container based on a full -// OCI Process specification -func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts *ExecOpts) error { - if opts.Started != nil { - defer close(opts.Started) - } - f, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), "runc-process") - if err != nil { - return err - } - defer os.Remove(f.Name()) - err = json.NewEncoder(f).Encode(spec) - f.Close() - if err != nil { - return err - } - args := []string{"exec", "--process", f.Name()} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = 
append(args, oargs...) - } - cmd := r.command(context, append(args, id)...) - if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - if cmd.Stdout == nil && cmd.Stderr == nil { - data, err := cmdOutput(cmd, true, opts.Started) - defer putBuf(data) - if err != nil { - return fmt.Errorf("%w: %s", err, data.String()) - } - return nil - } - ec, err := Monitor.Start(cmd) - if err != nil { - return err - } - if opts.Started != nil { - opts.Started <- cmd.Process.Pid - } - if opts != nil && opts.IO != nil { - if c, ok := opts.IO.(StartCloser); ok { - if err := c.CloseAfterStart(); err != nil { - return err - } - } - } - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) - } - return err -} - -// Run runs the create, start, delete lifecycle of the container -// and returns its exit status after it has exited -func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) (int, error) { - if opts.Started != nil { - defer close(opts.Started) - } - args := []string{"run", "--bundle", bundle} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return -1, err - } - args = append(args, oargs...) - } - cmd := r.command(context, append(args, id)...) 
- if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - ec, err := Monitor.Start(cmd) - if err != nil { - return -1, err - } - if opts.Started != nil { - opts.Started <- cmd.Process.Pid - } - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) - } - return status, err -} - -type DeleteOpts struct { - Force bool -} - -func (o *DeleteOpts) args() (out []string) { - if o.Force { - out = append(out, "--force") - } - return out -} - -// Delete deletes the container -func (r *Runc) Delete(context context.Context, id string, opts *DeleteOpts) error { - args := []string{"delete"} - if opts != nil { - args = append(args, opts.args()...) - } - return r.runOrError(r.command(context, append(args, id)...)) -} - -// KillOpts specifies options for killing a container and its processes -type KillOpts struct { - All bool -} - -func (o *KillOpts) args() (out []string) { - if o.All { - out = append(out, "--all") - } - return out -} - -// Kill sends the specified signal to the container -func (r *Runc) Kill(context context.Context, id string, sig int, opts *KillOpts) error { - args := []string{ - "kill", - } - if opts != nil { - args = append(args, opts.args()...) 
- } - return r.runOrError(r.command(context, append(args, id, strconv.Itoa(sig))...)) -} - -// Stats return the stats for a container like cpu, memory, and io -func (r *Runc) Stats(context context.Context, id string) (*Stats, error) { - cmd := r.command(context, "events", "--stats", id) - rd, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - ec, err := Monitor.Start(cmd) - if err != nil { - return nil, err - } - defer func() { - rd.Close() - Monitor.Wait(cmd, ec) - }() - var e Event - if err := json.NewDecoder(rd).Decode(&e); err != nil { - return nil, err - } - return e.Stats, nil -} - -// Events returns an event stream from runc for a container with stats and OOM notifications -func (r *Runc) Events(context context.Context, id string, interval time.Duration) (chan *Event, error) { - cmd := r.command(context, "events", fmt.Sprintf("--interval=%ds", int(interval.Seconds())), id) - rd, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - ec, err := Monitor.Start(cmd) - if err != nil { - rd.Close() - return nil, err - } - var ( - dec = json.NewDecoder(rd) - c = make(chan *Event, 128) - ) - go func() { - defer func() { - close(c) - rd.Close() - Monitor.Wait(cmd, ec) - }() - for { - var e Event - if err := dec.Decode(&e); err != nil { - if err == io.EOF { - return - } - e = Event{ - Type: "error", - Err: err, - } - } - c <- &e - } - }() - return c, nil -} - -// Pause the container with the provided id -func (r *Runc) Pause(context context.Context, id string) error { - return r.runOrError(r.command(context, "pause", id)) -} - -// Resume the container with the provided id -func (r *Runc) Resume(context context.Context, id string) error { - return r.runOrError(r.command(context, "resume", id)) -} - -// Ps lists all the processes inside the container returning their pids -func (r *Runc) Ps(context context.Context, id string) ([]int, error) { - data, err := cmdOutput(r.command(context, "ps", "--format", "json", id), true, nil) - defer 
putBuf(data) - if err != nil { - return nil, fmt.Errorf("%s: %s", err, data.String()) - } - var pids []int - if err := json.Unmarshal(data.Bytes(), &pids); err != nil { - return nil, err - } - return pids, nil -} - -// Top lists all the processes inside the container returning the full ps data -func (r *Runc) Top(context context.Context, id string, psOptions string) (*TopResults, error) { - data, err := cmdOutput(r.command(context, "ps", "--format", "table", id, psOptions), true, nil) - defer putBuf(data) - if err != nil { - return nil, fmt.Errorf("%s: %s", err, data.String()) - } - - topResults, err := ParsePSOutput(data.Bytes()) - if err != nil { - return nil, fmt.Errorf("%s: ", err) - } - return topResults, nil -} - -type CheckpointOpts struct { - // ImagePath is the path for saving the criu image file - ImagePath string - // WorkDir is the working directory for criu - WorkDir string - // ParentPath is the path for previous image files from a pre-dump - ParentPath string - // AllowOpenTCP allows open tcp connections to be checkpointed - AllowOpenTCP bool - // AllowExternalUnixSockets allows external unix sockets to be checkpointed - AllowExternalUnixSockets bool - // AllowTerminal allows the terminal(pty) to be checkpointed with a container - AllowTerminal bool - // CriuPageServer is the address:port for the criu page server - CriuPageServer string - // FileLocks handle file locks held by the container - FileLocks bool - // Cgroups is the cgroup mode for how to handle the checkpoint of a container's cgroups - Cgroups CgroupMode - // EmptyNamespaces creates a namespace for the container but does not save its properties - // Provide the namespaces you wish to be checkpointed without their settings on restore - EmptyNamespaces []string - // LazyPages uses userfaultfd to lazily restore memory pages - LazyPages bool - // StatusFile is the file criu writes \0 to once lazy-pages is ready - StatusFile *os.File -} - -type CgroupMode string - -const ( - Soft CgroupMode = 
"soft" - Full CgroupMode = "full" - Strict CgroupMode = "strict" -) - -func (o *CheckpointOpts) args() (out []string) { - if o.ImagePath != "" { - out = append(out, "--image-path", o.ImagePath) - } - if o.WorkDir != "" { - out = append(out, "--work-path", o.WorkDir) - } - if o.ParentPath != "" { - out = append(out, "--parent-path", o.ParentPath) - } - if o.AllowOpenTCP { - out = append(out, "--tcp-established") - } - if o.AllowExternalUnixSockets { - out = append(out, "--ext-unix-sk") - } - if o.AllowTerminal { - out = append(out, "--shell-job") - } - if o.CriuPageServer != "" { - out = append(out, "--page-server", o.CriuPageServer) - } - if o.FileLocks { - out = append(out, "--file-locks") - } - if string(o.Cgroups) != "" { - out = append(out, "--manage-cgroups-mode", string(o.Cgroups)) - } - for _, ns := range o.EmptyNamespaces { - out = append(out, "--empty-ns", ns) - } - if o.LazyPages { - out = append(out, "--lazy-pages") - } - return out -} - -type CheckpointAction func([]string) []string - -// LeaveRunning keeps the container running after the checkpoint has been completed -func LeaveRunning(args []string) []string { - return append(args, "--leave-running") -} - -// PreDump allows a pre-dump of the checkpoint to be made and completed later -func PreDump(args []string) []string { - return append(args, "--pre-dump") -} - -// Checkpoint allows you to checkpoint a container using criu -func (r *Runc) Checkpoint(context context.Context, id string, opts *CheckpointOpts, actions ...CheckpointAction) error { - args := []string{"checkpoint"} - extraFiles := []*os.File{} - if opts != nil { - args = append(args, opts.args()...) 
- if opts.StatusFile != nil { - // pass the status file to the child process - extraFiles = []*os.File{opts.StatusFile} - // set status-fd to 3 as this will be the file descriptor - // of the first file passed with cmd.ExtraFiles - args = append(args, "--status-fd", "3") - } - } - for _, a := range actions { - args = a(args) - } - cmd := r.command(context, append(args, id)...) - cmd.ExtraFiles = extraFiles - return r.runOrError(cmd) -} - -type RestoreOpts struct { - CheckpointOpts - IO - - Detach bool - PidFile string - NoSubreaper bool - NoPivot bool - ConsoleSocket ConsoleSocket -} - -func (o *RestoreOpts) args() ([]string, error) { - out := o.CheckpointOpts.args() - if o.Detach { - out = append(out, "--detach") - } - if o.PidFile != "" { - abs, err := filepath.Abs(o.PidFile) - if err != nil { - return nil, err - } - out = append(out, "--pid-file", abs) - } - if o.ConsoleSocket != nil { - out = append(out, "--console-socket", o.ConsoleSocket.Path()) - } - if o.NoPivot { - out = append(out, "--no-pivot") - } - if o.NoSubreaper { - out = append(out, "-no-subreaper") - } - return out, nil -} - -// Restore restores a container with the provide id from an existing checkpoint -func (r *Runc) Restore(context context.Context, id, bundle string, opts *RestoreOpts) (int, error) { - args := []string{"restore"} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return -1, err - } - args = append(args, oargs...) - } - args = append(args, "--bundle", bundle) - cmd := r.command(context, append(args, id)...) 
- if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - ec, err := Monitor.Start(cmd) - if err != nil { - return -1, err - } - if opts != nil && opts.IO != nil { - if c, ok := opts.IO.(StartCloser); ok { - if err := c.CloseAfterStart(); err != nil { - return -1, err - } - } - } - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) - } - return status, err -} - -// Update updates the current container with the provided resource spec -func (r *Runc) Update(context context.Context, id string, resources *specs.LinuxResources) error { - buf := getBuf() - defer putBuf(buf) - - if err := json.NewEncoder(buf).Encode(resources); err != nil { - return err - } - args := []string{"update", "--resources", "-", id} - cmd := r.command(context, args...) - cmd.Stdin = buf - return r.runOrError(cmd) -} - -var ErrParseRuncVersion = errors.New("unable to parse runc version") - -type Version struct { - Runc string - Commit string - Spec string -} - -// Version returns the runc and runtime-spec versions -func (r *Runc) Version(context context.Context) (Version, error) { - data, err := cmdOutput(r.command(context, "--version"), false, nil) - defer putBuf(data) - if err != nil { - return Version{}, err - } - return parseVersion(data.Bytes()) -} - -func parseVersion(data []byte) (Version, error) { - var v Version - parts := strings.Split(strings.TrimSpace(string(data)), "\n") - - if len(parts) > 0 { - if !strings.HasPrefix(parts[0], "runc version ") { - return v, nil - } - v.Runc = parts[0][13:] - - for _, part := range parts[1:] { - if strings.HasPrefix(part, "commit: ") { - v.Commit = part[8:] - } else if strings.HasPrefix(part, "spec: ") { - v.Spec = part[6:] - } - } - } - - return v, nil -} - -func (r *Runc) args() (out []string) { - if r.Root != "" { - out = append(out, "--root", r.Root) - } - if r.Debug { - out = append(out, "--debug") - } - if r.Log != "" { - out = 
append(out, "--log", r.Log) - } - if r.LogFormat != none { - out = append(out, "--log-format", string(r.LogFormat)) - } - if r.Criu != "" { - out = append(out, "--criu", r.Criu) - } - if r.SystemdCgroup { - out = append(out, "--systemd-cgroup") - } - if r.Rootless != nil { - // nil stands for "auto" (differs from explicit "false") - out = append(out, "--rootless="+strconv.FormatBool(*r.Rootless)) - } - return out -} - -// runOrError will run the provided command. If an error is -// encountered and neither Stdout or Stderr was set the error and the -// stderr of the command will be returned in the format of : -// -func (r *Runc) runOrError(cmd *exec.Cmd) error { - if cmd.Stdout != nil || cmd.Stderr != nil { - ec, err := Monitor.Start(cmd) - if err != nil { - return err - } - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) - } - return err - } - data, err := cmdOutput(cmd, true, nil) - defer putBuf(data) - if err != nil { - return fmt.Errorf("%s: %s", err, data.String()) - } - return nil -} - -// callers of cmdOutput are expected to call putBuf on the returned Buffer -// to ensure it is released back to the shared pool after use. 
-func cmdOutput(cmd *exec.Cmd, combined bool, started chan<- int) (*bytes.Buffer, error) { - b := getBuf() - - cmd.Stdout = b - if combined { - cmd.Stderr = b - } - ec, err := Monitor.Start(cmd) - if err != nil { - return nil, err - } - if started != nil { - started <- cmd.Process.Pid - } - - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) - } - - return b, err -} - -type ExitError struct { - Status int -} - -func (e *ExitError) Error() string { - return fmt.Sprintf("exit status %d", e.Status) -} diff --git a/vendor/github.com/containerd/go-runc/runc_unix.go b/vendor/github.com/containerd/go-runc/runc_unix.go deleted file mode 100644 index 548ffd6..0000000 --- a/vendor/github.com/containerd/go-runc/runc_unix.go +++ /dev/null @@ -1,38 +0,0 @@ -//+build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package runc - -import ( - "golang.org/x/sys/unix" -) - -// Runc is the client to the runc cli -type Runc struct { - //If command is empty, DefaultCommand is used - Command string - Root string - Debug bool - Log string - LogFormat Format - PdeathSignal unix.Signal - Setpgid bool - Criu string - SystemdCgroup bool - Rootless *bool // nil stands for "auto" -} diff --git a/vendor/github.com/containerd/go-runc/runc_windows.go b/vendor/github.com/containerd/go-runc/runc_windows.go deleted file mode 100644 index c5873de..0000000 --- a/vendor/github.com/containerd/go-runc/runc_windows.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -// Runc is the client to the runc cli -type Runc struct { - //If command is empty, DefaultCommand is used - Command string - Root string - Debug bool - Log string - LogFormat Format - Setpgid bool - Criu string - SystemdCgroup bool - Rootless *bool // nil stands for "auto" -} diff --git a/vendor/github.com/containerd/go-runc/utils.go b/vendor/github.com/containerd/go-runc/utils.go deleted file mode 100644 index 948b633..0000000 --- a/vendor/github.com/containerd/go-runc/utils.go +++ /dev/null @@ -1,111 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import ( - "bytes" - "io/ioutil" - "strconv" - "strings" - "sync" - "syscall" -) - -// ReadPidFile reads the pid file at the provided path and returns -// the pid or an error if the read and conversion is unsuccessful -func ReadPidFile(path string) (int, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return -1, err - } - return strconv.Atoi(string(data)) -} - -const exitSignalOffset = 128 - -// exitStatus returns the correct exit status for a process based on if it -// was signaled or exited cleanly -func exitStatus(status syscall.WaitStatus) int { - if status.Signaled() { - return exitSignalOffset + int(status.Signal()) - } - return status.ExitStatus() -} - -var bytesBufferPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(nil) - }, -} - -func getBuf() *bytes.Buffer { - return bytesBufferPool.Get().(*bytes.Buffer) -} - -func putBuf(b *bytes.Buffer) { - if b == nil { - return - } - - b.Reset() - bytesBufferPool.Put(b) -} - -// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces -func fieldsASCII(s string) []string { - fn := func(r rune) bool { - switch r { - case '\t', '\n', '\f', '\r', ' ': - return true - } - return false - } - return strings.FieldsFunc(s, fn) -} - -// ParsePSOutput parses the runtime's ps raw output and returns a TopResults -func ParsePSOutput(output []byte) (*TopResults, error) { - topResults := &TopResults{} - - lines := strings.Split(string(output), "\n") - topResults.Headers = fieldsASCII(lines[0]) - - pidIndex := -1 - for i, name := range topResults.Headers 
{ - if name == "PID" { - pidIndex = i - } - } - - for _, line := range lines[1:] { - if len(line) == 0 { - continue - } - - fields := fieldsASCII(line) - - if fields[pidIndex] == "-" { - continue - } - - process := fields[:len(topResults.Headers)-1] - process = append(process, strings.Join(fields[len(topResults.Headers)-1:], " ")) - topResults.Processes = append(topResults.Processes, process) - - } - return topResults, nil -} diff --git a/vendor/github.com/containerd/imgcrypt/.gitignore b/vendor/github.com/containerd/imgcrypt/.gitignore index 814d8e1..10da9f7 100644 --- a/vendor/github.com/containerd/imgcrypt/.gitignore +++ b/vendor/github.com/containerd/imgcrypt/.gitignore @@ -1,3 +1,2 @@ *~ -/ctr -/ctd-decoder +/bin/ diff --git a/vendor/github.com/containerd/imgcrypt/.golangci.yml b/vendor/github.com/containerd/imgcrypt/.golangci.yml index 9b16b5c..13ec860 100644 --- a/vendor/github.com/containerd/imgcrypt/.golangci.yml +++ b/vendor/github.com/containerd/imgcrypt/.golangci.yml @@ -6,7 +6,7 @@ linters: - unconvert - gofmt - goimports - - golint + - revive - ineffassign - vet - unused diff --git a/vendor/github.com/containerd/imgcrypt/CHANGES b/vendor/github.com/containerd/imgcrypt/CHANGES index cecf28f..0478be4 100644 --- a/vendor/github.com/containerd/imgcrypt/CHANGES +++ b/vendor/github.com/containerd/imgcrypt/CHANGES @@ -1,5 +1,28 @@ CHANGES +v1.1.4: + - Fixed issue in CheckAuthorization() callpath for images with a ManifestList + - CVE-2022-24778 + - Fix: https://github.com/containerd/imgcrypt/commit/6fdd9818a4d8142107b7ecd767d839c9707700d9 + - Added test case covering this + - Updated to ocicrypt 1.1.3 + - Updated to containerd 1.6.1 + +v1.1.3: + - Release v1.1.3 addresses issue #62 due to re-tagging of v1.1.2 + - docs: update referenced containerd project branch to main + - Update linter to match containerd repo + - Update CI golang version + - Updated to containerd 1.5.8 + +v1.1.2: + - Decouple CreateCryptoConfig() from github.com/urfave/cli + - Updated to 
containerd 1.5.7 + - Implemented ConvertFunc for image en- and decryption + - Replace pkg/errors with errors package + - Updated to ocicrypt 1.1.2 + - Sync'ed ctr-enc with ctr of containerd-1.5.0 + v1.1.1: - rebased on ocicrypt 1.1.1 diff --git a/vendor/github.com/containerd/imgcrypt/MAINTAINERS b/vendor/github.com/containerd/imgcrypt/MAINTAINERS index acb227c..15c43be 100644 --- a/vendor/github.com/containerd/imgcrypt/MAINTAINERS +++ b/vendor/github.com/containerd/imgcrypt/MAINTAINERS @@ -1,7 +1,7 @@ # imgcrypt maintainers # -# As a containerd sub-project, containerd maintainers are also included from https://github.com/containerd/project/blob/master/MAINTAINERS. -# See https://github.com/containerd/project/blob/master/GOVERNANCE.md for description of maintainer role +# As a containerd sub-project, containerd maintainers are also included from https://github.com/containerd/project/blob/main/MAINTAINERS. +# See https://github.com/containerd/project/blob/main/GOVERNANCE.md for description of maintainer role # # MAINTAINERS # GitHub ID, Name, Email address diff --git a/vendor/github.com/containerd/imgcrypt/Makefile b/vendor/github.com/containerd/imgcrypt/Makefile index 88c331e..df6d9b5 100644 --- a/vendor/github.com/containerd/imgcrypt/Makefile +++ b/vendor/github.com/containerd/imgcrypt/Makefile @@ -16,9 +16,14 @@ # Base path used to install. 
DESTDIR ?= /usr/local +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) + +CTR_LDFLAGS=-ldflags '-X github.com/containerd/containerd/version.Version=$(VERSION)' COMMANDS=ctd-decoder ctr-enc +RELEASE_COMMANDS=ctd-decoder BINARIES=$(addprefix bin/,$(COMMANDS)) +RELEASE_BINARIES=$(addprefix bin/,$(RELEASE_COMMANDS)) .PHONY: check build ctd-decoder @@ -32,7 +37,7 @@ bin/ctd-decoder: cmd/ctd-decoder FORCE go build -o $@ -v ./cmd/ctd-decoder/ bin/ctr-enc: cmd/ctr FORCE - go build -o $@ -v ./cmd/ctr/ + go build -o $@ ${CTR_LDFLAGS} -v ./cmd/ctr/ check: @echo "$@" @@ -44,6 +49,11 @@ install: @mkdir -p $(DESTDIR)/bin @install $(BINARIES) $(DESTDIR)/bin +containerd-release: + @echo "$@" + @mkdir -p $(DESTDIR)/bin + @install $(RELEASE_BINARIES) $(DESTDIR)/bin + uninstall: @echo "$@" @rm -f $(addprefix $(DESTDIR)/bin/,$(notdir $(BINARIES))) diff --git a/vendor/github.com/containerd/imgcrypt/README.md b/vendor/github.com/containerd/imgcrypt/README.md index c90fdd5..01ea1c8 100644 --- a/vendor/github.com/containerd/imgcrypt/README.md +++ b/vendor/github.com/containerd/imgcrypt/README.md @@ -11,7 +11,7 @@ of containerd's `ctr` tool (`ctr-enc') with support for encrypting and decryptin # Usage `imgcrypt` requires containerd 1.3 or later. Containerd 1.4 or later is required when used with Kubernetes. -For configuration instructions for kubernetes, please consult the [CRI decryption document](https://github.com/containerd/containerd/blob/master/docs/decryption.md). +For configuration instructions for kubernetes, please consult the [CRI decryption document](https://github.com/containerd/containerd/blob/main/docs/cri/decryption.md). Build and install `imgcrypt`: @@ -96,8 +96,8 @@ Hello World! **imgcrypt** is a non-core containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). 
As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), * [Maintainers](MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/cgroups/v2/hugetlb.go b/vendor/github.com/containerd/imgcrypt/images/encryption/any.go similarity index 62% rename from vendor/github.com/containerd/cgroups/v2/hugetlb.go rename to vendor/github.com/containerd/imgcrypt/images/encryption/any.go index 16b35bd..7a89a8c 100644 --- a/vendor/github.com/containerd/cgroups/v2/hugetlb.go +++ b/vendor/github.com/containerd/imgcrypt/images/encryption/any.go @@ -14,24 +14,29 @@ limitations under the License. 
*/ -package v2 +package encryption -import "strings" +import "github.com/gogo/protobuf/types" -type HugeTlb []HugeTlbEntry +type anyMap map[string]*types.Any -type HugeTlbEntry struct { - HugePageSize string - Limit uint64 +type any interface { + GetTypeUrl() string + GetValue() []byte } -func (r *HugeTlb) Values() (o []Value) { - for _, e := range *r { - o = append(o, Value{ - filename: strings.Join([]string{"hugetlb", e.HugePageSize, "max"}, "."), - value: e.Limit, - }) +func fromAny(from any) *types.Any { + if from == nil { + return nil } - return o + pbany, ok := from.(*types.Any) + if ok { + return pbany + } + + return &types.Any{ + TypeUrl: from.GetTypeUrl(), + Value: from.GetValue(), + } } diff --git a/vendor/github.com/containerd/imgcrypt/images/encryption/client.go b/vendor/github.com/containerd/imgcrypt/images/encryption/client.go index 6419d59..f9ea76f 100644 --- a/vendor/github.com/containerd/imgcrypt/images/encryption/client.go +++ b/vendor/github.com/containerd/imgcrypt/images/encryption/client.go @@ -18,6 +18,7 @@ package encryption import ( "context" + "fmt" "github.com/containerd/containerd" "github.com/containerd/containerd/containers" @@ -25,26 +26,27 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/imgcrypt" "github.com/containerd/typeurl" + encconfig "github.com/containers/ocicrypt/config" - "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // WithDecryptedUnpack allows to pass parameters the 'layertool' needs to the applier func WithDecryptedUnpack(data *imgcrypt.Payload) diff.ApplyOpt { return func(_ context.Context, desc ocispec.Descriptor, c *diff.ApplyConfig) error { if c.ProcessorPayloads == nil { - c.ProcessorPayloads = make(map[string]*types.Any) + c.ProcessorPayloads = make(anyMap) } data.Descriptor = desc any, err := typeurl.MarshalAny(data) if err != nil { - return errors.Wrapf(err, "failed to marshal payload") + return 
fmt.Errorf("failed to marshal payload: %w", err) } + pbany := fromAny(any) + for _, id := range imgcrypt.PayloadToolIDs { - c.ProcessorPayloads[id] = any + c.ProcessorPayloads[id] = pbany } return nil } diff --git a/vendor/github.com/containerd/imgcrypt/images/encryption/encryption.go b/vendor/github.com/containerd/imgcrypt/images/encryption/encryption.go index 67e7767..291424d 100644 --- a/vendor/github.com/containerd/imgcrypt/images/encryption/encryption.go +++ b/vendor/github.com/containerd/imgcrypt/images/encryption/encryption.go @@ -20,22 +20,22 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "math/rand" - "github.com/containerd/containerd/images" - "github.com/containers/ocicrypt" - encconfig "github.com/containers/ocicrypt/config" - "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/images/converter" "github.com/containerd/containerd/platforms" - encocispec "github.com/containers/ocicrypt/spec" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" - "github.com/pkg/errors" + "github.com/containers/ocicrypt" + encconfig "github.com/containers/ocicrypt/config" + encocispec "github.com/containers/ocicrypt/spec" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -123,7 +123,7 @@ func encryptLayer(cc *encconfig.CryptoConfig, dataReader content.ReaderAt, desc newDesc.MediaType = encocispec.MediaTypeLayerEnc default: - return ocispec.Descriptor{}, nil, nil, errors.Errorf("Encryption: unsupporter layer MediaType: %s\n", desc.MediaType) + return ocispec.Descriptor{}, nil, nil, fmt.Errorf("unsupporter layer MediaType: %s", desc.MediaType) } return newDesc, encLayerReader, encLayerFinalizer, nil @@ -148,7 +148,7 @@ func DecryptLayer(dc *encconfig.DecryptConfig, dataReader io.Reader, desc 
ocispe case encocispec.MediaTypeLayerEnc: newDesc.MediaType = images.MediaTypeDockerSchema2Layer default: - return ocispec.Descriptor{}, nil, "", errors.Errorf("Decryption: unsupporter layer MediaType: %s\n", desc.MediaType) + return ocispec.Descriptor{}, nil, "", fmt.Errorf("unsupporter layer MediaType: %s", desc.MediaType) } return newDesc, resultReader, layerDigest, nil } @@ -173,7 +173,7 @@ func decryptLayer(cc *encconfig.CryptoConfig, dataReader content.ReaderAt, desc case encocispec.MediaTypeLayerEnc: newDesc.MediaType = images.MediaTypeDockerSchema2Layer default: - return ocispec.Descriptor{}, nil, errors.Errorf("Decryption: unsupporter layer MediaType: %s\n", desc.MediaType) + return ocispec.Descriptor{}, nil, fmt.Errorf("unsupporter layer MediaType: %s", desc.MediaType) } return newDesc, resultReader, nil } @@ -216,7 +216,7 @@ func cryptLayer(ctx context.Context, cs content.Store, desc ocispec.Descriptor, if haveDigest { if err := content.WriteBlob(ctx, cs, ref, resultReader, newDesc); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config") + return ocispec.Descriptor{}, fmt.Errorf("failed to write config: %w", err) } } else { newDesc.Digest, newDesc.Size, err = ingestReader(ctx, cs, ref, resultReader) @@ -230,7 +230,7 @@ func cryptLayer(ctx context.Context, cs content.Store, desc ocispec.Descriptor, if encLayerFinalizer != nil { annotations, err := encLayerFinalizer() if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "Error getting annotations from encLayer finalizer") + return ocispec.Descriptor{}, fmt.Errorf("error getting annotations from encLayer finalizer: %w", err) } for k, v := range annotations { newDesc.Annotations[k] = v @@ -242,22 +242,22 @@ func cryptLayer(ctx context.Context, cs content.Store, desc ocispec.Descriptor, func ingestReader(ctx context.Context, cs content.Ingester, ref string, r io.Reader) (digest.Digest, int64, error) { cw, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) if 
err != nil { - return "", 0, errors.Wrap(err, "failed to open writer") + return "", 0, fmt.Errorf("failed to open writer: %w", err) } defer cw.Close() if _, err := content.CopyReader(cw, r); err != nil { - return "", 0, errors.Wrap(err, "copy failed") + return "", 0, fmt.Errorf("copy failed: %w", err) } st, err := cw.Status() if err != nil { - return "", 0, errors.Wrap(err, "failed to get state") + return "", 0, fmt.Errorf("failed to get state: %w", err) } if err := cw.Commit(ctx, st.Offset, ""); err != nil { if !errdefs.IsAlreadyExists(err) { - return "", 0, errors.Wrapf(err, "failed commit on ref %q", ref) + return "", 0, fmt.Errorf("failed commit on ref %q: %w", ref, err) } } @@ -311,7 +311,7 @@ func cryptChildren(ctx context.Context, cs content.Store, desc ocispec.Descripto // never encrypt/decrypt newLayers = append(newLayers, child) default: - return ocispec.Descriptor{}, false, errors.Errorf("bad/unhandled MediaType %s in encryptChildren\n", child.MediaType) + return ocispec.Descriptor{}, false, fmt.Errorf("bad/unhandled MediaType %s in encryptChildren", child.MediaType) } } @@ -326,7 +326,7 @@ func cryptChildren(ctx context.Context, cs content.Store, desc ocispec.Descripto mb, err := json.MarshalIndent(newManifest, "", " ") if err != nil { - return ocispec.Descriptor{}, false, errors.Wrap(err, "failed to marshal image") + return ocispec.Descriptor{}, false, fmt.Errorf("failed to marshal image: %w", err) } newDesc := ocispec.Descriptor{ @@ -345,7 +345,7 @@ func cryptChildren(ctx context.Context, cs content.Store, desc ocispec.Descripto ref := fmt.Sprintf("manifest-%s", newDesc.Digest.String()) if err := content.WriteBlob(ctx, cs, ref, bytes.NewReader(mb), newDesc, content.WithLabels(labels)); err != nil { - return ocispec.Descriptor{}, false, errors.Wrap(err, "failed to write config") + return ocispec.Descriptor{}, false, fmt.Errorf("failed to write config: %w", err) } return newDesc, true, nil } @@ -412,7 +412,7 @@ func cryptManifestList(ctx 
context.Context, cs content.Store, desc ocispec.Descr mb, err := json.MarshalIndent(newIndex, "", " ") if err != nil { - return ocispec.Descriptor{}, false, errors.Wrap(err, "failed to marshal index") + return ocispec.Descriptor{}, false, fmt.Errorf("failed to marshal index: %w", err) } newDesc := ocispec.Descriptor{ @@ -429,7 +429,7 @@ func cryptManifestList(ctx context.Context, cs content.Store, desc ocispec.Descr ref := fmt.Sprintf("index-%s", newDesc.Digest.String()) if err = content.WriteBlob(ctx, cs, ref, bytes.NewReader(mb), newDesc, content.WithLabels(labels)); err != nil { - return ocispec.Descriptor{}, false, errors.Wrap(err, "failed to write index") + return ocispec.Descriptor{}, false, fmt.Errorf("failed to write index: %w", err) } return newDesc, true, nil } @@ -441,7 +441,7 @@ func cryptManifestList(ctx context.Context, cs content.Store, desc ocispec.Descr // representing a manifest list or a single manifest func cryptImage(ctx context.Context, cs content.Store, desc ocispec.Descriptor, cc *encconfig.CryptoConfig, lf LayerFilter, cryptoOp cryptoOp) (ocispec.Descriptor, bool, error) { if cc == nil { - return ocispec.Descriptor{}, false, errors.Wrapf(errdefs.ErrInvalidArgument, "CryptoConfig must not be nil") + return ocispec.Descriptor{}, false, errors.New("invalid argument: CryptoConfig must not be nil") } switch desc.MediaType { case ocispec.MediaTypeImageIndex, images.MediaTypeDockerSchema2ManifestList: @@ -449,7 +449,7 @@ func cryptImage(ctx context.Context, cs content.Store, desc ocispec.Descriptor, case ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest: return cryptManifest(ctx, cs, desc, cc, lf, cryptoOp) default: - return ocispec.Descriptor{}, false, errors.Errorf("CryptImage: Unhandled media type: %s", desc.MediaType) + return ocispec.Descriptor{}, false, fmt.Errorf("unhandled media type: %s", desc.MediaType) } } @@ -463,6 +463,28 @@ func DecryptImage(ctx context.Context, cs content.Store, desc ocispec.Descriptor return 
cryptImage(ctx, cs, desc, cc, lf, cryptoOpDecrypt) } +// GetImageEncryptConverter returns a converter function for image encryption +func GetImageEncryptConverter(cc *encconfig.CryptoConfig, lf LayerFilter) converter.ConvertFunc { + return func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { + newDesc, _, err := EncryptImage(ctx, cs, desc, cc, lf) + if err != nil { + return nil, err + } + return &newDesc, nil + } +} + +// GetImageDecryptConverter returns a converter function for image decryption +func GetImageDecryptConverter(cc *encconfig.CryptoConfig, lf LayerFilter) converter.ConvertFunc { + return func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { + newDesc, _, err := DecryptImage(ctx, cs, desc, cc, lf) + if err != nil { + return nil, err + } + return &newDesc, nil + } +} + // CheckAuthorization checks whether a user has the right keys to be allowed to access an image (every layer) // It takes decrypting of the layers only as far as decrypting the asymmetrically encrypted data // The decryption is only done for the current platform @@ -475,7 +497,7 @@ func CheckAuthorization(ctx context.Context, cs content.Store, desc ocispec.Desc _, _, err := cryptImage(ctx, cs, desc, &cc, lf, cryptoOpUnwrapOnly) if err != nil { - return errors.Wrapf(err, "you are not authorized to use this image") + return fmt.Errorf("you are not authorized to use this image: %w", err) } return nil } diff --git a/vendor/github.com/containerd/imgcrypt/payload.go b/vendor/github.com/containerd/imgcrypt/payload.go index 7d92543..0c8a48f 100644 --- a/vendor/github.com/containerd/imgcrypt/payload.go +++ b/vendor/github.com/containerd/imgcrypt/payload.go @@ -18,6 +18,7 @@ package imgcrypt import ( "github.com/containerd/typeurl" + encconfig "github.com/containers/ocicrypt/config" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/vendor/github.com/containerd/ttrpc/.gitignore 
b/vendor/github.com/containerd/ttrpc/.gitignore deleted file mode 100644 index ea58090..0000000 --- a/vendor/github.com/containerd/ttrpc/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out diff --git a/vendor/github.com/containerd/ttrpc/LICENSE b/vendor/github.com/containerd/ttrpc/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/containerd/ttrpc/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/ttrpc/README.md b/vendor/github.com/containerd/ttrpc/README.md deleted file mode 100644 index 547a129..0000000 --- a/vendor/github.com/containerd/ttrpc/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# ttrpc - -[![Build Status](https://github.com/containerd/ttrpc/workflows/CI/badge.svg)](https://github.com/containerd/ttrpc/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/ttrpc/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/ttrpc) - -GRPC for low-memory environments. - -The existing grpc-go project requires a lot of memory overhead for importing -packages and at runtime. While this is great for many services with low density -requirements, this can be a problem when running a large number of services on -a single machine or on a machine with a small amount of memory. - -Using the same GRPC definitions, this project reduces the binary size and -protocol overhead required. We do this by eliding the `net/http`, `net/http2` -and `grpc` package used by grpc replacing it with a lightweight framing -protocol. The result are smaller binaries that use less resident memory with -the same ease of use as GRPC. - -Please note that while this project supports generating either end of the -protocol, the generated service definitions will be incompatible with regular -GRPC services, as they do not speak the same protocol. 
- -# Usage - -Create a gogo vanity binary (see -[`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an -example with the ttrpc plugin enabled. - -It's recommended to use [`protobuild`](https://github.com//stevvooe/protobuild) -to build the protobufs for this project, but this will work with protoc -directly, if required. - -# Differences from GRPC - -- The protocol stack has been replaced with a lighter protocol that doesn't - require http, http2 and tls. -- The client and server interface are identical whereas in GRPC there is a - client and server interface that are different. -- The Go stdlib context package is used instead. -- No support for streams yet. - -# Status - -TODO: - -- [ ] Document protocol layout -- [ ] Add testing under concurrent load to ensure -- [ ] Verify connection error handling - -# Project details - -ttrpc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/ttrpc/channel.go b/vendor/github.com/containerd/ttrpc/channel.go deleted file mode 100644 index 81116a5..0000000 --- a/vendor/github.com/containerd/ttrpc/channel.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "bufio" - "encoding/binary" - "fmt" - "io" - "net" - "sync" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const ( - messageHeaderLength = 10 - messageLengthMax = 4 << 20 -) - -type messageType uint8 - -const ( - messageTypeRequest messageType = 0x1 - messageTypeResponse messageType = 0x2 -) - -// messageHeader represents the fixed-length message header of 10 bytes sent -// with every request. -type messageHeader struct { - Length uint32 // length excluding this header. b[:4] - StreamID uint32 // identifies which request stream message is a part of. 
b[4:8] - Type messageType // message type b[8] - Flags uint8 // reserved b[9] -} - -func readMessageHeader(p []byte, r io.Reader) (messageHeader, error) { - _, err := io.ReadFull(r, p[:messageHeaderLength]) - if err != nil { - return messageHeader{}, err - } - - return messageHeader{ - Length: binary.BigEndian.Uint32(p[:4]), - StreamID: binary.BigEndian.Uint32(p[4:8]), - Type: messageType(p[8]), - Flags: p[9], - }, nil -} - -func writeMessageHeader(w io.Writer, p []byte, mh messageHeader) error { - binary.BigEndian.PutUint32(p[:4], mh.Length) - binary.BigEndian.PutUint32(p[4:8], mh.StreamID) - p[8] = byte(mh.Type) - p[9] = mh.Flags - - _, err := w.Write(p[:]) - return err -} - -var buffers sync.Pool - -type channel struct { - conn net.Conn - bw *bufio.Writer - br *bufio.Reader - hrbuf [messageHeaderLength]byte // avoid alloc when reading header - hwbuf [messageHeaderLength]byte -} - -func newChannel(conn net.Conn) *channel { - return &channel{ - conn: conn, - bw: bufio.NewWriter(conn), - br: bufio.NewReader(conn), - } -} - -// recv a message from the channel. The returned buffer contains the message. -// -// If a valid grpc status is returned, the message header -// returned will be valid and caller should send that along to -// the correct consumer. The bytes on the underlying channel -// will be discarded. 
-func (ch *channel) recv() (messageHeader, []byte, error) { - mh, err := readMessageHeader(ch.hrbuf[:], ch.br) - if err != nil { - return messageHeader{}, nil, err - } - - if mh.Length > uint32(messageLengthMax) { - if _, err := ch.br.Discard(int(mh.Length)); err != nil { - return mh, nil, fmt.Errorf("failed to discard after receiving oversized message: %w", err) - } - - return mh, nil, status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", mh.Length, messageLengthMax) - } - - p := ch.getmbuf(int(mh.Length)) - if _, err := io.ReadFull(ch.br, p); err != nil { - return messageHeader{}, nil, fmt.Errorf("failed reading message: %w", err) - } - - return mh, p, nil -} - -func (ch *channel) send(streamID uint32, t messageType, p []byte) error { - if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil { - return err - } - - _, err := ch.bw.Write(p) - if err != nil { - return err - } - - return ch.bw.Flush() -} - -func (ch *channel) getmbuf(size int) []byte { - // we can't use the standard New method on pool because we want to allocate - // based on size. - b, ok := buffers.Get().(*[]byte) - if !ok || cap(*b) < size { - // TODO(stevvooe): It may be better to allocate these in fixed length - // buckets to reduce fragmentation but its not clear that would help - // with performance. An ilogb approach or similar would work well. - bb := make([]byte, size) - b = &bb - } else { - *b = (*b)[:size] - } - return *b -} - -func (ch *channel) putmbuf(p []byte) { - buffers.Put(&p) -} diff --git a/vendor/github.com/containerd/ttrpc/client.go b/vendor/github.com/containerd/ttrpc/client.go deleted file mode 100644 index 26c3dd2..0000000 --- a/vendor/github.com/containerd/ttrpc/client.go +++ /dev/null @@ -1,409 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "context" - "errors" - "io" - "net" - "os" - "strings" - "sync" - "syscall" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// ErrClosed is returned by client methods when the underlying connection is -// closed. -var ErrClosed = errors.New("ttrpc: closed") - -// Client for a ttrpc server -type Client struct { - codec codec - conn net.Conn - channel *channel - calls chan *callRequest - - ctx context.Context - closed func() - - closeOnce sync.Once - userCloseFunc func() - userCloseWaitCh chan struct{} - - errOnce sync.Once - err error - interceptor UnaryClientInterceptor -} - -// ClientOpts configures a client -type ClientOpts func(c *Client) - -// WithOnClose sets the close func whenever the client's Close() method is called -func WithOnClose(onClose func()) ClientOpts { - return func(c *Client) { - c.userCloseFunc = onClose - } -} - -// WithUnaryClientInterceptor sets the provided client interceptor -func WithUnaryClientInterceptor(i UnaryClientInterceptor) ClientOpts { - return func(c *Client) { - c.interceptor = i - } -} - -func NewClient(conn net.Conn, opts ...ClientOpts) *Client { - ctx, cancel := context.WithCancel(context.Background()) - c := &Client{ - codec: codec{}, - conn: conn, - channel: newChannel(conn), - calls: make(chan *callRequest), - closed: cancel, - ctx: ctx, - userCloseFunc: 
func() {}, - userCloseWaitCh: make(chan struct{}), - interceptor: defaultClientInterceptor, - } - - for _, o := range opts { - o(c) - } - - go c.run() - return c -} - -type callRequest struct { - ctx context.Context - req *Request - resp *Response // response will be written back here - errs chan error // error written here on completion -} - -func (c *Client) Call(ctx context.Context, service, method string, req, resp interface{}) error { - payload, err := c.codec.Marshal(req) - if err != nil { - return err - } - - var ( - creq = &Request{ - Service: service, - Method: method, - Payload: payload, - } - - cresp = &Response{} - ) - - if metadata, ok := GetMetadata(ctx); ok { - metadata.setRequest(creq) - } - - if dl, ok := ctx.Deadline(); ok { - creq.TimeoutNano = dl.Sub(time.Now()).Nanoseconds() - } - - info := &UnaryClientInfo{ - FullMethod: fullPath(service, method), - } - if err := c.interceptor(ctx, creq, cresp, info, c.dispatch); err != nil { - return err - } - - if err := c.codec.Unmarshal(cresp.Payload, resp); err != nil { - return err - } - - if cresp.Status != nil && cresp.Status.Code != int32(codes.OK) { - return status.ErrorProto(cresp.Status) - } - return nil -} - -func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error { - errs := make(chan error, 1) - call := &callRequest{ - ctx: ctx, - req: req, - resp: resp, - errs: errs, - } - - select { - case <-ctx.Done(): - return ctx.Err() - case c.calls <- call: - case <-c.ctx.Done(): - return c.error() - } - - select { - case <-ctx.Done(): - return ctx.Err() - case err := <-errs: - return filterCloseErr(err) - case <-c.ctx.Done(): - return c.error() - } -} - -func (c *Client) Close() error { - c.closeOnce.Do(func() { - c.closed() - }) - return nil -} - -// UserOnCloseWait is used to blocks untils the user's on-close callback -// finishes. 
-func (c *Client) UserOnCloseWait(ctx context.Context) error { - select { - case <-c.userCloseWaitCh: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -type message struct { - messageHeader - p []byte - err error -} - -// callMap provides access to a map of active calls, guarded by a mutex. -type callMap struct { - m sync.Mutex - activeCalls map[uint32]*callRequest - closeErr error -} - -// newCallMap returns a new callMap with an empty set of active calls. -func newCallMap() *callMap { - return &callMap{ - activeCalls: make(map[uint32]*callRequest), - } -} - -// set adds a call entry to the map with the given streamID key. -func (cm *callMap) set(streamID uint32, cr *callRequest) error { - cm.m.Lock() - defer cm.m.Unlock() - if cm.closeErr != nil { - return cm.closeErr - } - cm.activeCalls[streamID] = cr - return nil -} - -// get looks up the call entry for the given streamID key, then removes it -// from the map and returns it. -func (cm *callMap) get(streamID uint32) (cr *callRequest, ok bool, err error) { - cm.m.Lock() - defer cm.m.Unlock() - if cm.closeErr != nil { - return nil, false, cm.closeErr - } - cr, ok = cm.activeCalls[streamID] - if ok { - delete(cm.activeCalls, streamID) - } - return -} - -// abort sends the given error to each active call, and clears the map. -// Once abort has been called, any subsequent calls to the callMap will return the error passed to abort. -func (cm *callMap) abort(err error) error { - cm.m.Lock() - defer cm.m.Unlock() - if cm.closeErr != nil { - return cm.closeErr - } - for streamID, call := range cm.activeCalls { - call.errs <- err - delete(cm.activeCalls, streamID) - } - cm.closeErr = err - return nil -} - -func (c *Client) run() { - var ( - waiters = newCallMap() - receiverDone = make(chan struct{}) - ) - - // Sender goroutine - // Receives calls from dispatch, adds them to the set of active calls, and sends them - // to the server. 
- go func() { - var streamID uint32 = 1 - for { - select { - case <-c.ctx.Done(): - return - case call := <-c.calls: - id := streamID - streamID += 2 // enforce odd client initiated request ids - if err := waiters.set(id, call); err != nil { - call.errs <- err // errs is buffered so should not block. - continue - } - if err := c.send(id, messageTypeRequest, call.req); err != nil { - call.errs <- err // errs is buffered so should not block. - waiters.get(id) // remove from waiters set - } - } - } - }() - - // Receiver goroutine - // Receives responses from the server, looks up the call info in the set of active calls, - // and notifies the caller of the response. - go func() { - defer close(receiverDone) - for { - select { - case <-c.ctx.Done(): - c.setError(c.ctx.Err()) - return - default: - mh, p, err := c.channel.recv() - if err != nil { - _, ok := status.FromError(err) - if !ok { - // treat all errors that are not an rpc status as terminal. - // all others poison the connection. - c.setError(filterCloseErr(err)) - return - } - } - msg := &message{ - messageHeader: mh, - p: p[:mh.Length], - err: err, - } - call, ok, err := waiters.get(mh.StreamID) - if err != nil { - logrus.Errorf("ttrpc: failed to look up active call: %s", err) - continue - } - if !ok { - logrus.Errorf("ttrpc: received message for unknown channel %v", mh.StreamID) - continue - } - call.errs <- c.recv(call.resp, msg) - } - } - }() - - defer func() { - c.conn.Close() - c.userCloseFunc() - close(c.userCloseWaitCh) - }() - - for { - select { - case <-receiverDone: - // The receiver has exited. - // don't return out, let the close of the context trigger the abort of waiters - c.Close() - case <-c.ctx.Done(): - // Abort all active calls. This will also prevent any new calls from being added - // to waiters. 
- waiters.abort(c.error()) - return - } - } -} - -func (c *Client) error() error { - c.errOnce.Do(func() { - if c.err == nil { - c.err = ErrClosed - } - }) - return c.err -} - -func (c *Client) setError(err error) { - c.errOnce.Do(func() { - c.err = err - }) -} - -func (c *Client) send(streamID uint32, mtype messageType, msg interface{}) error { - p, err := c.codec.Marshal(msg) - if err != nil { - return err - } - - return c.channel.send(streamID, mtype, p) -} - -func (c *Client) recv(resp *Response, msg *message) error { - if msg.err != nil { - return msg.err - } - - if msg.Type != messageTypeResponse { - return errors.New("unknown message type received") - } - - defer c.channel.putmbuf(msg.p) - return proto.Unmarshal(msg.p, resp) -} - -// filterCloseErr rewrites EOF and EPIPE errors to ErrClosed. Use when -// returning from call or handling errors from main read loop. -// -// This purposely ignores errors with a wrapped cause. -func filterCloseErr(err error) error { - switch { - case err == nil: - return nil - case err == io.EOF: - return ErrClosed - case errors.Is(err, io.EOF): - return ErrClosed - case strings.Contains(err.Error(), "use of closed network connection"): - return ErrClosed - default: - // if we have an epipe on a write or econnreset on a read , we cast to errclosed - var oerr *net.OpError - if errors.As(err, &oerr) && (oerr.Op == "write" || oerr.Op == "read") { - serr, sok := oerr.Err.(*os.SyscallError) - if sok && ((serr.Err == syscall.EPIPE && oerr.Op == "write") || - (serr.Err == syscall.ECONNRESET && oerr.Op == "read")) { - - return ErrClosed - } - } - } - - return err -} diff --git a/vendor/github.com/containerd/ttrpc/codec.go b/vendor/github.com/containerd/ttrpc/codec.go deleted file mode 100644 index 880634c..0000000 --- a/vendor/github.com/containerd/ttrpc/codec.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "fmt" - - "github.com/gogo/protobuf/proto" -) - -type codec struct{} - -func (c codec) Marshal(msg interface{}) ([]byte, error) { - switch v := msg.(type) { - case proto.Message: - return proto.Marshal(v) - default: - return nil, fmt.Errorf("ttrpc: cannot marshal unknown type: %T", msg) - } -} - -func (c codec) Unmarshal(p []byte, msg interface{}) error { - switch v := msg.(type) { - case proto.Message: - return proto.Unmarshal(p, v) - default: - return fmt.Errorf("ttrpc: cannot unmarshal into unknown type: %T", msg) - } -} diff --git a/vendor/github.com/containerd/ttrpc/config.go b/vendor/github.com/containerd/ttrpc/config.go deleted file mode 100644 index 0974196..0000000 --- a/vendor/github.com/containerd/ttrpc/config.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package ttrpc - -import "errors" - -type serverConfig struct { - handshaker Handshaker - interceptor UnaryServerInterceptor -} - -// ServerOpt for configuring a ttrpc server -type ServerOpt func(*serverConfig) error - -// WithServerHandshaker can be passed to NewServer to ensure that the -// handshaker is called before every connection attempt. -// -// Only one handshaker is allowed per server. -func WithServerHandshaker(handshaker Handshaker) ServerOpt { - return func(c *serverConfig) error { - if c.handshaker != nil { - return errors.New("only one handshaker allowed per server") - } - c.handshaker = handshaker - return nil - } -} - -// WithUnaryServerInterceptor sets the provided interceptor on the server -func WithUnaryServerInterceptor(i UnaryServerInterceptor) ServerOpt { - return func(c *serverConfig) error { - if c.interceptor != nil { - return errors.New("only one interceptor allowed per server") - } - c.interceptor = i - return nil - } -} diff --git a/vendor/github.com/containerd/ttrpc/handshake.go b/vendor/github.com/containerd/ttrpc/handshake.go deleted file mode 100644 index a424b67..0000000 --- a/vendor/github.com/containerd/ttrpc/handshake.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "context" - "net" -) - -// Handshaker defines the interface for connection handshakes performed on the -// server or client when first connecting. 
-type Handshaker interface { - // Handshake should confirm or decorate a connection that may be incoming - // to a server or outgoing from a client. - // - // If this returns without an error, the caller should use the connection - // in place of the original connection. - // - // The second return value can contain credential specific data, such as - // unix socket credentials or TLS information. - // - // While we currently only have implementations on the server-side, this - // interface should be sufficient to implement similar handshakes on the - // client-side. - Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) -} - -type handshakerFunc func(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) - -func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { - return fn(ctx, conn) -} - -func noopHandshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { - return conn, nil, nil -} diff --git a/vendor/github.com/containerd/ttrpc/interceptor.go b/vendor/github.com/containerd/ttrpc/interceptor.go deleted file mode 100644 index c1219da..0000000 --- a/vendor/github.com/containerd/ttrpc/interceptor.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package ttrpc - -import "context" - -// UnaryServerInfo provides information about the server request -type UnaryServerInfo struct { - FullMethod string -} - -// UnaryClientInfo provides information about the client request -type UnaryClientInfo struct { - FullMethod string -} - -// Unmarshaler contains the server request data and allows it to be unmarshaled -// into a concrete type -type Unmarshaler func(interface{}) error - -// Invoker invokes the client's request and response from the ttrpc server -type Invoker func(context.Context, *Request, *Response) error - -// UnaryServerInterceptor specifies the interceptor function for server request/response -type UnaryServerInterceptor func(context.Context, Unmarshaler, *UnaryServerInfo, Method) (interface{}, error) - -// UnaryClientInterceptor specifies the interceptor function for client request/response -type UnaryClientInterceptor func(context.Context, *Request, *Response, *UnaryClientInfo, Invoker) error - -func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, info *UnaryServerInfo, method Method) (interface{}, error) { - return method(ctx, unmarshal) -} - -func defaultClientInterceptor(ctx context.Context, req *Request, resp *Response, _ *UnaryClientInfo, invoker Invoker) error { - return invoker(ctx, req, resp) -} diff --git a/vendor/github.com/containerd/ttrpc/metadata.go b/vendor/github.com/containerd/ttrpc/metadata.go deleted file mode 100644 index ce8c0d1..0000000 --- a/vendor/github.com/containerd/ttrpc/metadata.go +++ /dev/null @@ -1,107 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "context" - "strings" -) - -// MD is the user type for ttrpc metadata -type MD map[string][]string - -// Get returns the metadata for a given key when they exist. -// If there is no metadata, a nil slice and false are returned. -func (m MD) Get(key string) ([]string, bool) { - key = strings.ToLower(key) - list, ok := m[key] - if !ok || len(list) == 0 { - return nil, false - } - - return list, true -} - -// Set sets the provided values for a given key. -// The values will overwrite any existing values. -// If no values provided, a key will be deleted. -func (m MD) Set(key string, values ...string) { - key = strings.ToLower(key) - if len(values) == 0 { - delete(m, key) - return - } - m[key] = values -} - -// Append appends additional values to the given key. -func (m MD) Append(key string, values ...string) { - key = strings.ToLower(key) - if len(values) == 0 { - return - } - current, ok := m[key] - if ok { - m.Set(key, append(current, values...)...) - } else { - m.Set(key, values...) 
- } -} - -func (m MD) setRequest(r *Request) { - for k, values := range m { - for _, v := range values { - r.Metadata = append(r.Metadata, &KeyValue{ - Key: k, - Value: v, - }) - } - } -} - -func (m MD) fromRequest(r *Request) { - for _, kv := range r.Metadata { - m[kv.Key] = append(m[kv.Key], kv.Value) - } -} - -type metadataKey struct{} - -// GetMetadata retrieves metadata from context.Context (previously attached with WithMetadata) -func GetMetadata(ctx context.Context) (MD, bool) { - metadata, ok := ctx.Value(metadataKey{}).(MD) - return metadata, ok -} - -// GetMetadataValue gets a specific metadata value by name from context.Context -func GetMetadataValue(ctx context.Context, name string) (string, bool) { - metadata, ok := GetMetadata(ctx) - if !ok { - return "", false - } - - if list, ok := metadata.Get(name); ok { - return list[0], true - } - - return "", false -} - -// WithMetadata attaches metadata map to a context.Context -func WithMetadata(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, metadataKey{}, md) -} diff --git a/vendor/github.com/containerd/ttrpc/plugin/generator.go b/vendor/github.com/containerd/ttrpc/plugin/generator.go deleted file mode 100644 index 0900386..0000000 --- a/vendor/github.com/containerd/ttrpc/plugin/generator.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package plugin - -import ( - "strings" - - "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - "github.com/gogo/protobuf/protoc-gen-gogo/generator" -) - -type ttrpcGenerator struct { - *generator.Generator - generator.PluginImports - - typeurlPkg generator.Single - ttrpcPkg generator.Single - contextPkg generator.Single -} - -func init() { - generator.RegisterPlugin(new(ttrpcGenerator)) -} - -func (p *ttrpcGenerator) Name() string { - return "ttrpc" -} - -func (p *ttrpcGenerator) Init(g *generator.Generator) { - p.Generator = g -} - -func (p *ttrpcGenerator) Generate(file *generator.FileDescriptor) { - p.PluginImports = generator.NewPluginImports(p.Generator) - p.contextPkg = p.NewImport("context") - p.typeurlPkg = p.NewImport("github.com/containerd/typeurl") - p.ttrpcPkg = p.NewImport("github.com/containerd/ttrpc") - - for _, service := range file.GetService() { - serviceName := service.GetName() - if pkg := file.GetPackage(); pkg != "" { - serviceName = pkg + "." + serviceName - } - - p.genService(serviceName, service) - } -} - -func (p *ttrpcGenerator) genService(fullName string, service *descriptor.ServiceDescriptorProto) { - serviceName := service.GetName() + "Service" - p.P() - p.P("type ", serviceName, " interface{") - p.In() - for _, method := range service.Method { - p.P(method.GetName(), - "(ctx ", p.contextPkg.Use(), ".Context, ", - "req *", p.typeName(method.GetInputType()), ") ", - "(*", p.typeName(method.GetOutputType()), ", error)") - - } - p.Out() - p.P("}") - - p.P() - // registration method - p.P("func Register", serviceName, "(srv *", p.ttrpcPkg.Use(), ".Server, svc ", serviceName, ") {") - p.In() - p.P(`srv.Register("`, fullName, `", map[string]`, p.ttrpcPkg.Use(), ".Method{") - p.In() - for _, method := range service.Method { - p.P(`"`, method.GetName(), `": `, `func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {`) - p.In() - p.P("var req ", p.typeName(method.GetInputType())) - p.P(`if err := 
unmarshal(&req); err != nil {`) - p.In() - p.P(`return nil, err`) - p.Out() - p.P(`}`) - p.P("return svc.", method.GetName(), "(ctx, &req)") - p.Out() - p.P("},") - } - p.Out() - p.P("})") - p.Out() - p.P("}") - - clientType := service.GetName() + "Client" - clientStructType := strings.ToLower(clientType[:1]) + clientType[1:] - p.P() - p.P("type ", clientStructType, " struct{") - p.In() - p.P("client *", p.ttrpcPkg.Use(), ".Client") - p.Out() - p.P("}") - p.P() - p.P("func New", clientType, "(client *", p.ttrpcPkg.Use(), ".Client)", serviceName, "{") - p.In() - p.P("return &", clientStructType, "{") - p.In() - p.P("client: client,") - p.Out() - p.P("}") - p.Out() - p.P("}") - p.P() - for _, method := range service.Method { - p.P() - p.P("func (c *", clientStructType, ") ", method.GetName(), - "(ctx ", p.contextPkg.Use(), ".Context, ", - "req *", p.typeName(method.GetInputType()), ") ", - "(*", p.typeName(method.GetOutputType()), ", error) {") - p.In() - p.P("var resp ", p.typeName(method.GetOutputType())) - p.P("if err := c.client.Call(ctx, ", `"`+fullName+`", `, `"`+method.GetName()+`"`, ", req, &resp); err != nil {") - p.In() - p.P("return nil, err") - p.Out() - p.P("}") - p.P("return &resp, nil") - p.Out() - p.P("}") - } -} - -func (p *ttrpcGenerator) objectNamed(name string) generator.Object { - p.Generator.RecordTypeUse(name) - return p.Generator.ObjectNamed(name) -} - -func (p *ttrpcGenerator) typeName(str string) string { - return p.Generator.TypeName(p.objectNamed(str)) -} diff --git a/vendor/github.com/containerd/ttrpc/server.go b/vendor/github.com/containerd/ttrpc/server.go deleted file mode 100644 index b0e4807..0000000 --- a/vendor/github.com/containerd/ttrpc/server.go +++ /dev/null @@ -1,500 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "context" - "errors" - "io" - "math/rand" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var ( - ErrServerClosed = errors.New("ttrpc: server closed") -) - -type Server struct { - config *serverConfig - services *serviceSet - codec codec - - mu sync.Mutex - listeners map[net.Listener]struct{} - connections map[*serverConn]struct{} // all connections to current state - done chan struct{} // marks point at which we stop serving requests -} - -func NewServer(opts ...ServerOpt) (*Server, error) { - config := &serverConfig{} - for _, opt := range opts { - if err := opt(config); err != nil { - return nil, err - } - } - if config.interceptor == nil { - config.interceptor = defaultServerInterceptor - } - - return &Server{ - config: config, - services: newServiceSet(config.interceptor), - done: make(chan struct{}), - listeners: make(map[net.Listener]struct{}), - connections: make(map[*serverConn]struct{}), - }, nil -} - -func (s *Server) Register(name string, methods map[string]Method) { - s.services.register(name, methods) -} - -func (s *Server) Serve(ctx context.Context, l net.Listener) error { - s.addListener(l) - defer s.closeListener(l) - - var ( - backoff time.Duration - handshaker = s.config.handshaker - ) - - if handshaker == nil { - handshaker = handshakerFunc(noopHandshake) - } - - for { - conn, err := l.Accept() - if err != nil { - select { - case <-s.done: - return ErrServerClosed - default: - } - - if terr, ok := err.(interface { 
- Temporary() bool - }); ok && terr.Temporary() { - if backoff == 0 { - backoff = time.Millisecond - } else { - backoff *= 2 - } - - if max := time.Second; backoff > max { - backoff = max - } - - sleep := time.Duration(rand.Int63n(int64(backoff))) - logrus.WithError(err).Errorf("ttrpc: failed accept; backoff %v", sleep) - time.Sleep(sleep) - continue - } - - return err - } - - backoff = 0 - - approved, handshake, err := handshaker.Handshake(ctx, conn) - if err != nil { - logrus.WithError(err).Errorf("ttrpc: refusing connection after handshake") - conn.Close() - continue - } - - sc := s.newConn(approved, handshake) - go sc.run(ctx) - } -} - -func (s *Server) Shutdown(ctx context.Context) error { - s.mu.Lock() - select { - case <-s.done: - default: - // protected by mutex - close(s.done) - } - lnerr := s.closeListeners() - s.mu.Unlock() - - ticker := time.NewTicker(200 * time.Millisecond) - defer ticker.Stop() - for { - if s.closeIdleConns() { - return lnerr - } - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - } - } -} - -// Close the server without waiting for active connections. 
-func (s *Server) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - - select { - case <-s.done: - default: - // protected by mutex - close(s.done) - } - - err := s.closeListeners() - for c := range s.connections { - c.close() - delete(s.connections, c) - } - - return err -} - -func (s *Server) addListener(l net.Listener) { - s.mu.Lock() - defer s.mu.Unlock() - s.listeners[l] = struct{}{} -} - -func (s *Server) closeListener(l net.Listener) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.closeListenerLocked(l) -} - -func (s *Server) closeListenerLocked(l net.Listener) error { - defer delete(s.listeners, l) - return l.Close() -} - -func (s *Server) closeListeners() error { - var err error - for l := range s.listeners { - if cerr := s.closeListenerLocked(l); cerr != nil && err == nil { - err = cerr - } - } - return err -} - -func (s *Server) addConnection(c *serverConn) { - s.mu.Lock() - defer s.mu.Unlock() - - s.connections[c] = struct{}{} -} - -func (s *Server) delConnection(c *serverConn) { - s.mu.Lock() - defer s.mu.Unlock() - - delete(s.connections, c) -} - -func (s *Server) countConnection() int { - s.mu.Lock() - defer s.mu.Unlock() - - return len(s.connections) -} - -func (s *Server) closeIdleConns() bool { - s.mu.Lock() - defer s.mu.Unlock() - quiescent := true - for c := range s.connections { - st, ok := c.getState() - if !ok || st != connStateIdle { - quiescent = false - continue - } - c.close() - delete(s.connections, c) - } - return quiescent -} - -type connState int - -const ( - connStateActive = iota + 1 // outstanding requests - connStateIdle // no requests - connStateClosed // closed connection -) - -func (cs connState) String() string { - switch cs { - case connStateActive: - return "active" - case connStateIdle: - return "idle" - case connStateClosed: - return "closed" - default: - return "unknown" - } -} - -func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn { - c := &serverConn{ - server: s, - conn: conn, - 
handshake: handshake, - shutdown: make(chan struct{}), - } - c.setState(connStateIdle) - s.addConnection(c) - return c -} - -type serverConn struct { - server *Server - conn net.Conn - handshake interface{} // data from handshake, not used for now - state atomic.Value - - shutdownOnce sync.Once - shutdown chan struct{} // forced shutdown, used by close -} - -func (c *serverConn) getState() (connState, bool) { - cs, ok := c.state.Load().(connState) - return cs, ok -} - -func (c *serverConn) setState(newstate connState) { - c.state.Store(newstate) -} - -func (c *serverConn) close() error { - c.shutdownOnce.Do(func() { - close(c.shutdown) - }) - - return nil -} - -func (c *serverConn) run(sctx context.Context) { - type ( - request struct { - id uint32 - req *Request - } - - response struct { - id uint32 - resp *Response - } - ) - - var ( - ch = newChannel(c.conn) - ctx, cancel = context.WithCancel(sctx) - active int - state connState = connStateIdle - responses = make(chan response) - requests = make(chan request) - recvErr = make(chan error, 1) - shutdown = c.shutdown - done = make(chan struct{}) - ) - - defer c.conn.Close() - defer cancel() - defer close(done) - defer c.server.delConnection(c) - - go func(recvErr chan error) { - defer close(recvErr) - sendImmediate := func(id uint32, st *status.Status) bool { - select { - case responses <- response{ - // even though we've had an invalid stream id, we send it - // back on the same stream id so the client knows which - // stream id was bad. - id: id, - resp: &Response{ - Status: st.Proto(), - }, - }: - return true - case <-c.shutdown: - return false - case <-done: - return false - } - } - - for { - select { - case <-c.shutdown: - return - case <-done: - return - default: // proceed - } - - mh, p, err := ch.recv() - if err != nil { - status, ok := status.FromError(err) - if !ok { - recvErr <- err - return - } - - // in this case, we send an error for that particular message - // when the status is defined. 
- if !sendImmediate(mh.StreamID, status) { - return - } - - continue - } - - if mh.Type != messageTypeRequest { - // we must ignore this for future compat. - continue - } - - var req Request - if err := c.server.codec.Unmarshal(p, &req); err != nil { - ch.putmbuf(p) - if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) { - return - } - continue - } - ch.putmbuf(p) - - if mh.StreamID%2 != 1 { - // enforce odd client initiated identifiers. - if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) { - return - } - continue - } - - // Forward the request to the main loop. We don't wait on s.done - // because we have already accepted the client request. - select { - case requests <- request{ - id: mh.StreamID, - req: &req, - }: - case <-done: - return - } - } - }(recvErr) - - for { - newstate := state - switch { - case active > 0: - newstate = connStateActive - shutdown = nil - case active == 0: - newstate = connStateIdle - shutdown = c.shutdown // only enable this branch in idle mode - } - - if newstate != state { - c.setState(newstate) - state = newstate - } - - select { - case request := <-requests: - active++ - go func(id uint32) { - ctx, cancel := getRequestContext(ctx, request.req) - defer cancel() - - p, status := c.server.services.call(ctx, request.req.Service, request.req.Method, request.req.Payload) - resp := &Response{ - Status: status.Proto(), - Payload: p, - } - - select { - case responses <- response{ - id: id, - resp: resp, - }: - case <-done: - } - }(request.id) - case response := <-responses: - p, err := c.server.codec.Marshal(response.resp) - if err != nil { - logrus.WithError(err).Error("failed marshaling response") - return - } - - if err := ch.send(response.id, messageTypeResponse, p); err != nil { - logrus.WithError(err).Error("failed sending message on channel") - return - } - - active-- - case err := <-recvErr: - // TODO(stevvooe): 
Not wildly clear what we should do in this - // branch. Basically, it means that we are no longer receiving - // requests due to a terminal error. - recvErr = nil // connection is now "closing" - if err == io.EOF || err == io.ErrUnexpectedEOF { - // The client went away and we should stop processing - // requests, so that the client connection is closed - return - } - if err != nil { - logrus.WithError(err).Error("error receiving message") - } - case <-shutdown: - return - } - } -} - -var noopFunc = func() {} - -func getRequestContext(ctx context.Context, req *Request) (retCtx context.Context, cancel func()) { - if len(req.Metadata) > 0 { - md := MD{} - md.fromRequest(req) - ctx = WithMetadata(ctx, md) - } - - cancel = noopFunc - if req.TimeoutNano == 0 { - return ctx, cancel - } - - ctx, cancel = context.WithTimeout(ctx, time.Duration(req.TimeoutNano)) - return ctx, cancel -} diff --git a/vendor/github.com/containerd/ttrpc/services.go b/vendor/github.com/containerd/ttrpc/services.go deleted file mode 100644 index f359e96..0000000 --- a/vendor/github.com/containerd/ttrpc/services.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package ttrpc - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "path" - "unsafe" - - "github.com/gogo/protobuf/proto" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type Method func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) - -type ServiceDesc struct { - Methods map[string]Method - - // TODO(stevvooe): Add stream support. -} - -type serviceSet struct { - services map[string]ServiceDesc - interceptor UnaryServerInterceptor -} - -func newServiceSet(interceptor UnaryServerInterceptor) *serviceSet { - return &serviceSet{ - services: make(map[string]ServiceDesc), - interceptor: interceptor, - } -} - -func (s *serviceSet) register(name string, methods map[string]Method) { - if _, ok := s.services[name]; ok { - panic(fmt.Errorf("duplicate service %v registered", name)) - } - - s.services[name] = ServiceDesc{ - Methods: methods, - } -} - -func (s *serviceSet) call(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, *status.Status) { - p, err := s.dispatch(ctx, serviceName, methodName, p) - st, ok := status.FromError(err) - if !ok { - st = status.New(convertCode(err), err.Error()) - } - - return p, st -} - -func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, error) { - method, err := s.resolve(serviceName, methodName) - if err != nil { - return nil, err - } - - unmarshal := func(obj interface{}) error { - switch v := obj.(type) { - case proto.Message: - if err := proto.Unmarshal(p, v); err != nil { - return status.Errorf(codes.Internal, "ttrpc: error unmarshalling payload: %v", err.Error()) - } - default: - return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v) - } - return nil - } - - info := &UnaryServerInfo{ - FullMethod: fullPath(serviceName, methodName), - } - - resp, err := s.interceptor(ctx, unmarshal, info, method) - if err != nil { - return nil, err - } - - if isNil(resp) { - return nil, 
errors.New("ttrpc: marshal called with nil") - } - - switch v := resp.(type) { - case proto.Message: - r, err := proto.Marshal(v) - if err != nil { - return nil, status.Errorf(codes.Internal, "ttrpc: error marshaling payload: %v", err.Error()) - } - - return r, nil - default: - return nil, status.Errorf(codes.Internal, "ttrpc: error unsupported response type: %T", v) - } -} - -func (s *serviceSet) resolve(service, method string) (Method, error) { - srv, ok := s.services[service] - if !ok { - return nil, status.Errorf(codes.Unimplemented, "service %v", service) - } - - mthd, ok := srv.Methods[method] - if !ok { - return nil, status.Errorf(codes.Unimplemented, "method %v", method) - } - - return mthd, nil -} - -// convertCode maps stdlib go errors into grpc space. -// -// This is ripped from the grpc-go code base. -func convertCode(err error) codes.Code { - switch err { - case nil: - return codes.OK - case io.EOF: - return codes.OutOfRange - case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: - return codes.FailedPrecondition - case os.ErrInvalid: - return codes.InvalidArgument - case context.Canceled: - return codes.Canceled - case context.DeadlineExceeded: - return codes.DeadlineExceeded - } - switch { - case os.IsExist(err): - return codes.AlreadyExists - case os.IsNotExist(err): - return codes.NotFound - case os.IsPermission(err): - return codes.PermissionDenied - } - return codes.Unknown -} - -func fullPath(service, method string) string { - return "/" + path.Join(service, method) -} - -func isNil(resp interface{}) bool { - return (*[2]uintptr)(unsafe.Pointer(&resp))[1] == 0 -} diff --git a/vendor/github.com/containerd/ttrpc/types.go b/vendor/github.com/containerd/ttrpc/types.go deleted file mode 100644 index 9a1c19a..0000000 --- a/vendor/github.com/containerd/ttrpc/types.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "fmt" - - spb "google.golang.org/genproto/googleapis/rpc/status" -) - -type Request struct { - Service string `protobuf:"bytes,1,opt,name=service,proto3"` - Method string `protobuf:"bytes,2,opt,name=method,proto3"` - Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"` - TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,proto3"` - Metadata []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3"` -} - -func (r *Request) Reset() { *r = Request{} } -func (r *Request) String() string { return fmt.Sprintf("%+#v", r) } -func (r *Request) ProtoMessage() {} - -type Response struct { - Status *spb.Status `protobuf:"bytes,1,opt,name=status,proto3"` - Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3"` -} - -func (r *Response) Reset() { *r = Response{} } -func (r *Response) String() string { return fmt.Sprintf("%+#v", r) } -func (r *Response) ProtoMessage() {} - -type StringList struct { - List []string `protobuf:"bytes,1,rep,name=list,proto3"` -} - -func (r *StringList) Reset() { *r = StringList{} } -func (r *StringList) String() string { return fmt.Sprintf("%+#v", r) } -func (r *StringList) ProtoMessage() {} - -func makeStringList(item ...string) StringList { return StringList{List: item} } - -type KeyValue struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3"` - Value string `protobuf:"bytes,2,opt,name=value,proto3"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } 
-func (*KeyValue) ProtoMessage() {} -func (m *KeyValue) String() string { return fmt.Sprintf("%+#v", m) } diff --git a/vendor/github.com/containerd/ttrpc/unixcreds_linux.go b/vendor/github.com/containerd/ttrpc/unixcreds_linux.go deleted file mode 100644 index a59dad6..0000000 --- a/vendor/github.com/containerd/ttrpc/unixcreds_linux.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "context" - "errors" - "fmt" - "net" - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -type UnixCredentialsFunc func(*unix.Ucred) error - -func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { - uc, err := requireUnixSocket(conn) - if err != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: require unix socket: %w", err) - } - - rs, err := uc.SyscallConn() - if err != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: (net.UnixConn).SyscallConn failed: %w", err) - } - var ( - ucred *unix.Ucred - ucredErr error - ) - if err := rs.Control(func(fd uintptr) { - ucred, ucredErr = unix.GetsockoptUcred(int(fd), unix.SOL_SOCKET, unix.SO_PEERCRED) - }); err != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: (*syscall.RawConn).Control failed: %w", err) - } - - if ucredErr != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials: %w", err) - } - - if err := 
fn(ucred); err != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: credential check failed: %w", err) - } - - return uc, ucred, nil -} - -// UnixSocketRequireUidGid requires specific *effective* UID/GID, rather than the real UID/GID. -// -// For example, if a daemon binary is owned by the root (UID 0) with SUID bit but running as an -// unprivileged user (UID 1001), the effective UID becomes 0, and the real UID becomes 1001. -// So calling this function with uid=0 allows a connection from effective UID 0 but rejects -// a connection from effective UID 1001. -// -// See socket(7), SO_PEERCRED: "The returned credentials are those that were in effect at the time of the call to connect(2) or socketpair(2)." -func UnixSocketRequireUidGid(uid, gid int) UnixCredentialsFunc { - return func(ucred *unix.Ucred) error { - return requireUidGid(ucred, uid, gid) - } -} - -func UnixSocketRequireRoot() UnixCredentialsFunc { - return UnixSocketRequireUidGid(0, 0) -} - -// UnixSocketRequireSameUser resolves the current effective unix user and returns a -// UnixCredentialsFunc that will validate incoming unix connections against the -// current credentials. -// -// This is useful when using abstract sockets that are accessible by all users. 
-func UnixSocketRequireSameUser() UnixCredentialsFunc { - euid, egid := os.Geteuid(), os.Getegid() - return UnixSocketRequireUidGid(euid, egid) -} - -func requireRoot(ucred *unix.Ucred) error { - return requireUidGid(ucred, 0, 0) -} - -func requireUidGid(ucred *unix.Ucred, uid, gid int) error { - if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) { - return fmt.Errorf("ttrpc: invalid credentials: %v", syscall.EPERM) - } - return nil -} - -func requireUnixSocket(conn net.Conn) (*net.UnixConn, error) { - uc, ok := conn.(*net.UnixConn) - if !ok { - return nil, errors.New("a unix socket connection is required") - } - - return uc, nil -} diff --git a/vendor/github.com/containerd/typeurl/.gitignore b/vendor/github.com/containerd/typeurl/.gitignore deleted file mode 100644 index d538467..0000000 --- a/vendor/github.com/containerd/typeurl/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.test -coverage.txt diff --git a/vendor/github.com/containerd/typeurl/LICENSE b/vendor/github.com/containerd/typeurl/LICENSE deleted file mode 100644 index 584149b..0000000 --- a/vendor/github.com/containerd/typeurl/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/typeurl/README.md b/vendor/github.com/containerd/typeurl/README.md deleted file mode 100644 index d021e96..0000000 --- a/vendor/github.com/containerd/typeurl/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# typeurl - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl) -[![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl) - -A Go package for managing the registration, marshaling, and unmarshaling of encoded types. - -This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](https://github.com/gogo/protobuf/blob/master/protobuf/google/protobuf/any.proto). - -## Project details - -**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/typeurl/doc.go b/vendor/github.com/containerd/typeurl/doc.go deleted file mode 100644 index c0d0fd2..0000000 --- a/vendor/github.com/containerd/typeurl/doc.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -// Package typeurl assists with managing the registration, marshaling, and -// unmarshaling of types encoded as protobuf.Any. -// -// A protobuf.Any is a proto message that can contain any arbitrary data. It -// consists of two components, a TypeUrl and a Value, and its proto definition -// looks like this: -// -// message Any { -// string type_url = 1; -// bytes value = 2; -// } -// -// The TypeUrl is used to distinguish the contents from other proto.Any -// messages. This typeurl library manages these URLs to enable automagic -// marshaling and unmarshaling of the contents. -// -// For example, consider this go struct: -// -// type Foo struct { -// Field1 string -// Field2 string -// } -// -// To use typeurl, types must first be registered. This is typically done in -// the init function -// -// func init() { -// typeurl.Register(&Foo{}, "Foo") -// } -// -// This will register the type Foo with the url path "Foo". The arguments to -// Register are variadic, and are used to construct a url path. Consider this -// example, from the github.com/containerd/containerd/client package: -// -// func init() { -// const prefix = "types.containerd.io" -// // register TypeUrls for commonly marshaled external types -// major := strconv.Itoa(specs.VersionMajor) -// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec") -// // this function has more Register calls, which are elided. 
-// } -// -// This registers several types under a more complex url, which ends up mapping -// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other -// value for major). -// -// Once a type is registered, it can be marshaled to a proto.Any message simply -// by calling `MarshalAny`, like this: -// -// foo := &Foo{Field1: "value1", Field2: "value2"} -// anyFoo, err := typeurl.MarshalAny(foo) -// -// MarshalAny will resolve the correct URL for the type. If the type in -// question implements the proto.Message interface, then it will be marshaled -// as a proto message. Otherwise, it will be marshaled as json. This means that -// typeurl will work on any arbitrary data, whether or not it has a proto -// definition, as long as it can be serialized to json. -// -// To unmarshal, the process is simply inverse: -// -// iface, err := typeurl.UnmarshalAny(anyFoo) -// foo := iface.(*Foo) -// -// The correct type is automatically chosen from the type registry, and the -// returned interface can be cast straight to that type. diff --git a/vendor/github.com/containerd/typeurl/types.go b/vendor/github.com/containerd/typeurl/types.go deleted file mode 100644 index 647d419..0000000 --- a/vendor/github.com/containerd/typeurl/types.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package typeurl - -import ( - "encoding/json" - "path" - "reflect" - "sync" - - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" - "github.com/pkg/errors" -) - -var ( - mu sync.RWMutex - registry = make(map[reflect.Type]string) -) - -// Definitions of common error types used throughout typeurl. -// -// These error types are used with errors.Wrap and errors.Wrapf to add context -// to an error. -// -// To detect an error class, use errors.Is() functions to tell whether an -// error is of this type. -var ( - ErrNotFound = errors.New("not found") -) - -// Register a type with a base URL for JSON marshaling. When the MarshalAny and -// UnmarshalAny functions are called they will treat the Any type value as JSON. -// To use protocol buffers for handling the Any value the proto.Register -// function should be used instead of this function. -func Register(v interface{}, args ...string) { - var ( - t = tryDereference(v) - p = path.Join(args...) - ) - mu.Lock() - defer mu.Unlock() - if et, ok := registry[t]; ok { - if et != p { - panic(errors.Errorf("type registered with alternate path %q != %q", et, p)) - } - return - } - registry[t] = p -} - -// TypeURL returns the type url for a registered type. -func TypeURL(v interface{}) (string, error) { - mu.RLock() - u, ok := registry[tryDereference(v)] - mu.RUnlock() - if !ok { - // fallback to the proto registry if it is a proto message - pb, ok := v.(proto.Message) - if !ok { - return "", errors.Wrapf(ErrNotFound, "type %s", reflect.TypeOf(v)) - } - return proto.MessageName(pb), nil - } - return u, nil -} - -// Is returns true if the type of the Any is the same as v. -func Is(any *types.Any, v interface{}) bool { - // call to check that v is a pointer - tryDereference(v) - url, err := TypeURL(v) - if err != nil { - return false - } - return any.TypeUrl == url -} - -// MarshalAny marshals the value v into an any with the correct TypeUrl. 
-// If the provided object is already a proto.Any message, then it will be -// returned verbatim. If it is of type proto.Message, it will be marshaled as a -// protocol buffer. Otherwise, the object will be marshaled to json. -func MarshalAny(v interface{}) (*types.Any, error) { - var marshal func(v interface{}) ([]byte, error) - switch t := v.(type) { - case *types.Any: - // avoid reserializing the type if we have an any. - return t, nil - case proto.Message: - marshal = func(v interface{}) ([]byte, error) { - return proto.Marshal(t) - } - default: - marshal = json.Marshal - } - - url, err := TypeURL(v) - if err != nil { - return nil, err - } - - data, err := marshal(v) - if err != nil { - return nil, err - } - return &types.Any{ - TypeUrl: url, - Value: data, - }, nil -} - -// UnmarshalAny unmarshals the any type into a concrete type. -func UnmarshalAny(any *types.Any) (interface{}, error) { - return UnmarshalByTypeURL(any.TypeUrl, any.Value) -} - -// UnmarshalByTypeURL unmarshals the given type and value to into a concrete type. -func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) { - return unmarshal(typeURL, value, nil) -} - -// UnmarshalTo unmarshals the any type into a concrete type passed in the out -// argument. It is identical to UnmarshalAny, but lets clients provide a -// destination type through the out argument. -func UnmarshalTo(any *types.Any, out interface{}) error { - return UnmarshalToByTypeURL(any.TypeUrl, any.Value, out) -} - -// UnmarshalTo unmarshals the given type and value into a concrete type passed -// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients -// provide a destination type through the out argument. 
-func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error { - _, err := unmarshal(typeURL, value, out) - return err -} - -func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { - t, err := getTypeByUrl(typeURL) - if err != nil { - return nil, err - } - - if v == nil { - v = reflect.New(t.t).Interface() - } else { - // Validate interface type provided by client - vURL, err := TypeURL(v) - if err != nil { - return nil, err - } - if typeURL != vURL { - return nil, errors.Errorf("can't unmarshal type %q to output %q", typeURL, vURL) - } - } - - if t.isProto { - err = proto.Unmarshal(value, v.(proto.Message)) - } else { - err = json.Unmarshal(value, v) - } - - return v, err -} - -type urlType struct { - t reflect.Type - isProto bool -} - -func getTypeByUrl(url string) (urlType, error) { - mu.RLock() - for t, u := range registry { - if u == url { - mu.RUnlock() - return urlType{ - t: t, - }, nil - } - } - mu.RUnlock() - // fallback to proto registry - t := proto.MessageType(url) - if t != nil { - return urlType{ - // get the underlying Elem because proto returns a pointer to the type - t: t.Elem(), - isProto: true, - }, nil - } - return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url) -} - -func tryDereference(v interface{}) reflect.Type { - t := reflect.TypeOf(v) - if t.Kind() == reflect.Ptr { - // require check of pointer but dereference to register - return t.Elem() - } - panic("v is not a pointer to a type") -} diff --git a/vendor/github.com/containerd/zfs/README.md b/vendor/github.com/containerd/zfs/README.md index badd83f..b747037 100644 --- a/vendor/github.com/containerd/zfs/README.md +++ b/vendor/github.com/containerd/zfs/README.md @@ -1,7 +1,9 @@ # [containerd](https://github.com/containerd/containerd) ZFS snapshotter plugin -[![Build Status](https://travis-ci.org/containerd/zfs.svg)](https://travis-ci.org/containerd/zfs) 
-[![codecov](https://codecov.io/gh/containerd/zfs/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/zfs) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/zfs)](https://pkg.go.dev/github.com/containerd/zfs) +[![Build Status](https://github.com/containerd/zfs/actions/workflows/ci.yml/badge.svg)](https://github.com/containerd/zfs/actions/workflows/ci.yml) +[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/zfs)](https://goreportcard.com/report/github.com/containerd/zfs) +[![codecov](https://codecov.io/gh/containerd/zfs/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/zfs) ZFS snapshotter plugin for containerd. @@ -26,8 +28,8 @@ $ zfs create -o mountpoint=/var/lib/containerd/io.containerd.snapshotter.v1.zfs The zfs plugin is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/zfs/plugin/plugin.go b/vendor/github.com/containerd/zfs/plugin/plugin.go index 2967f2f..b318949 100644 --- a/vendor/github.com/containerd/zfs/plugin/plugin.go +++ b/vendor/github.com/containerd/zfs/plugin/plugin.go @@ -1,3 +1,5 @@ +//go:build linux || freebsd + /* Copyright The containerd Authors. 
@@ -17,10 +19,12 @@ package plugin import ( + "errors" + "fmt" + "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/plugin" "github.com/containerd/zfs" - "github.com/pkg/errors" ) // Config represents configuration for the zfs plugin @@ -50,7 +54,7 @@ func init() { ic.Meta.Exports["root"] = root snapshotter, err := zfs.NewSnapshotter(root) if err != nil { - return nil, errors.Wrap(plugin.ErrSkipPlugin, err.Error()) + return nil, fmt.Errorf("%s: %w", err.Error(), plugin.ErrSkipPlugin) } return snapshotter, nil }, diff --git a/vendor/github.com/containerd/zfs/zfs.go b/vendor/github.com/containerd/zfs/zfs.go index 23ff251..e48b956 100644 --- a/vendor/github.com/containerd/zfs/zfs.go +++ b/vendor/github.com/containerd/zfs/zfs.go @@ -1,4 +1,4 @@ -// +build linux freebsd +//go:build linux || freebsd /* Copyright The containerd Authors. @@ -20,6 +20,7 @@ package zfs import ( "context" + "fmt" "math" "path/filepath" @@ -27,8 +28,7 @@ import ( "github.com/containerd/containerd/mount" "github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots/storage" - zfs "github.com/mistifyio/go-zfs" - "github.com/pkg/errors" + "github.com/mistifyio/go-zfs/v3" ) const ( @@ -56,7 +56,7 @@ func NewSnapshotter(root string) (snapshots.Snapshotter, error) { return nil, err } if m.FSType != "zfs" { - return nil, errors.Errorf("path %s must be a zfs filesystem to be used with the zfs snapshotter", root) + return nil, fmt.Errorf("path %s must be a zfs filesystem to be used with the zfs snapshotter", root) } dataset, err := zfs.GetDataset(m.Source) if err != nil { @@ -75,11 +75,9 @@ func NewSnapshotter(root string) (snapshots.Snapshotter, error) { return b, nil } -var ( - zfsCreateProperties = map[string]string{ - "mountpoint": "legacy", - } -) +var zfsCreateProperties = map[string]string{ + "mountpoint": "legacy", +} // createFilesystem creates but not mount. 
func createFilesystem(datasetName string) (*zfs.Dataset, error) { @@ -138,13 +136,12 @@ func (z *snapshotter) usage(ctx context.Context, key string) (snapshots.Usage, e if info.Kind == snapshots.KindActive { activeName := filepath.Join(z.dataset.Name, id) sDataset, err := zfs.GetDataset(activeName) - if err != nil { return snapshots.Usage{}, err } if int64(sDataset.Used) > maxSnapshotSize { - return snapshots.Usage{}, errors.Errorf("Dataset size exceeds maximum snapshot size of %d bytes", maxSnapshotSize) + return snapshots.Usage{}, fmt.Errorf("Dataset size exceeds maximum snapshot size of %d bytes", maxSnapshotSize) } usage = snapshots.Usage{ @@ -240,7 +237,7 @@ func (z *snapshotter) mounts(dataset *zfs.Dataset, readonly bool) ([]mount.Mount func (z *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) (err error) { usage, err := z.usage(ctx, key) if err != nil { - return errors.Wrap(err, "failed to compute usage") + return fmt.Errorf("failed to compute usage: %w", err) } ctx, t, err := z.ms.TransactionContext(ctx, true) @@ -257,7 +254,7 @@ func (z *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap id, err := storage.CommitActive(ctx, key, name, usage, opts...) 
if err != nil { - return errors.Wrap(err, "failed to commit") + return fmt.Errorf("failed to commit: %w", err) } activeName := filepath.Join(z.dataset.Name, id) @@ -293,7 +290,7 @@ func (z *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er s, err := storage.GetSnapshot(ctx, key) t.Rollback() //nolint:errcheck if err != nil { - return nil, errors.Wrap(err, "failed to get active snapshot") + return nil, fmt.Errorf("failed to get active snapshot: %w", err) } sName := filepath.Join(z.dataset.Name, s.ID) sDataset, err := zfs.GetDataset(sName) @@ -321,7 +318,7 @@ func (z *snapshotter) Remove(ctx context.Context, key string) (err error) { id, k, err := storage.Remove(ctx, key) if err != nil { - return errors.Wrap(err, "failed to remove snapshot") + return fmt.Errorf("failed to remove snapshot: %w", err) } datasetName := filepath.Join(z.dataset.Name, id) diff --git a/vendor/github.com/containernetworking/cni/LICENSE b/vendor/github.com/containernetworking/cni/LICENSE deleted file mode 100644 index 8f71f43..0000000 --- a/vendor/github.com/containernetworking/cni/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go deleted file mode 100644 index 7e52bd8..0000000 --- a/vendor/github.com/containernetworking/cni/libcni/api.go +++ /dev/null @@ -1,673 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package libcni - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/containernetworking/cni/pkg/invoke" - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/utils" - "github.com/containernetworking/cni/pkg/version" -) - -var ( - CacheDir = "/var/lib/cni" -) - -const ( - CNICacheV1 = "cniCacheV1" -) - -// A RuntimeConf holds the arguments to one invocation of a CNI plugin -// excepting the network configuration, with the nested exception that -// the `runtimeConfig` from the network configuration is included -// here. -type RuntimeConf struct { - ContainerID string - NetNS string - IfName string - Args [][2]string - // A dictionary of capability-specific data passed by the runtime - // to plugins as top-level keys in the 'runtimeConfig' dictionary - // of the plugin's stdin data. libcni will ensure that only keys - // in this map which match the capabilities of the plugin are passed - // to the plugin - CapabilityArgs map[string]interface{} - - // DEPRECATED. Will be removed in a future release. 
- CacheDir string -} - -type NetworkConfig struct { - Network *types.NetConf - Bytes []byte -} - -type NetworkConfigList struct { - Name string - CNIVersion string - DisableCheck bool - Plugins []*NetworkConfig - Bytes []byte -} - -type CNI interface { - AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) - CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error - DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error - GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) - GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) - - AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) - CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error - DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error - GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) - GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) - - ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) - ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) -} - -type CNIConfig struct { - Path []string - exec invoke.Exec - cacheDir string -} - -// CNIConfig implements the CNI interface -var _ CNI = &CNIConfig{} - -// NewCNIConfig returns a new CNIConfig object that will search for plugins -// in the given paths and use the given exec interface to run those plugins, -// or if the exec interface is not given, will use a default exec handler. 
-func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig { - return NewCNIConfigWithCacheDir(path, "", exec) -} - -// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins -// in the given paths use the given exec interface to run those plugins, -// or if the exec interface is not given, will use a default exec handler. -// The given cache directory will be used for temporary data storage when needed. -func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig { - return &CNIConfig{ - Path: path, - cacheDir: cacheDir, - exec: exec, - } -} - -func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) { - var err error - - inject := map[string]interface{}{ - "name": name, - "cniVersion": cniVersion, - } - // Add previous plugin result - if prevResult != nil { - inject["prevResult"] = prevResult - } - - // Ensure every config uses the same name and version - orig, err = InjectConf(orig, inject) - if err != nil { - return nil, err - } - - return injectRuntimeConfig(orig, rt) -} - -// This function takes a libcni RuntimeConf structure and injects values into -// a "runtimeConfig" dictionary in the CNI network configuration JSON that -// will be passed to the plugin on stdin. -// -// Only "capabilities arguments" passed by the runtime are currently injected. -// These capabilities arguments are filtered through the plugin's advertised -// capabilities from its config JSON, and any keys in the CapabilityArgs -// matching plugin capabilities are added to the "runtimeConfig" dictionary -// sent to the plugin via JSON on stdin. For example, if the plugin's -// capabilities include "portMappings", and the CapabilityArgs map includes a -// "portMappings" key, that key and its value are added to the "runtimeConfig" -// dictionary to be passed to the plugin's stdin. 
-func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) { - var err error - - rc := make(map[string]interface{}) - for capability, supported := range orig.Network.Capabilities { - if !supported { - continue - } - if data, ok := rt.CapabilityArgs[capability]; ok { - rc[capability] = data - } - } - - if len(rc) > 0 { - orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc}) - if err != nil { - return nil, err - } - } - - return orig, nil -} - -// ensure we have a usable exec if the CNIConfig was not given one -func (c *CNIConfig) ensureExec() invoke.Exec { - if c.exec == nil { - c.exec = &invoke.DefaultExec{ - RawExec: &invoke.RawExec{Stderr: os.Stderr}, - PluginDecoder: version.PluginDecoder{}, - } - } - return c.exec -} - -type cachedInfo struct { - Kind string `json:"kind"` - ContainerID string `json:"containerId"` - Config []byte `json:"config"` - IfName string `json:"ifName"` - NetworkName string `json:"networkName"` - CniArgs [][2]string `json:"cniArgs,omitempty"` - CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"` - RawResult map[string]interface{} `json:"result,omitempty"` - Result types.Result `json:"-"` -} - -// getCacheDir returns the cache directory in this order: -// 1) global cacheDir from CNIConfig object -// 2) deprecated cacheDir from RuntimeConf object -// 3) fall back to default cache directory -func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string { - if c.cacheDir != "" { - return c.cacheDir - } - if rt.CacheDir != "" { - return rt.CacheDir - } - return CacheDir -} - -func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) { - if netName == "" || rt.ContainerID == "" || rt.IfName == "" { - return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName) - } - return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, 
rt.IfName)), nil -} - -func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error { - cached := cachedInfo{ - Kind: CNICacheV1, - ContainerID: rt.ContainerID, - Config: config, - IfName: rt.IfName, - NetworkName: netName, - CniArgs: rt.Args, - CapabilityArgs: rt.CapabilityArgs, - } - - // We need to get type.Result into cachedInfo as JSON map - // Marshal to []byte, then Unmarshal into cached.RawResult - data, err := json.Marshal(result) - if err != nil { - return err - } - - err = json.Unmarshal(data, &cached.RawResult) - if err != nil { - return err - } - - newBytes, err := json.Marshal(&cached) - if err != nil { - return err - } - - fname, err := c.getCacheFilePath(netName, rt) - if err != nil { - return err - } - if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { - return err - } - - return ioutil.WriteFile(fname, newBytes, 0600) -} - -func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { - fname, err := c.getCacheFilePath(netName, rt) - if err != nil { - // Ignore error - return nil - } - return os.Remove(fname) -} - -func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { - var bytes []byte - - fname, err := c.getCacheFilePath(netName, rt) - if err != nil { - return nil, nil, err - } - bytes, err = ioutil.ReadFile(fname) - if err != nil { - // Ignore read errors; the cached result may not exist on-disk - return nil, nil, nil - } - - unmarshaled := cachedInfo{} - if err := json.Unmarshal(bytes, &unmarshaled); err != nil { - return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %v", netName, err) - } - if unmarshaled.Kind != CNICacheV1 { - return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind) - } - - newRt := *rt - if unmarshaled.CniArgs != nil { - newRt.Args = unmarshaled.CniArgs - } - newRt.CapabilityArgs = unmarshaled.CapabilityArgs - - return unmarshaled.Config, 
&newRt, nil -} - -func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { - fname, err := c.getCacheFilePath(netName, rt) - if err != nil { - return nil, err - } - data, err := ioutil.ReadFile(fname) - if err != nil { - // Ignore read errors; the cached result may not exist on-disk - return nil, nil - } - - // Read the version of the cached result - decoder := version.ConfigDecoder{} - resultCniVersion, err := decoder.Decode(data) - if err != nil { - return nil, err - } - - // Ensure we can understand the result - result, err := version.NewResult(resultCniVersion, data) - if err != nil { - return nil, err - } - - // Convert to the config version to ensure plugins get prevResult - // in the same version as the config. The cached result version - // should match the config version unless the config was changed - // while the container was running. - result, err = result.GetAsVersion(cniVersion) - if err != nil && resultCniVersion != cniVersion { - return nil, fmt.Errorf("failed to convert cached result version %q to config version %q: %v", resultCniVersion, cniVersion, err) - } - return result, err -} - -func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { - fname, err := c.getCacheFilePath(netName, rt) - if err != nil { - return nil, err - } - fdata, err := ioutil.ReadFile(fname) - if err != nil { - // Ignore read errors; the cached result may not exist on-disk - return nil, nil - } - - cachedInfo := cachedInfo{} - if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 { - return c.getLegacyCachedResult(netName, cniVersion, rt) - } - - newBytes, err := json.Marshal(&cachedInfo.RawResult) - if err != nil { - return nil, fmt.Errorf("failed to marshal cached network %q config: %v", netName, err) - } - - // Read the version of the cached result - decoder := version.ConfigDecoder{} - resultCniVersion, err := 
decoder.Decode(newBytes) - if err != nil { - return nil, err - } - - // Ensure we can understand the result - result, err := version.NewResult(resultCniVersion, newBytes) - if err != nil { - return nil, err - } - - // Convert to the config version to ensure plugins get prevResult - // in the same version as the config. The cached result version - // should match the config version unless the config was changed - // while the container was running. - result, err = result.GetAsVersion(cniVersion) - if err != nil && resultCniVersion != cniVersion { - return nil, fmt.Errorf("failed to convert cached result version %q to config version %q: %v", resultCniVersion, cniVersion, err) - } - return result, err -} - -// GetNetworkListCachedResult returns the cached Result of the previous -// AddNetworkList() operation for a network list, or an error. -func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { - return c.getCachedResult(list.Name, list.CNIVersion, rt) -} - -// GetNetworkCachedResult returns the cached Result of the previous -// AddNetwork() operation for a network, or an error. -func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { - return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) -} - -// GetNetworkListCachedConfig copies the input RuntimeConf to output -// RuntimeConf with fields updated with info from the cached Config. -func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { - return c.getCachedConfig(list.Name, rt) -} - -// GetNetworkCachedConfig copies the input RuntimeConf to output -// RuntimeConf with fields updated with info from the cached Config. 
-func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { - return c.getCachedConfig(net.Network.Name, rt) -} - -func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { - c.ensureExec() - pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) - if err != nil { - return nil, err - } - if err := utils.ValidateContainerID(rt.ContainerID); err != nil { - return nil, err - } - if err := utils.ValidateNetworkName(name); err != nil { - return nil, err - } - if err := utils.ValidateInterfaceName(rt.IfName); err != nil { - return nil, err - } - - newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) - if err != nil { - return nil, err - } - - return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec) -} - -// AddNetworkList executes a sequence of plugins with the ADD command -func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { - var err error - var result types.Result - for _, net := range list.Plugins { - result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt) - if err != nil { - return nil, err - } - } - - if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil { - return nil, fmt.Errorf("failed to set network %q cached result: %v", list.Name, err) - } - - return result, nil -} - -func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { - c.ensureExec() - pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) - if err != nil { - return err - } - - newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) - if err != nil { - return err - } - - return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec) -} - 
-// CheckNetworkList executes a sequence of plugins with the CHECK command -func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { - // CHECK was added in CNI spec version 0.4.0 and higher - if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { - return err - } else if !gtet { - return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) - } - - if list.DisableCheck { - return nil - } - - cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt) - if err != nil { - return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err) - } - - for _, net := range list.Plugins { - if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { - return err - } - } - - return nil -} - -func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { - c.ensureExec() - pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) - if err != nil { - return err - } - - newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) - if err != nil { - return err - } - - return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) -} - -// DelNetworkList executes a sequence of plugins with the DEL command -func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { - var cachedResult types.Result - - // Cached result on DEL was added in CNI spec version 0.4.0 and higher - if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { - return err - } else if gtet { - cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt) - if err != nil { - return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err) - } - } - - for i := len(list.Plugins) - 1; i >= 0; i-- { - net := 
list.Plugins[i] - if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { - return err - } - } - _ = c.cacheDel(list.Name, rt) - - return nil -} - -// AddNetwork executes the plugin with the ADD command -func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { - result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) - if err != nil { - return nil, err - } - - if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil { - return nil, fmt.Errorf("failed to set network %q cached result: %v", net.Network.Name, err) - } - - return result, nil -} - -// CheckNetwork executes the plugin with the CHECK command -func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { - // CHECK was added in CNI spec version 0.4.0 and higher - if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { - return err - } else if !gtet { - return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) - } - - cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) - if err != nil { - return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err) - } - return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) -} - -// DelNetwork executes the plugin with the DEL command -func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { - var cachedResult types.Result - - // Cached result on DEL was added in CNI spec version 0.4.0 and higher - if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { - return err - } else if gtet { - cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) - if err != nil { - return fmt.Errorf("failed to get network %q cached result: 
%v", net.Network.Name, err) - } - } - - if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { - return err - } - _ = c.cacheDel(net.Network.Name, rt) - return nil -} - -// ValidateNetworkList checks that a configuration is reasonably valid. -// - all the specified plugins exist on disk -// - every plugin supports the desired version. -// -// Returns a list of all capabilities supported by the configuration, or error -func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) { - version := list.CNIVersion - - // holding map for seen caps (in case of duplicates) - caps := map[string]interface{}{} - - errs := []error{} - for _, net := range list.Plugins { - if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil { - errs = append(errs, err) - } - for c, enabled := range net.Network.Capabilities { - if !enabled { - continue - } - caps[c] = struct{}{} - } - } - - if len(errs) > 0 { - return nil, fmt.Errorf("%v", errs) - } - - // make caps list - cc := make([]string, 0, len(caps)) - for c := range caps { - cc = append(cc, c) - } - - return cc, nil -} - -// ValidateNetwork checks that a configuration is reasonably valid. 
-// It uses the same logic as ValidateNetworkList) -// Returns a list of capabilities -func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) { - caps := []string{} - for c, ok := range net.Network.Capabilities { - if ok { - caps = append(caps, c) - } - } - if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil { - return nil, err - } - return caps, nil -} - -// validatePlugin checks that an individual plugin's configuration is sane -func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error { - c.ensureExec() - pluginPath, err := c.exec.FindInPath(pluginName, c.Path) - if err != nil { - return err - } - if expectedVersion == "" { - expectedVersion = "0.1.0" - } - - vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec) - if err != nil { - return err - } - for _, vers := range vi.SupportedVersions() { - if vers == expectedVersion { - return nil - } - } - return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion) -} - -// GetVersionInfo reports which versions of the CNI spec are supported by -// the given plugin. 
-func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) { - c.ensureExec() - pluginPath, err := c.exec.FindInPath(pluginType, c.Path) - if err != nil { - return nil, err - } - - return invoke.GetVersionInfo(ctx, pluginPath, c.exec) -} - -// ===== -func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { - return &invoke.Args{ - Command: action, - ContainerID: rt.ContainerID, - NetNS: rt.NetNS, - PluginArgs: rt.Args, - IfName: rt.IfName, - Path: strings.Join(c.Path, string(os.PathListSeparator)), - } -} diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go deleted file mode 100644 index d8920cf..0000000 --- a/vendor/github.com/containernetworking/cni/libcni/conf.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package libcni - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sort" -) - -type NotFoundError struct { - Dir string - Name string -} - -func (e NotFoundError) Error() string { - return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir) -} - -type NoConfigsFoundError struct { - Dir string -} - -func (e NoConfigsFoundError) Error() string { - return fmt.Sprintf(`no net configurations found in %s`, e.Dir) -} - -func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { - conf := &NetworkConfig{Bytes: bytes} - if err := json.Unmarshal(bytes, &conf.Network); err != nil { - return nil, fmt.Errorf("error parsing configuration: %s", err) - } - if conf.Network.Type == "" { - return nil, fmt.Errorf("error parsing configuration: missing 'type'") - } - return conf, nil -} - -func ConfFromFile(filename string) (*NetworkConfig, error) { - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("error reading %s: %s", filename, err) - } - return ConfFromBytes(bytes) -} - -func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { - rawList := make(map[string]interface{}) - if err := json.Unmarshal(bytes, &rawList); err != nil { - return nil, fmt.Errorf("error parsing configuration list: %s", err) - } - - rawName, ok := rawList["name"] - if !ok { - return nil, fmt.Errorf("error parsing configuration list: no name") - } - name, ok := rawName.(string) - if !ok { - return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName) - } - - var cniVersion string - rawVersion, ok := rawList["cniVersion"] - if ok { - cniVersion, ok = rawVersion.(string) - if !ok { - return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion) - } - } - - disableCheck := false - if rawDisableCheck, ok := rawList["disableCheck"]; ok { - disableCheck, ok = rawDisableCheck.(bool) - if !ok { - return nil, fmt.Errorf("error parsing configuration list: invalid 
disableCheck type %T", rawDisableCheck) - } - } - - list := &NetworkConfigList{ - Name: name, - DisableCheck: disableCheck, - CNIVersion: cniVersion, - Bytes: bytes, - } - - var plugins []interface{} - plug, ok := rawList["plugins"] - if !ok { - return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") - } - plugins, ok = plug.([]interface{}) - if !ok { - return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) - } - if len(plugins) == 0 { - return nil, fmt.Errorf("error parsing configuration list: no plugins in list") - } - - for i, conf := range plugins { - newBytes, err := json.Marshal(conf) - if err != nil { - return nil, fmt.Errorf("failed to marshal plugin config %d: %v", i, err) - } - netConf, err := ConfFromBytes(newBytes) - if err != nil { - return nil, fmt.Errorf("failed to parse plugin config %d: %v", i, err) - } - list.Plugins = append(list.Plugins, netConf) - } - - return list, nil -} - -func ConfListFromFile(filename string) (*NetworkConfigList, error) { - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("error reading %s: %s", filename, err) - } - return ConfListFromBytes(bytes) -} - -func ConfFiles(dir string, extensions []string) ([]string, error) { - // In part, adapted from rkt/networking/podenv.go#listFiles - files, err := ioutil.ReadDir(dir) - switch { - case err == nil: // break - case os.IsNotExist(err): - return nil, nil - default: - return nil, err - } - - confFiles := []string{} - for _, f := range files { - if f.IsDir() { - continue - } - fileExt := filepath.Ext(f.Name()) - for _, ext := range extensions { - if fileExt == ext { - confFiles = append(confFiles, filepath.Join(dir, f.Name())) - } - } - } - return confFiles, nil -} - -func LoadConf(dir, name string) (*NetworkConfig, error) { - files, err := ConfFiles(dir, []string{".conf", ".json"}) - switch { - case err != nil: - return nil, err - case len(files) == 0: - return nil, 
NoConfigsFoundError{Dir: dir} - } - sort.Strings(files) - - for _, confFile := range files { - conf, err := ConfFromFile(confFile) - if err != nil { - return nil, err - } - if conf.Network.Name == name { - return conf, nil - } - } - return nil, NotFoundError{dir, name} -} - -func LoadConfList(dir, name string) (*NetworkConfigList, error) { - files, err := ConfFiles(dir, []string{".conflist"}) - if err != nil { - return nil, err - } - sort.Strings(files) - - for _, confFile := range files { - conf, err := ConfListFromFile(confFile) - if err != nil { - return nil, err - } - if conf.Name == name { - return conf, nil - } - } - - // Try and load a network configuration file (instead of list) - // from the same name, then upconvert. - singleConf, err := LoadConf(dir, name) - if err != nil { - // A little extra logic so the error makes sense - if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { - // Config lists found but no config files found - return nil, NotFoundError{dir, name} - } - - return nil, err - } - return ConfListFromConf(singleConf) -} - -func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { - config := make(map[string]interface{}) - err := json.Unmarshal(original.Bytes, &config) - if err != nil { - return nil, fmt.Errorf("unmarshal existing network bytes: %s", err) - } - - for key, value := range newValues { - if key == "" { - return nil, fmt.Errorf("keys cannot be empty") - } - - if value == nil { - return nil, fmt.Errorf("key '%s' value must not be nil", key) - } - - config[key] = value - } - - newBytes, err := json.Marshal(config) - if err != nil { - return nil, err - } - - return ConfFromBytes(newBytes) -} - -// ConfListFromConf "upconverts" a network config in to a NetworkConfigList, -// with the single network as the only entry in the list. -func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { - // Re-deserialize the config's json, then make a raw map configlist. 
- // This may seem a bit strange, but it's to make the Bytes fields - // actually make sense. Otherwise, the generated json is littered with - // golang default values. - - rawConfig := make(map[string]interface{}) - if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil { - return nil, err - } - - rawConfigList := map[string]interface{}{ - "name": original.Network.Name, - "cniVersion": original.Network.CNIVersion, - "plugins": []interface{}{rawConfig}, - } - - b, err := json.Marshal(rawConfigList) - if err != nil { - return nil, err - } - return ConfListFromBytes(b) -} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go deleted file mode 100644 index 3cdb4bc..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package invoke - -import ( - "fmt" - "os" - "strings" -) - -type CNIArgs interface { - // For use with os/exec; i.e., return nil to inherit the - // environment from this process - // For use in delegation; inherit the environment from this - // process and allow overrides - AsEnv() []string -} - -type inherited struct{} - -var inheritArgsFromEnv inherited - -func (*inherited) AsEnv() []string { - return nil -} - -func ArgsFromEnv() CNIArgs { - return &inheritArgsFromEnv -} - -type Args struct { - Command string - ContainerID string - NetNS string - PluginArgs [][2]string - PluginArgsStr string - IfName string - Path string -} - -// Args implements the CNIArgs interface -var _ CNIArgs = &Args{} - -func (args *Args) AsEnv() []string { - env := os.Environ() - pluginArgsStr := args.PluginArgsStr - if pluginArgsStr == "" { - pluginArgsStr = stringify(args.PluginArgs) - } - - // Duplicated values which come first will be overridden, so we must put the - // custom values in the end to avoid being overridden by the process environments. - env = append(env, - "CNI_COMMAND="+args.Command, - "CNI_CONTAINERID="+args.ContainerID, - "CNI_NETNS="+args.NetNS, - "CNI_ARGS="+pluginArgsStr, - "CNI_IFNAME="+args.IfName, - "CNI_PATH="+args.Path, - ) - return dedupEnv(env) -} - -// taken from rkt/networking/net_plugin.go -func stringify(pluginArgs [][2]string) string { - entries := make([]string, len(pluginArgs)) - - for i, kv := range pluginArgs { - entries[i] = strings.Join(kv[:], "=") - } - - return strings.Join(entries, ";") -} - -// DelegateArgs implements the CNIArgs interface -// used for delegation to inherit from environments -// and allow some overrides like CNI_COMMAND -var _ CNIArgs = &DelegateArgs{} - -type DelegateArgs struct { - Command string -} - -func (d *DelegateArgs) AsEnv() []string { - env := os.Environ() - - // The custom values should come in the end to override the existing - // process environment of the same key. 
- env = append(env, - "CNI_COMMAND="+d.Command, - ) - return dedupEnv(env) -} - -// dedupEnv returns a copy of env with any duplicates removed, in favor of later values. -// Items not of the normal environment "key=value" form are preserved unchanged. -func dedupEnv(env []string) []string { - out := make([]string, 0, len(env)) - envMap := map[string]string{} - - for _, kv := range env { - // find the first "=" in environment, if not, just keep it - eq := strings.Index(kv, "=") - if eq < 0 { - out = append(out, kv) - continue - } - envMap[kv[:eq]] = kv[eq+1:] - } - - for k, v := range envMap { - out = append(out, fmt.Sprintf("%s=%s", k, v)) - } - - return out -} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go deleted file mode 100644 index 8defe4d..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package invoke - -import ( - "context" - "os" - "path/filepath" - - "github.com/containernetworking/cni/pkg/types" -) - -func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) { - if exec == nil { - exec = defaultExec - } - - paths := filepath.SplitList(os.Getenv("CNI_PATH")) - pluginPath, err := exec.FindInPath(delegatePlugin, paths) - if err != nil { - return "", nil, err - } - - return pluginPath, exec, nil -} - -// DelegateAdd calls the given delegate plugin with the CNI ADD action and -// JSON configuration -func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { - pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) - if err != nil { - return nil, err - } - - // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD - return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec) -} - -// DelegateCheck calls the given delegate plugin with the CNI CHECK action and -// JSON configuration -func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { - pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) - if err != nil { - return err - } - - // DelegateCheck will override the original CNI_COMMAND env from process with CHECK - return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) -} - -// DelegateDel calls the given delegate plugin with the CNI DEL action and -// JSON configuration -func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { - pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) - if err != nil { - return err - } - - // DelegateDel will override the original CNI_COMMAND env from process with DEL - return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) -} - -// return CNIArgs used by delegation -func delegateArgs(action string) *DelegateArgs { - 
return &DelegateArgs{ - Command: action, - } -} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go deleted file mode 100644 index 8e6d30b..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package invoke - -import ( - "context" - "fmt" - "os" - - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/version" -) - -// Exec is an interface encapsulates all operations that deal with finding -// and executing a CNI plugin. Tests may provide a fake implementation -// to avoid writing fake plugins to temporary directories during the test. 
-type Exec interface { - ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) - FindInPath(plugin string, paths []string) (string, error) - Decode(jsonBytes []byte) (version.PluginInfo, error) -} - -// For example, a testcase could pass an instance of the following fakeExec -// object to ExecPluginWithResult() to verify the incoming stdin and environment -// and provide a tailored response: -// -//import ( -// "encoding/json" -// "path" -// "strings" -//) -// -//type fakeExec struct { -// version.PluginDecoder -//} -// -//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { -// net := &types.NetConf{} -// err := json.Unmarshal(stdinData, net) -// if err != nil { -// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err) -// } -// pluginName := path.Base(pluginPath) -// if pluginName != net.Type { -// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type) -// } -// for _, e := range environ { -// // Check environment for forced failure request -// parts := strings.Split(e, "=") -// if len(parts) > 0 && parts[0] == "FAIL" { -// return nil, fmt.Errorf("failed to execute plugin %s", pluginName) -// } -// } -// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil -//} -// -//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { -// if len(paths) > 0 { -// return path.Join(paths[0], plugin), nil -// } -// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) -//} - -func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { - if exec == nil { - exec = defaultExec - } - - stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) - if err != nil { - return nil, err - } - - // Plugin must return result in same version as specified in netconf - versionDecoder := &version.ConfigDecoder{} - 
confVersion, err := versionDecoder.Decode(netconf) - if err != nil { - return nil, err - } - - return version.NewResult(confVersion, stdoutBytes) -} - -func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { - if exec == nil { - exec = defaultExec - } - _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) - return err -} - -// GetVersionInfo returns the version information available about the plugin. -// For recent-enough plugins, it uses the information returned by the VERSION -// command. For older plugins which do not recognize that command, it reports -// version 0.1.0 -func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) { - if exec == nil { - exec = defaultExec - } - args := &Args{ - Command: "VERSION", - - // set fake values required by plugins built against an older version of skel - NetNS: "dummy", - IfName: "dummy", - Path: "dummy", - } - stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) - stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv()) - if err != nil { - if err.Error() == "unknown CNI_COMMAND: VERSION" { - return version.PluginSupports("0.1.0"), nil - } - return nil, err - } - - return exec.Decode(stdoutBytes) -} - -// DefaultExec is an object that implements the Exec interface which looks -// for and executes plugins from disk. 
-type DefaultExec struct { - *RawExec - version.PluginDecoder -} - -// DefaultExec implements the Exec interface -var _ Exec = &DefaultExec{} - -var defaultExec = &DefaultExec{ - RawExec: &RawExec{Stderr: os.Stderr}, -} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go deleted file mode 100644 index e62029e..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package invoke - -import ( - "fmt" - "os" - "path/filepath" - "strings" -) - -// FindInPath returns the full path of the plugin by searching in the provided path -func FindInPath(plugin string, paths []string) (string, error) { - if plugin == "" { - return "", fmt.Errorf("no plugin name provided") - } - - if strings.ContainsRune(plugin, os.PathSeparator) { - return "", fmt.Errorf("invalid plugin name: %s", plugin) - } - - if len(paths) == 0 { - return "", fmt.Errorf("no paths provided") - } - - for _, path := range paths { - for _, fe := range ExecutableFileExtensions { - fullpath := filepath.Join(path, plugin) + fe - if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { - return fullpath, nil - } - } - } - - return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths) -} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go deleted file mode 100644 index 9bcfb45..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package invoke - -// Valid file extensions for plugin executables. 
-var ExecutableFileExtensions = []string{""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go deleted file mode 100644 index 7665125..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package invoke - -// Valid file extensions for plugin executables. -var ExecutableFileExtensions = []string{".exe", ""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go deleted file mode 100644 index 5ab5cc8..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package invoke - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "os/exec" - "strings" - "time" - - "github.com/containernetworking/cni/pkg/types" -) - -type RawExec struct { - Stderr io.Writer -} - -func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) { - stdout := &bytes.Buffer{} - stderr := &bytes.Buffer{} - c := exec.CommandContext(ctx, pluginPath) - c.Env = environ - c.Stdin = bytes.NewBuffer(stdinData) - c.Stdout = stdout - c.Stderr = stderr - - // Retry the command on "text file busy" errors - for i := 0; i <= 5; i++ { - err := c.Run() - - // Command succeeded - if err == nil { - break - } - - // If the plugin is currently about to be written, then we wait a - // second and try it again - if strings.Contains(err.Error(), "text file busy") { - time.Sleep(time.Second) - continue - } - - // All other errors except than the busy text file - return nil, e.pluginErr(err, stdout.Bytes(), stderr.Bytes()) - } - - // Copy stderr to caller's buffer in case plugin printed to both - // stdout and stderr for some reason. Ignore failures as stderr is - // only informational. 
- if e.Stderr != nil && stderr.Len() > 0 { - _, _ = stderr.WriteTo(e.Stderr) - } - return stdout.Bytes(), nil -} - -func (e *RawExec) pluginErr(err error, stdout, stderr []byte) error { - emsg := types.Error{} - if len(stdout) == 0 { - if len(stderr) == 0 { - emsg.Msg = fmt.Sprintf("netplugin failed with no error message: %v", err) - } else { - emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(stderr)) - } - } else if perr := json.Unmarshal(stdout, &emsg); perr != nil { - emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(stdout), perr) - } - return &emsg -} - -func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) { - return FindInPath(plugin, paths) -} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go deleted file mode 100644 index 36f3167..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types020 - -import ( - "encoding/json" - "fmt" - "io" - "net" - "os" - - "github.com/containernetworking/cni/pkg/types" -) - -const ImplementedSpecVersion string = "0.2.0" - -var SupportedVersions = []string{"", "0.1.0", ImplementedSpecVersion} - -// Compatibility types for CNI version 0.1.0 and 0.2.0 - -func NewResult(data []byte) (types.Result, error) { - result := &Result{} - if err := json.Unmarshal(data, result); err != nil { - return nil, err - } - return result, nil -} - -func GetResult(r types.Result) (*Result, error) { - // We expect version 0.1.0/0.2.0 results - result020, err := r.GetAsVersion(ImplementedSpecVersion) - if err != nil { - return nil, err - } - result, ok := result020.(*Result) - if !ok { - return nil, fmt.Errorf("failed to convert result") - } - return result, nil -} - -// Result is what gets returned from the plugin (via stdout) to the caller -type Result struct { - CNIVersion string `json:"cniVersion,omitempty"` - IP4 *IPConfig `json:"ip4,omitempty"` - IP6 *IPConfig `json:"ip6,omitempty"` - DNS types.DNS `json:"dns,omitempty"` -} - -func (r *Result) Version() string { - return ImplementedSpecVersion -} - -func (r *Result) GetAsVersion(version string) (types.Result, error) { - for _, supportedVersion := range SupportedVersions { - if version == supportedVersion { - r.CNIVersion = version - return r, nil - } - } - return nil, fmt.Errorf("cannot convert version %q to %s", SupportedVersions, version) -} - -func (r *Result) Print() error { - return r.PrintTo(os.Stdout) -} - -func (r *Result) PrintTo(writer io.Writer) error { - data, err := json.MarshalIndent(r, "", " ") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// IPConfig contains values necessary to configure an interface -type IPConfig struct { - IP net.IPNet - Gateway net.IP - Routes []types.Route -} - -// net.IPNet is not JSON (un)marshallable so this duality is needed -// for our custom IPNet type - -// JSON (un)marshallable types 
-type ipConfig struct { - IP types.IPNet `json:"ip"` - Gateway net.IP `json:"gateway,omitempty"` - Routes []types.Route `json:"routes,omitempty"` -} - -func (c *IPConfig) MarshalJSON() ([]byte, error) { - ipc := ipConfig{ - IP: types.IPNet(c.IP), - Gateway: c.Gateway, - Routes: c.Routes, - } - - return json.Marshal(ipc) -} - -func (c *IPConfig) UnmarshalJSON(data []byte) error { - ipc := ipConfig{} - if err := json.Unmarshal(data, &ipc); err != nil { - return err - } - - c.IP = net.IPNet(ipc.IP) - c.Gateway = ipc.Gateway - c.Routes = ipc.Routes - return nil -} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go deleted file mode 100644 index 4eac648..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/types/args.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "encoding" - "fmt" - "reflect" - "strings" -) - -// UnmarshallableBool typedef for builtin bool -// because builtin type's methods can't be declared -type UnmarshallableBool bool - -// UnmarshalText implements the encoding.TextUnmarshaler interface. 
-// Returns boolean true if the string is "1" or "[Tt]rue" -// Returns boolean false if the string is "0" or "[Ff]alse" -func (b *UnmarshallableBool) UnmarshalText(data []byte) error { - s := strings.ToLower(string(data)) - switch s { - case "1", "true": - *b = true - case "0", "false": - *b = false - default: - return fmt.Errorf("boolean unmarshal error: invalid input %s", s) - } - return nil -} - -// UnmarshallableString typedef for builtin string -type UnmarshallableString string - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Returns the string -func (s *UnmarshallableString) UnmarshalText(data []byte) error { - *s = UnmarshallableString(data) - return nil -} - -// CommonArgs contains the IgnoreUnknown argument -// and must be embedded by all Arg structs -type CommonArgs struct { - IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"` -} - -// GetKeyField is a helper function to receive Values -// Values that represent a pointer to a struct -func GetKeyField(keyString string, v reflect.Value) reflect.Value { - return v.Elem().FieldByName(keyString) -} - -// UnmarshalableArgsError is used to indicate error unmarshalling args -// from the args-string in the form "K=V;K2=V2;..." -type UnmarshalableArgsError struct { - error -} - -// LoadArgs parses args from a string in the form "K=V;K2=V2;..." 
-func LoadArgs(args string, container interface{}) error { - if args == "" { - return nil - } - - containerValue := reflect.ValueOf(container) - - pairs := strings.Split(args, ";") - unknownArgs := []string{} - for _, pair := range pairs { - kv := strings.Split(pair, "=") - if len(kv) != 2 { - return fmt.Errorf("ARGS: invalid pair %q", pair) - } - keyString := kv[0] - valueString := kv[1] - keyField := GetKeyField(keyString, containerValue) - if !keyField.IsValid() { - unknownArgs = append(unknownArgs, pair) - continue - } - keyFieldIface := keyField.Addr().Interface() - u, ok := keyFieldIface.(encoding.TextUnmarshaler) - if !ok { - return UnmarshalableArgsError{fmt.Errorf( - "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler", - keyString, reflect.TypeOf(keyFieldIface))} - } - err := u.UnmarshalText([]byte(valueString)) - if err != nil { - return fmt.Errorf("ARGS: error parsing value of pair %q: %v)", pair, err) - } - } - - isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool() - if len(unknownArgs) > 0 && !isIgnoreUnknown { - return fmt.Errorf("ARGS: unknown args %q", unknownArgs) - } - return nil -} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go deleted file mode 100644 index 754cc6e..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package current - -import ( - "encoding/json" - "fmt" - "io" - "net" - "os" - - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/types/020" -) - -const ImplementedSpecVersion string = "0.4.0" - -var SupportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion} - -func NewResult(data []byte) (types.Result, error) { - result := &Result{} - if err := json.Unmarshal(data, result); err != nil { - return nil, err - } - return result, nil -} - -func GetResult(r types.Result) (*Result, error) { - resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) - if err != nil { - return nil, err - } - result, ok := resultCurrent.(*Result) - if !ok { - return nil, fmt.Errorf("failed to convert result") - } - return result, nil -} - -var resultConverters = []struct { - versions []string - convert func(types.Result) (*Result, error) -}{ - {types020.SupportedVersions, convertFrom020}, - {SupportedVersions, convertFrom030}, -} - -func convertFrom020(result types.Result) (*Result, error) { - oldResult, err := types020.GetResult(result) - if err != nil { - return nil, err - } - - newResult := &Result{ - CNIVersion: ImplementedSpecVersion, - DNS: oldResult.DNS, - Routes: []*types.Route{}, - } - - if oldResult.IP4 != nil { - newResult.IPs = append(newResult.IPs, &IPConfig{ - Version: "4", - Address: oldResult.IP4.IP, - Gateway: oldResult.IP4.Gateway, - }) - for _, route := range oldResult.IP4.Routes { - newResult.Routes = append(newResult.Routes, &types.Route{ - Dst: route.Dst, - GW: route.GW, - }) - } - } - - if oldResult.IP6 != nil { - newResult.IPs = append(newResult.IPs, &IPConfig{ - Version: "6", - Address: oldResult.IP6.IP, - Gateway: oldResult.IP6.Gateway, - }) - for _, route := range oldResult.IP6.Routes { - newResult.Routes = append(newResult.Routes, &types.Route{ - Dst: route.Dst, - GW: route.GW, - }) - } - } - - return 
newResult, nil -} - -func convertFrom030(result types.Result) (*Result, error) { - newResult, ok := result.(*Result) - if !ok { - return nil, fmt.Errorf("failed to convert result") - } - newResult.CNIVersion = ImplementedSpecVersion - return newResult, nil -} - -func NewResultFromResult(result types.Result) (*Result, error) { - version := result.Version() - for _, converter := range resultConverters { - for _, supportedVersion := range converter.versions { - if version == supportedVersion { - return converter.convert(result) - } - } - } - return nil, fmt.Errorf("unsupported CNI result22 version %q", version) -} - -// Result is what gets returned from the plugin (via stdout) to the caller -type Result struct { - CNIVersion string `json:"cniVersion,omitempty"` - Interfaces []*Interface `json:"interfaces,omitempty"` - IPs []*IPConfig `json:"ips,omitempty"` - Routes []*types.Route `json:"routes,omitempty"` - DNS types.DNS `json:"dns,omitempty"` -} - -// Convert to the older 0.2.0 CNI spec Result type -func (r *Result) convertTo020() (*types020.Result, error) { - oldResult := &types020.Result{ - CNIVersion: types020.ImplementedSpecVersion, - DNS: r.DNS, - } - - for _, ip := range r.IPs { - // Only convert the first IP address of each version as 0.2.0 - // and earlier cannot handle multiple IP addresses - if ip.Version == "4" && oldResult.IP4 == nil { - oldResult.IP4 = &types020.IPConfig{ - IP: ip.Address, - Gateway: ip.Gateway, - } - } else if ip.Version == "6" && oldResult.IP6 == nil { - oldResult.IP6 = &types020.IPConfig{ - IP: ip.Address, - Gateway: ip.Gateway, - } - } - - if oldResult.IP4 != nil && oldResult.IP6 != nil { - break - } - } - - for _, route := range r.Routes { - is4 := route.Dst.IP.To4() != nil - if is4 && oldResult.IP4 != nil { - oldResult.IP4.Routes = append(oldResult.IP4.Routes, types.Route{ - Dst: route.Dst, - GW: route.GW, - }) - } else if !is4 && oldResult.IP6 != nil { - oldResult.IP6.Routes = append(oldResult.IP6.Routes, types.Route{ - Dst: 
route.Dst, - GW: route.GW, - }) - } - } - - if oldResult.IP4 == nil && oldResult.IP6 == nil { - return nil, fmt.Errorf("cannot convert: no valid IP addresses") - } - - return oldResult, nil -} - -func (r *Result) Version() string { - return ImplementedSpecVersion -} - -func (r *Result) GetAsVersion(version string) (types.Result, error) { - switch version { - case "0.3.0", "0.3.1", ImplementedSpecVersion: - r.CNIVersion = version - return r, nil - case types020.SupportedVersions[0], types020.SupportedVersions[1], types020.SupportedVersions[2]: - return r.convertTo020() - } - return nil, fmt.Errorf("cannot convert version 0.3.x to %q", version) -} - -func (r *Result) Print() error { - return r.PrintTo(os.Stdout) -} - -func (r *Result) PrintTo(writer io.Writer) error { - data, err := json.MarshalIndent(r, "", " ") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Convert this old version result to the current CNI version result -func (r *Result) Convert() (*Result, error) { - return r, nil -} - -// Interface contains values about the created interfaces -type Interface struct { - Name string `json:"name"` - Mac string `json:"mac,omitempty"` - Sandbox string `json:"sandbox,omitempty"` -} - -func (i *Interface) String() string { - return fmt.Sprintf("%+v", *i) -} - -// Int returns a pointer to the int value passed in. Used to -// set the IPConfig.Interface field. 
-func Int(v int) *int { - return &v -} - -// IPConfig contains values necessary to configure an IP address on an interface -type IPConfig struct { - // IP version, either "4" or "6" - Version string - // Index into Result structs Interfaces list - Interface *int - Address net.IPNet - Gateway net.IP -} - -func (i *IPConfig) String() string { - return fmt.Sprintf("%+v", *i) -} - -// JSON (un)marshallable types -type ipConfig struct { - Version string `json:"version"` - Interface *int `json:"interface,omitempty"` - Address types.IPNet `json:"address"` - Gateway net.IP `json:"gateway,omitempty"` -} - -func (c *IPConfig) MarshalJSON() ([]byte, error) { - ipc := ipConfig{ - Version: c.Version, - Interface: c.Interface, - Address: types.IPNet(c.Address), - Gateway: c.Gateway, - } - - return json.Marshal(ipc) -} - -func (c *IPConfig) UnmarshalJSON(data []byte) error { - ipc := ipConfig{} - if err := json.Unmarshal(data, &ipc); err != nil { - return err - } - - c.Version = ipc.Version - c.Interface = ipc.Interface - c.Address = net.IPNet(ipc.Address) - c.Gateway = ipc.Gateway - return nil -} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go deleted file mode 100644 index 3fa757a..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "encoding/json" - "fmt" - "io" - "net" - "os" -) - -// like net.IPNet but adds JSON marshalling and unmarshalling -type IPNet net.IPNet - -// ParseCIDR takes a string like "10.2.3.1/24" and -// return IPNet with "10.2.3.1" and /24 mask -func ParseCIDR(s string) (*net.IPNet, error) { - ip, ipn, err := net.ParseCIDR(s) - if err != nil { - return nil, err - } - - ipn.IP = ip - return ipn, nil -} - -func (n IPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(&n).String()) -} - -func (n *IPNet) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - - tmp, err := ParseCIDR(s) - if err != nil { - return err - } - - *n = IPNet(*tmp) - return nil -} - -// NetConf describes a network. -type NetConf struct { - CNIVersion string `json:"cniVersion,omitempty"` - - Name string `json:"name,omitempty"` - Type string `json:"type,omitempty"` - Capabilities map[string]bool `json:"capabilities,omitempty"` - IPAM IPAM `json:"ipam,omitempty"` - DNS DNS `json:"dns"` - - RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` - PrevResult Result `json:"-"` -} - -type IPAM struct { - Type string `json:"type,omitempty"` -} - -// NetConfList describes an ordered list of networks. 
-type NetConfList struct { - CNIVersion string `json:"cniVersion,omitempty"` - - Name string `json:"name,omitempty"` - DisableCheck bool `json:"disableCheck,omitempty"` - Plugins []*NetConf `json:"plugins,omitempty"` -} - -type ResultFactoryFunc func([]byte) (Result, error) - -// Result is an interface that provides the result of plugin execution -type Result interface { - // The highest CNI specification result version the result supports - // without having to convert - Version() string - - // Returns the result converted into the requested CNI specification - // result version, or an error if conversion failed - GetAsVersion(version string) (Result, error) - - // Prints the result in JSON format to stdout - Print() error - - // Prints the result in JSON format to provided writer - PrintTo(writer io.Writer) error -} - -func PrintResult(result Result, version string) error { - newResult, err := result.GetAsVersion(version) - if err != nil { - return err - } - return newResult.Print() -} - -// DNS contains values interesting for DNS resolvers -type DNS struct { - Nameservers []string `json:"nameservers,omitempty"` - Domain string `json:"domain,omitempty"` - Search []string `json:"search,omitempty"` - Options []string `json:"options,omitempty"` -} - -type Route struct { - Dst net.IPNet - GW net.IP -} - -func (r *Route) String() string { - return fmt.Sprintf("%+v", *r) -} - -// Well known error codes -// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes -const ( - ErrUnknown uint = iota // 0 - ErrIncompatibleCNIVersion // 1 - ErrUnsupportedField // 2 - ErrUnknownContainer // 3 - ErrInvalidEnvironmentVariables // 4 - ErrIOFailure // 5 - ErrDecodingFailure // 6 - ErrInvalidNetworkConfig // 7 - ErrTryAgainLater uint = 11 - ErrInternal uint = 999 -) - -type Error struct { - Code uint `json:"code"` - Msg string `json:"msg"` - Details string `json:"details,omitempty"` -} - -func NewError(code uint, msg, details string) *Error { - 
return &Error{ - Code: code, - Msg: msg, - Details: details, - } -} - -func (e *Error) Error() string { - details := "" - if e.Details != "" { - details = fmt.Sprintf("; %v", e.Details) - } - return fmt.Sprintf("%v%v", e.Msg, details) -} - -func (e *Error) Print() error { - return prettyPrint(e) -} - -// net.IPNet is not JSON (un)marshallable so this duality is needed -// for our custom IPNet type - -// JSON (un)marshallable types -type route struct { - Dst IPNet `json:"dst"` - GW net.IP `json:"gw,omitempty"` -} - -func (r *Route) UnmarshalJSON(data []byte) error { - rt := route{} - if err := json.Unmarshal(data, &rt); err != nil { - return err - } - - r.Dst = net.IPNet(rt.Dst) - r.GW = rt.GW - return nil -} - -func (r Route) MarshalJSON() ([]byte, error) { - rt := route{ - Dst: IPNet(r.Dst), - GW: r.GW, - } - - return json.Marshal(rt) -} - -func prettyPrint(obj interface{}) error { - data, err := json.MarshalIndent(obj, "", " ") - if err != nil { - return err - } - _, err = os.Stdout.Write(data) - return err -} diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go deleted file mode 100644 index b8ec388..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2019 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package utils - -import ( - "bytes" - "fmt" - "regexp" - "unicode" - - "github.com/containernetworking/cni/pkg/types" -) - -const ( - // cniValidNameChars is the regexp used to validate valid characters in - // containerID and networkName - cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]` - - // maxInterfaceNameLength is the length max of a valid interface name - maxInterfaceNameLength = 15 -) - -var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) - -// ValidateContainerID will validate that the supplied containerID is not empty does not contain invalid characters -func ValidateContainerID(containerID string) *types.Error { - - if containerID == "" { - return types.NewError(types.ErrUnknownContainer, "missing containerID", "") - } - if !cniReg.MatchString(containerID) { - return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID) - } - return nil -} - -// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters -func ValidateNetworkName(networkName string) *types.Error { - - if networkName == "" { - return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "") - } - if !cniReg.MatchString(networkName) { - return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName) - } - return nil -} - -// ValidateInterfaceName will validate the interface name based on the three rules below -// 1. The name must not be empty -// 2. The name must be less than 16 characters -// 3. The name must not be "." or ".." -// 3. 
The name must not contain / or : or any whitespace characters -// ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024 -func ValidateInterfaceName(ifName string) *types.Error { - if len(ifName) == 0 { - return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is empty", "") - } - if len(ifName) > maxInterfaceNameLength { - return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is too long", fmt.Sprintf("interface name should be less than %d characters", maxInterfaceNameLength+1)) - } - if ifName == "." || ifName == ".." { - return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is . or ..", "") - } - for _, r := range bytes.Runes([]byte(ifName)) { - if r == '/' || r == ':' || unicode.IsSpace(r) { - return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name contains / or : or whitespace characters", "") - } - } - - return nil -} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/vendor/github.com/containernetworking/cni/pkg/version/conf.go deleted file mode 100644 index 3cca58b..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/version/conf.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package version - -import ( - "encoding/json" - "fmt" -) - -// ConfigDecoder can decode the CNI version available in network config data -type ConfigDecoder struct{} - -func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) { - var conf struct { - CNIVersion string `json:"cniVersion"` - } - err := json.Unmarshal(jsonBytes, &conf) - if err != nil { - return "", fmt.Errorf("decoding version from network config: %s", err) - } - if conf.CNIVersion == "" { - return "0.1.0", nil - } - return conf.CNIVersion, nil -} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go deleted file mode 100644 index 1df4272..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "encoding/json" - "fmt" - "io" - "strconv" - "strings" -) - -// PluginInfo reports information about CNI versioning -type PluginInfo interface { - // SupportedVersions returns one or more CNI spec versions that the plugin - // supports. 
If input is provided in one of these versions, then the plugin - // promises to use the same CNI version in its response - SupportedVersions() []string - - // Encode writes this CNI version information as JSON to the given Writer - Encode(io.Writer) error -} - -type pluginInfo struct { - CNIVersion_ string `json:"cniVersion"` - SupportedVersions_ []string `json:"supportedVersions,omitempty"` -} - -// pluginInfo implements the PluginInfo interface -var _ PluginInfo = &pluginInfo{} - -func (p *pluginInfo) Encode(w io.Writer) error { - return json.NewEncoder(w).Encode(p) -} - -func (p *pluginInfo) SupportedVersions() []string { - return p.SupportedVersions_ -} - -// PluginSupports returns a new PluginInfo that will report the given versions -// as supported -func PluginSupports(supportedVersions ...string) PluginInfo { - if len(supportedVersions) < 1 { - panic("programmer error: you must support at least one version") - } - return &pluginInfo{ - CNIVersion_: Current(), - SupportedVersions_: supportedVersions, - } -} - -// PluginDecoder can decode the response returned by a plugin's VERSION command -type PluginDecoder struct{} - -func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { - var info pluginInfo - err := json.Unmarshal(jsonBytes, &info) - if err != nil { - return nil, fmt.Errorf("decoding version info: %s", err) - } - if info.CNIVersion_ == "" { - return nil, fmt.Errorf("decoding version info: missing field cniVersion") - } - if len(info.SupportedVersions_) == 0 { - if info.CNIVersion_ == "0.2.0" { - return PluginSupports("0.1.0", "0.2.0"), nil - } - return nil, fmt.Errorf("decoding version info: missing field supportedVersions") - } - return &info, nil -} - -// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major, -// minor, and micro numbers or returns an error -func ParseVersion(version string) (int, int, int, error) { - var major, minor, micro int - if version == "" { - return -1, -1, -1, fmt.Errorf("invalid version %q: 
the version is empty", version) - } - - parts := strings.Split(version, ".") - if len(parts) >= 4 { - return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) - } - - major, err := strconv.Atoi(parts[0]) - if err != nil { - return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %v", parts[0], err) - } - - if len(parts) >= 2 { - minor, err = strconv.Atoi(parts[1]) - if err != nil { - return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %v", parts[1], err) - } - } - - if len(parts) >= 3 { - micro, err = strconv.Atoi(parts[2]) - if err != nil { - return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %v", parts[2], err) - } - } - - return major, minor, micro, nil -} - -// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro -// numbers, and compares them to determine whether the first version is greater -// than or equal to the second -func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { - firstMajor, firstMinor, firstMicro, err := ParseVersion(version) - if err != nil { - return false, err - } - - secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) - if err != nil { - return false, err - } - - if firstMajor > secondMajor { - return true, nil - } else if firstMajor == secondMajor { - if firstMinor > secondMinor { - return true, nil - } else if firstMinor == secondMinor && firstMicro >= secondMicro { - return true, nil - } - } - return false, nil -} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go deleted file mode 100644 index 25c3810..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import "fmt" - -type ErrorIncompatible struct { - Config string - Supported []string -} - -func (e *ErrorIncompatible) Details() string { - return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported) -} - -func (e *ErrorIncompatible) Error() string { - return fmt.Sprintf("incompatible CNI versions: %s", e.Details()) -} - -type Reconciler struct{} - -func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible { - return r.CheckRaw(configVersion, pluginInfo.SupportedVersions()) -} - -func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible { - for _, supportedVersion := range supportedVersions { - if configVersion == supportedVersion { - return nil - } - } - - return &ErrorIncompatible{ - Config: configVersion, - Supported: supportedVersions, - } -} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go deleted file mode 100644 index 8f3508e..0000000 --- a/vendor/github.com/containernetworking/cni/pkg/version/version.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "encoding/json" - "fmt" - - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/types/020" - "github.com/containernetworking/cni/pkg/types/current" -) - -// Current reports the version of the CNI spec implemented by this library -func Current() string { - return "0.4.0" -} - -// Legacy PluginInfo describes a plugin that is backwards compatible with the -// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0 -// library ought to work correctly with a plugin that reports support for -// Legacy versions. -// -// Any future CNI spec versions which meet this definition should be added to -// this list. -var Legacy = PluginSupports("0.1.0", "0.2.0") -var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0") - -var resultFactories = []struct { - supportedVersions []string - newResult types.ResultFactoryFunc -}{ - {current.SupportedVersions, current.NewResult}, - {types020.SupportedVersions, types020.NewResult}, -} - -// Finds a Result object matching the requested version (if any) and asks -// that object to parse the plugin result, returning an error if parsing failed. 
-func NewResult(version string, resultBytes []byte) (types.Result, error) { - reconciler := &Reconciler{} - for _, resultFactory := range resultFactories { - err := reconciler.CheckRaw(version, resultFactory.supportedVersions) - if err == nil { - // Result supports this version - return resultFactory.newResult(resultBytes) - } - } - - return nil, fmt.Errorf("unsupported CNI result version %q", version) -} - -// ParsePrevResult parses a prevResult in a NetConf structure and sets -// the NetConf's PrevResult member to the parsed Result object. -func ParsePrevResult(conf *types.NetConf) error { - if conf.RawPrevResult == nil { - return nil - } - - resultBytes, err := json.Marshal(conf.RawPrevResult) - if err != nil { - return fmt.Errorf("could not serialize prevResult: %v", err) - } - - conf.RawPrevResult = nil - conf.PrevResult, err = NewResult(conf.CNIVersion, resultBytes) - if err != nil { - return fmt.Errorf("could not parse prevResult: %v", err) - } - - return nil -} diff --git a/vendor/github.com/containernetworking/plugins/LICENSE b/vendor/github.com/containernetworking/plugins/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/vendor/github.com/containernetworking/plugins/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md deleted file mode 100644 index 1e265c7..0000000 --- a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md +++ /dev/null @@ -1,41 +0,0 @@ -### Namespaces, Threads, and Go -On Linux each OS thread can have a different network namespace. Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code. - -### Namespace Switching -Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads. - -Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in. - -For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. First, the goroutine calling `Set()` must have previously called `LockOSThread()`. 
Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly. - -### Do() The Recommended Thing -The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example: - -```go -err = targetNs.Do(func(hostNs ns.NetNS) error { - dummy := &netlink.Dummy{ - LinkAttrs: netlink.LinkAttrs{ - Name: "dummy0", - }, - } - return netlink.LinkAdd(dummy) -}) -``` - -Note this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. All goroutines spawned from within the `ns.Do` will not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()` which helps to minimize the problem. - -When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled. 
- -In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use go 1.10 or greater. - - -### Creating network namespaces -Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`. A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration. - - -### Further Reading - - https://github.com/golang/go/wiki/LockOSThread - - http://morsmachine.dk/go-scheduler - - https://github.com/containernetworking/cni/issues/262 - - https://golang.org/pkg/runtime/ - - https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go deleted file mode 100644 index 3b745d4..0000000 --- a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2015-2017 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ns - -import ( - "fmt" - "os" - "runtime" - "sync" - "syscall" - - "golang.org/x/sys/unix" -) - -// Returns an object representing the current OS thread's network namespace -func GetCurrentNS() (NetNS, error) { - // Lock the thread in case other goroutine executes in it and changes its - // network namespace after getCurrentThreadNetNSPath(), otherwise it might - // return an unexpected network namespace. - runtime.LockOSThread() - defer runtime.UnlockOSThread() - return GetNS(getCurrentThreadNetNSPath()) -} - -func getCurrentThreadNetNSPath() string { - // /proc/self/ns/net returns the namespace of the main thread, not - // of whatever thread this goroutine is running on. Make sure we - // use the thread's net namespace since the thread is switching around - return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) -} - -func (ns *netNS) Close() error { - if err := ns.errorIfClosed(); err != nil { - return err - } - - if err := ns.file.Close(); err != nil { - return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err) - } - ns.closed = true - - return nil -} - -func (ns *netNS) Set() error { - if err := ns.errorIfClosed(); err != nil { - return err - } - - if err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET); err != nil { - return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err) - } - - return nil -} - -type NetNS interface { - // Executes the passed closure in this object's network namespace, - // attempting to restore the original namespace before returning. - // However, since each OS thread can have a different network namespace, - // and Go's thread scheduling is highly variable, callers cannot - // guarantee any specific namespace is set unless operations that - // require that namespace are wrapped with Do(). Also, no code called - // from Do() should call runtime.UnlockOSThread(), or the risk - // of executing code in an incorrect namespace will be greater. 
See - // https://github.com/golang/go/wiki/LockOSThread for further details. - Do(toRun func(NetNS) error) error - - // Sets the current network namespace to this object's network namespace. - // Note that since Go's thread scheduling is highly variable, callers - // cannot guarantee the requested namespace will be the current namespace - // after this function is called; to ensure this wrap operations that - // require the namespace with Do() instead. - Set() error - - // Returns the filesystem path representing this object's network namespace - Path() string - - // Returns a file descriptor representing this object's network namespace - Fd() uintptr - - // Cleans up this instance of the network namespace; if this instance - // is the last user the namespace will be destroyed - Close() error -} - -type netNS struct { - file *os.File - closed bool -} - -// netNS implements the NetNS interface -var _ NetNS = &netNS{} - -const ( - // https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h - NSFS_MAGIC = 0x6e736673 - PROCFS_MAGIC = 0x9fa0 -) - -type NSPathNotExistErr struct{ msg string } - -func (e NSPathNotExistErr) Error() string { return e.msg } - -type NSPathNotNSErr struct{ msg string } - -func (e NSPathNotNSErr) Error() string { return e.msg } - -func IsNSorErr(nspath string) error { - stat := syscall.Statfs_t{} - if err := syscall.Statfs(nspath, &stat); err != nil { - if os.IsNotExist(err) { - err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)} - } else { - err = fmt.Errorf("failed to Statfs %q: %v", nspath, err) - } - return err - } - - switch stat.Type { - case PROCFS_MAGIC, NSFS_MAGIC: - return nil - default: - return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)} - } -} - -// Returns an object representing the namespace referred to by @path -func GetNS(nspath string) (NetNS, error) { - err := IsNSorErr(nspath) - if err != nil { - return nil, err - } - - fd, err := 
os.Open(nspath) - if err != nil { - return nil, err - } - - return &netNS{file: fd}, nil -} - -func (ns *netNS) Path() string { - return ns.file.Name() -} - -func (ns *netNS) Fd() uintptr { - return ns.file.Fd() -} - -func (ns *netNS) errorIfClosed() error { - if ns.closed { - return fmt.Errorf("%q has already been closed", ns.file.Name()) - } - return nil -} - -func (ns *netNS) Do(toRun func(NetNS) error) error { - if err := ns.errorIfClosed(); err != nil { - return err - } - - containedCall := func(hostNS NetNS) error { - threadNS, err := GetCurrentNS() - if err != nil { - return fmt.Errorf("failed to open current netns: %v", err) - } - defer threadNS.Close() - - // switch to target namespace - if err = ns.Set(); err != nil { - return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err) - } - defer func() { - err := threadNS.Set() // switch back - if err == nil { - // Unlock the current thread only when we successfully switched back - // to the original namespace; otherwise leave the thread locked which - // will force the runtime to scrap the current thread, that is maybe - // not as optimal but at least always safe to do. - runtime.UnlockOSThread() - } - }() - - return toRun(hostNS) - } - - // save a handle to current network namespace - hostNS, err := GetCurrentNS() - if err != nil { - return fmt.Errorf("Failed to open current namespace: %v", err) - } - defer hostNS.Close() - - var wg sync.WaitGroup - wg.Add(1) - - // Start the callback in a new green thread so that if we later fail - // to switch the namespace back to the original one, we can safely - // leave the thread locked to die without a risk of the current thread - // left lingering with incorrect namespace. 
- var innerError error - go func() { - defer wg.Done() - runtime.LockOSThread() - innerError = containedCall(hostNS) - }() - wg.Wait() - - return innerError -} - -// WithNetNSPath executes the passed closure under the given network -// namespace, restoring the original namespace afterwards. -func WithNetNSPath(nspath string, toRun func(NetNS) error) error { - ns, err := GetNS(nspath) - if err != nil { - return err - } - defer ns.Close() - return ns.Do(toRun) -} diff --git a/vendor/github.com/containers/ocicrypt/.travis.yml b/vendor/github.com/containers/ocicrypt/.travis.yml deleted file mode 100644 index e4dd4a4..0000000 --- a/vendor/github.com/containers/ocicrypt/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -dist: bionic -language: go - -os: -- linux - -go: - - "1.13.x" - - "1.16.x" - -matrix: - include: - - os: linux - -addons: - apt: - packages: - - gnutls-bin - - softhsm2 - -go_import_path: github.com/containers/ocicrypt - -install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0 - -script: - - make - - make check - - make test diff --git a/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md deleted file mode 100644 index 5131b5a..0000000 --- a/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md +++ /dev/null @@ -1,3 +0,0 @@ -## The OCIcrypt Library Project Community Code of Conduct - -The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/ocicrypt/LICENSE b/vendor/github.com/containers/ocicrypt/LICENSE deleted file mode 100644 index 9535635..0000000 --- a/vendor/github.com/containers/ocicrypt/LICENSE +++ /dev/null @@ -1,189 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. 
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containers/ocicrypt/MAINTAINERS b/vendor/github.com/containers/ocicrypt/MAINTAINERS deleted file mode 100644 index e6a7d1f..0000000 --- a/vendor/github.com/containers/ocicrypt/MAINTAINERS +++ /dev/null @@ -1,5 +0,0 @@ -# ocicrypt maintainers -# -# Github ID, Name, Email Address -lumjjb, Brandon Lum, lumjjb@gmail.com -stefanberger, Stefan Berger, stefanb@linux.ibm.com diff --git a/vendor/github.com/containers/ocicrypt/Makefile b/vendor/github.com/containers/ocicrypt/Makefile deleted file mode 100644 index dc9d985..0000000 --- a/vendor/github.com/containers/ocicrypt/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright The containerd Authors. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -.PHONY: check build decoder generate-protobuf - -all: build - -FORCE: - -check: - golangci-lint run - -build: vendor - go build ./... - -vendor: - go mod tidy - -test: - go test ./... 
-test.v - -generate-protobuf: - protoc -I utils/keyprovider/ utils/keyprovider/keyprovider.proto --go_out=plugins=grpc:utils/keyprovider diff --git a/vendor/github.com/containers/ocicrypt/README.md b/vendor/github.com/containers/ocicrypt/README.md deleted file mode 100644 index 84cab7a..0000000 --- a/vendor/github.com/containers/ocicrypt/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# OCIcrypt Library - -The `ocicrypt` library is the OCI image spec implementation of container image encryption. More details of the spec can be seen in the [OCI repository](https://github.com/opencontainers/image-spec/pull/775). The purpose of this library is to encode spec structures and consts in code, as well as provide a consistent implementation of image encryption across container runtimes and build tools. - -Consumers of OCIcrypt: - -- [containerd/imgcrypt](https://github.com/containerd/imgcrypt) -- [cri-o](https://github.com/cri-o/cri-o) -- [skopeo](https://github.com/containers/skopeo) - - -## Usage - -There are various levels of usage for this library. The main consumers of these would be runtime/build tools, and a more specific use would be in the ability to extend cryptographic function. - -### Runtime/Build tool usage - -The general exposed interface a runtime/build tool would use, would be to perform encryption or decryption of layers: - -``` -package "github.com/containers/ocicrypt" -func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error) -func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error) -``` - -The settings/parameters to these functions can be specified via creation of an encryption config with the `github.com/containers/ocicrypt/config` package. 
We note that because setting of annotations and other fields of the layer descriptor is done through various means in different runtimes/build tools, it is the responsibility of the caller to still ensure that the layer descriptor follows the OCI specification (i.e. encoding, setting annotations, etc.). - - -### Crypto Agility and Extensibility - -The implementation for both symmetric and asymmetric encryption used in this library are behind 2 main interfaces, which users can extend if need be. These are in the following packages: -- github.com/containers/ocicrypt/blockcipher - LayerBlockCipher interface for block ciphers -- github.com/containers/ocicrypt/keywrap - KeyWrapper interface for key wrapping - -We note that adding interfaces here is risky outside the OCI spec is not recommended, unless for very specialized and confined usecases. Please open an issue or PR if there is a general usecase that could be added to the OCI spec. - -## Security Issues - -We consider security issues related to this library critical. Please report and security related issues by emailing maintainers in the [MAINTAINERS](MAINTAINERS) file. - - -## Ocicrypt Pkcs11 Support - -Ocicrypt Pkcs11 support is currently experiemental. For more details, please refer to the [this document](docs/pkcs11.md). diff --git a/vendor/github.com/containers/ocicrypt/SECURITY.md b/vendor/github.com/containers/ocicrypt/SECURITY.md deleted file mode 100644 index 30124c8..0000000 --- a/vendor/github.com/containers/ocicrypt/SECURITY.md +++ /dev/null @@ -1,3 +0,0 @@ -## Security and Disclosure Information Policy for the OCIcrypt Library Project - -The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects. 
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go deleted file mode 100644 index da403d9..0000000 --- a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go +++ /dev/null @@ -1,160 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package blockcipher - -import ( - "io" - - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// LayerCipherType is the ciphertype as specified in the layer metadata -type LayerCipherType string - -// TODO: Should be obtained from OCI spec once included -const ( - AES256CTR LayerCipherType = "AES_256_CTR_HMAC_SHA256" -) - -// PrivateLayerBlockCipherOptions includes the information required to encrypt/decrypt -// an image which are sensitive and should not be in plaintext -type PrivateLayerBlockCipherOptions struct { - // SymmetricKey represents the symmetric key used for encryption/decryption - // This field should be populated by Encrypt/Decrypt calls - SymmetricKey []byte `json:"symkey"` - - // Digest is the digest of the original data for verification. 
- // This is NOT populated by Encrypt/Decrypt calls - Digest digest.Digest `json:"digest"` - - // CipherOptions contains the cipher metadata used for encryption/decryption - // This field should be populated by Encrypt/Decrypt calls - CipherOptions map[string][]byte `json:"cipheroptions"` -} - -// PublicLayerBlockCipherOptions includes the information required to encrypt/decrypt -// an image which are public and can be deduplicated in plaintext across multiple -// recipients -type PublicLayerBlockCipherOptions struct { - // CipherType denotes the cipher type according to the list of OCI suppported - // cipher types. - CipherType LayerCipherType `json:"cipher"` - - // Hmac contains the hmac string to help verify encryption - Hmac []byte `json:"hmac"` - - // CipherOptions contains the cipher metadata used for encryption/decryption - // This field should be populated by Encrypt/Decrypt calls - CipherOptions map[string][]byte `json:"cipheroptions"` -} - -// LayerBlockCipherOptions contains the public and private LayerBlockCipherOptions -// required to encrypt/decrypt an image -type LayerBlockCipherOptions struct { - Public PublicLayerBlockCipherOptions - Private PrivateLayerBlockCipherOptions -} - -// LayerBlockCipher returns a provider for encrypt/decrypt functionality -// for handling the layer data for a specific algorithm -type LayerBlockCipher interface { - // GenerateKey creates a symmetric key - GenerateKey() ([]byte, error) - // Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions - Encrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error) - // Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions - Decrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) -} - -// LayerBlockCipherHandler is the handler for encrypt/decrypt for layers -type LayerBlockCipherHandler struct { - cipherMap 
map[LayerCipherType]LayerBlockCipher -} - -// Finalizer is called after data blobs are written, and returns the LayerBlockCipherOptions for the encrypted blob -type Finalizer func() (LayerBlockCipherOptions, error) - -// GetOpt returns the value of the cipher option and if the option exists -func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) { - if v, ok := lbco.Public.CipherOptions[key]; ok { - return v, ok - } else if v, ok := lbco.Private.CipherOptions[key]; ok { - return v, ok - } else { - return nil, false - } -} - -func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer { - return func() (LayerBlockCipherOptions, error) { - lbco, err := fin() - if err != nil { - return LayerBlockCipherOptions{}, err - } - lbco.Public.CipherType = typ - return lbco, err - } -} - -// Encrypt is the handler for the layer decryption routine -func (h *LayerBlockCipherHandler) Encrypt(plainDataReader io.Reader, typ LayerCipherType) (io.Reader, Finalizer, error) { - if c, ok := h.cipherMap[typ]; ok { - sk, err := c.GenerateKey() - if err != nil { - return nil, nil, err - } - opt := LayerBlockCipherOptions{ - Private: PrivateLayerBlockCipherOptions{ - SymmetricKey: sk, - }, - } - encDataReader, fin, err := c.Encrypt(plainDataReader, opt) - if err == nil { - fin = wrapFinalizerWithType(fin, typ) - } - return encDataReader, fin, err - } - return nil, nil, errors.Errorf("unsupported cipher type: %s", typ) -} - -// Decrypt is the handler for the layer decryption routine -func (h *LayerBlockCipherHandler) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) { - typ := opt.Public.CipherType - if typ == "" { - return nil, LayerBlockCipherOptions{}, errors.New("no cipher type provided") - } - if c, ok := h.cipherMap[LayerCipherType(typ)]; ok { - return c.Decrypt(encDataReader, opt) - } - return nil, LayerBlockCipherOptions{}, errors.Errorf("unsupported cipher type: %s", typ) -} - -// 
NewLayerBlockCipherHandler returns a new default handler -func NewLayerBlockCipherHandler() (*LayerBlockCipherHandler, error) { - h := LayerBlockCipherHandler{ - cipherMap: map[LayerCipherType]LayerBlockCipher{}, - } - - var err error - h.cipherMap[AES256CTR], err = NewAESCTRLayerBlockCipher(256) - if err != nil { - return nil, errors.Wrap(err, "unable to set up Cipher AES-256-CTR") - } - - return &h, nil -} diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go deleted file mode 100644 index 095a53e..0000000 --- a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go +++ /dev/null @@ -1,193 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package blockcipher - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/hmac" - "crypto/rand" - "crypto/sha256" - "fmt" - "hash" - "io" - - "github.com/containers/ocicrypt/utils" - "github.com/pkg/errors" -) - -// AESCTRLayerBlockCipher implements the AES CTR stream cipher -type AESCTRLayerBlockCipher struct { - keylen int // in bytes - reader io.Reader - encrypt bool - stream cipher.Stream - err error - hmac hash.Hash - expHmac []byte - doneEncrypting bool -} - -type aesctrcryptor struct { - bc *AESCTRLayerBlockCipher -} - -// NewAESCTRLayerBlockCipher returns a new AES SIV block cipher of 256 or 512 bits -func NewAESCTRLayerBlockCipher(bits int) (LayerBlockCipher, error) { - if bits != 256 { - return nil, errors.New("AES CTR bit count not supported") - } - return &AESCTRLayerBlockCipher{keylen: bits / 8}, nil -} - -func (r *aesctrcryptor) Read(p []byte) (int, error) { - var ( - o int - ) - - if r.bc.err != nil { - return 0, r.bc.err - } - - o, err := utils.FillBuffer(r.bc.reader, p) - if err != nil { - if err == io.EOF { - r.bc.err = err - } else { - return 0, err - } - } - - if !r.bc.encrypt { - if _, err := r.bc.hmac.Write(p[:o]); err != nil { - r.bc.err = errors.Wrapf(err, "could not write to hmac") - return 0, r.bc.err - } - - if r.bc.err == io.EOF { - // Before we return EOF we let the HMAC comparison - // provide a verdict - if !hmac.Equal(r.bc.hmac.Sum(nil), r.bc.expHmac) { - r.bc.err = fmt.Errorf("could not properly decrypt byte stream; exp hmac: '%x', actual hmac: '%s'", r.bc.expHmac, r.bc.hmac.Sum(nil)) - return 0, r.bc.err - } - } - } - - r.bc.stream.XORKeyStream(p[:o], p[:o]) - - if r.bc.encrypt { - if _, err := r.bc.hmac.Write(p[:o]); err != nil { - r.bc.err = errors.Wrapf(err, "could not write to hmac") - return 0, r.bc.err - } - - if r.bc.err == io.EOF { - // Final data encrypted; Do the 'then-MAC' part - r.bc.doneEncrypting = true - } - } - - return o, r.bc.err -} - -// init initializes an instance -func (bc *AESCTRLayerBlockCipher) 
init(encrypt bool, reader io.Reader, opts LayerBlockCipherOptions) (LayerBlockCipherOptions, error) { - var ( - err error - ) - - key := opts.Private.SymmetricKey - if len(key) != bc.keylen { - return LayerBlockCipherOptions{}, fmt.Errorf("invalid key length of %d bytes; need %d bytes", len(key), bc.keylen) - } - - nonce, ok := opts.GetOpt("nonce") - if !ok { - nonce = make([]byte, aes.BlockSize) - if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - return LayerBlockCipherOptions{}, errors.Wrap(err, "unable to generate random nonce") - } - } - - block, err := aes.NewCipher(key) - if err != nil { - return LayerBlockCipherOptions{}, errors.Wrap(err, "aes.NewCipher failed") - } - - bc.reader = reader - bc.encrypt = encrypt - bc.stream = cipher.NewCTR(block, nonce) - bc.err = nil - bc.hmac = hmac.New(sha256.New, key) - bc.expHmac = opts.Public.Hmac - bc.doneEncrypting = false - - if !encrypt && len(bc.expHmac) == 0 { - return LayerBlockCipherOptions{}, errors.New("HMAC is not provided for decryption process") - } - - lbco := LayerBlockCipherOptions{ - Private: PrivateLayerBlockCipherOptions{ - SymmetricKey: key, - CipherOptions: map[string][]byte{ - "nonce": nonce, - }, - }, - } - - return lbco, nil -} - -// GenerateKey creates a synmmetric key -func (bc *AESCTRLayerBlockCipher) GenerateKey() ([]byte, error) { - key := make([]byte, bc.keylen) - if _, err := io.ReadFull(rand.Reader, key); err != nil { - return nil, err - } - return key, nil -} - -// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions -func (bc *AESCTRLayerBlockCipher) Encrypt(plainDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error) { - lbco, err := bc.init(true, plainDataReader, opt) - if err != nil { - return nil, nil, err - } - - finalizer := func() (LayerBlockCipherOptions, error) { - if !bc.doneEncrypting { - return LayerBlockCipherOptions{}, errors.New("Read()ing not complete, unable to finalize") - } - if 
lbco.Public.CipherOptions == nil { - lbco.Public.CipherOptions = map[string][]byte{} - } - lbco.Public.Hmac = bc.hmac.Sum(nil) - return lbco, nil - } - return &aesctrcryptor{bc}, finalizer, nil -} - -// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions -func (bc *AESCTRLayerBlockCipher) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) { - lbco, err := bc.init(false, encDataReader, opt) - if err != nil { - return nil, LayerBlockCipherOptions{}, err - } - - return utils.NewDelayedReader(&aesctrcryptor{bc}, 1024*10), lbco, nil -} diff --git a/vendor/github.com/containers/ocicrypt/config/config.go b/vendor/github.com/containers/ocicrypt/config/config.go deleted file mode 100644 index d960766..0000000 --- a/vendor/github.com/containers/ocicrypt/config/config.go +++ /dev/null @@ -1,114 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package config - -// EncryptConfig is the container image PGP encryption configuration holding -// the identifiers of those that will be able to decrypt the container and -// the PGP public keyring file data that contains their public keys. 
-type EncryptConfig struct { - // map holding 'gpg-recipients', 'gpg-pubkeyringfile', 'pubkeys', 'x509s' - Parameters map[string][][]byte - - DecryptConfig DecryptConfig -} - -// DecryptConfig wraps the Parameters map that holds the decryption key -type DecryptConfig struct { - // map holding 'privkeys', 'x509s', 'gpg-privatekeys' - Parameters map[string][][]byte -} - -// CryptoConfig is a common wrapper for EncryptConfig and DecrypConfig that can -// be passed through functions that share much code for encryption and decryption -type CryptoConfig struct { - EncryptConfig *EncryptConfig - DecryptConfig *DecryptConfig -} - -// InitDecryption initialized a CryptoConfig object with parameters used for decryption -func InitDecryption(dcparameters map[string][][]byte) CryptoConfig { - return CryptoConfig{ - DecryptConfig: &DecryptConfig{ - Parameters: dcparameters, - }, - } -} - -// InitEncryption initializes a CryptoConfig object with parameters used for encryption -// It also takes dcparameters that may be needed for decryption when adding a recipient -// to an already encrypted image -func InitEncryption(parameters, dcparameters map[string][][]byte) CryptoConfig { - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: parameters, - DecryptConfig: DecryptConfig{ - Parameters: dcparameters, - }, - }, - } -} - -// CombineCryptoConfigs takes a CryptoConfig list and creates a single CryptoConfig -// containing the crypto configuration of all the key bundles -func CombineCryptoConfigs(ccs []CryptoConfig) CryptoConfig { - ecparam := map[string][][]byte{} - ecdcparam := map[string][][]byte{} - dcparam := map[string][][]byte{} - - for _, cc := range ccs { - if ec := cc.EncryptConfig; ec != nil { - addToMap(ecparam, ec.Parameters) - addToMap(ecdcparam, ec.DecryptConfig.Parameters) - } - - if dc := cc.DecryptConfig; dc != nil { - addToMap(dcparam, dc.Parameters) - } - } - - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: ecparam, - 
DecryptConfig: DecryptConfig{ - Parameters: ecdcparam, - }, - }, - DecryptConfig: &DecryptConfig{ - Parameters: dcparam, - }, - } - -} - -// AttachDecryptConfig adds DecryptConfig to the field of EncryptConfig so that -// the decryption parameters can be used to add recipients to an existing image -// if the user is able to decrypt it. -func (ec *EncryptConfig) AttachDecryptConfig(dc *DecryptConfig) { - if dc != nil { - addToMap(ec.DecryptConfig.Parameters, dc.Parameters) - } -} - -func addToMap(orig map[string][][]byte, add map[string][][]byte) { - for k, v := range add { - if ov, ok := orig[k]; ok { - orig[k] = append(ov, v...) - } else { - orig[k] = v - } - } -} diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go deleted file mode 100644 index a789d05..0000000 --- a/vendor/github.com/containers/ocicrypt/config/constructors.go +++ /dev/null @@ -1,245 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package config - -import ( - "github.com/containers/ocicrypt/crypto/pkcs11" - "strings" - - "github.com/pkg/errors" - "gopkg.in/yaml.v2" -) - -// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys -func EncryptWithJwe(pubKeys [][]byte) (CryptoConfig, error) { - dc := DecryptConfig{} - ep := map[string][][]byte{ - "pubkeys": pubKeys, - } - - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: ep, - DecryptConfig: dc, - }, - DecryptConfig: &dc, - }, nil -} - -// EncryptWithPkcs7 returns a CryptoConfig to encrypt with pkcs7 x509 certs -func EncryptWithPkcs7(x509s [][]byte) (CryptoConfig, error) { - dc := DecryptConfig{} - - ep := map[string][][]byte{ - "x509s": x509s, - } - - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: ep, - DecryptConfig: dc, - }, - DecryptConfig: &dc, - }, nil -} - -// EncryptWithGpg returns a CryptoConfig to encrypt with configured gpg parameters -func EncryptWithGpg(gpgRecipients [][]byte, gpgPubRingFile []byte) (CryptoConfig, error) { - dc := DecryptConfig{} - ep := map[string][][]byte{ - "gpg-recipients": gpgRecipients, - "gpg-pubkeyringfile": {gpgPubRingFile}, - } - - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: ep, - DecryptConfig: dc, - }, - DecryptConfig: &dc, - }, nil -} - -// EncryptWithPkcs11 returns a CryptoConfig to encrypt with configured pkcs11 parameters -func EncryptWithPkcs11(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Pubkeys, pkcs11Yamls [][]byte) (CryptoConfig, error) { - dc := DecryptConfig{} - ep := map[string][][]byte{} - - if len(pkcs11Yamls) > 0 { - if pkcs11Config == nil { - return CryptoConfig{}, errors.New("pkcs11Config must not be nil") - } - p11confYaml, err := yaml.Marshal(pkcs11Config) - if err != nil { - return CryptoConfig{}, errors.Wrapf(err, "Could not marshal Pkcs11Config to Yaml") - } - - dc = DecryptConfig{ - Parameters: map[string][][]byte{ - "pkcs11-config": {p11confYaml}, - }, - } - ep["pkcs11-yamls"] = pkcs11Yamls - } - 
if len(pkcs11Pubkeys) > 0 { - ep["pkcs11-pubkeys"] = pkcs11Pubkeys - } - - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: ep, - DecryptConfig: dc, - }, - DecryptConfig: &dc, - }, nil -} - -// EncryptWithKeyProvider returns a CryptoConfig to encrypt with configured keyprovider parameters -func EncryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) { - dc := DecryptConfig{} - ep := make(map[string][][]byte) - for _, keyProvider := range keyProviders { - keyProvidersStr := string(keyProvider) - idx := strings.Index(keyProvidersStr, ":") - if idx > 0 { - ep[keyProvidersStr[:idx]] = append(ep[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:])) - } else { - ep[keyProvidersStr] = append(ep[keyProvidersStr], []byte("Enabled")) - } - } - - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: ep, - DecryptConfig: dc, - }, - DecryptConfig: &dc, - }, nil -} - -// DecryptWithKeyProvider returns a CryptoConfig to decrypt with configured keyprovider parameters -func DecryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) { - dp := make(map[string][][]byte) - ep := map[string][][]byte{} - for _, keyProvider := range keyProviders { - keyProvidersStr := string(keyProvider) - idx := strings.Index(keyProvidersStr, ":") - if idx > 0 { - dp[keyProvidersStr[:idx]] = append(dp[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:])) - } else { - dp[keyProvidersStr] = append(dp[keyProvidersStr], []byte("Enabled")) - } - } - dc := DecryptConfig{ - Parameters: dp, - } - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: ep, - DecryptConfig: dc, - }, - DecryptConfig: &dc, - }, nil -} - -// DecryptWithPrivKeys returns a CryptoConfig to decrypt with configured private keys -func DecryptWithPrivKeys(privKeys [][]byte, privKeysPasswords [][]byte) (CryptoConfig, error) { - if len(privKeys) != len(privKeysPasswords) { - return CryptoConfig{}, errors.New("Length of privKeys should match length of privKeysPasswords") - 
} - - dc := DecryptConfig{ - Parameters: map[string][][]byte{ - "privkeys": privKeys, - "privkeys-passwords": privKeysPasswords, - }, - } - - ep := map[string][][]byte{} - - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: ep, - DecryptConfig: dc, - }, - DecryptConfig: &dc, - }, nil -} - -// DecryptWithX509s returns a CryptoConfig to decrypt with configured x509 certs -func DecryptWithX509s(x509s [][]byte) (CryptoConfig, error) { - dc := DecryptConfig{ - Parameters: map[string][][]byte{ - "x509s": x509s, - }, - } - - ep := map[string][][]byte{} - - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: ep, - DecryptConfig: dc, - }, - DecryptConfig: &dc, - }, nil -} - -// DecryptWithGpgPrivKeys returns a CryptoConfig to decrypt with configured gpg private keys -func DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeysPwds [][]byte) (CryptoConfig, error) { - dc := DecryptConfig{ - Parameters: map[string][][]byte{ - "gpg-privatekeys": gpgPrivKeys, - "gpg-privatekeys-passwords": gpgPrivKeysPwds, - }, - } - - ep := map[string][][]byte{} - - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: ep, - DecryptConfig: dc, - }, - DecryptConfig: &dc, - }, nil -} - -// DecryptWithPkcs11Yaml returns a CryptoConfig to decrypt with pkcs11 YAML formatted key files -func DecryptWithPkcs11Yaml(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Yamls [][]byte) (CryptoConfig, error) { - p11confYaml, err := yaml.Marshal(pkcs11Config) - if err != nil { - return CryptoConfig{}, errors.Wrapf(err, "Could not marshal Pkcs11Config to Yaml") - } - - dc := DecryptConfig{ - Parameters: map[string][][]byte{ - "pkcs11-yamls": pkcs11Yamls, - "pkcs11-config": {p11confYaml}, - }, - } - - ep := map[string][][]byte{} - - return CryptoConfig{ - EncryptConfig: &EncryptConfig{ - Parameters: ep, - DecryptConfig: dc, - }, - DecryptConfig: &dc, - }, nil -} diff --git a/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go 
b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go deleted file mode 100644 index b454b37..0000000 --- a/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package config - -import ( - "encoding/json" - "github.com/pkg/errors" - "io/ioutil" - "os" -) - -// Command describes the structure of command, it consist of path and args, where path defines the location of -// binary executable and args are passed on to the binary executable -type Command struct { - Path string `json:"path,omitempty"` - Args []string `json:"args,omitempty"` -} - -// KeyProviderAttrs describes the structure of key provider, it defines the way of invocation to key provider -type KeyProviderAttrs struct { - Command *Command `json:"cmd,omitempty"` - Grpc string `json:"grpc,omitempty"` -} - -// OcicryptConfig represents the format of an ocicrypt_provider.conf config file -type OcicryptConfig struct { - KeyProviderConfig map[string]KeyProviderAttrs `json:"key-providers"` -} - -const ENVVARNAME = "OCICRYPT_KEYPROVIDER_CONFIG" - -// parseConfigFile parses a configuration file; it is not an error if the configuration file does -// not exist, so no error is returned. 
-func parseConfigFile(filename string) (*OcicryptConfig, error) { - // a non-existent config file is not an error - _, err := os.Stat(filename) - if os.IsNotExist(err) { - return nil, nil - } - - data, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - - ic := &OcicryptConfig{} - err = json.Unmarshal(data, ic) - return ic, err -} - -// getConfiguration tries to read the configuration file at the following locations -// ${OCICRYPT_KEYPROVIDER_CONFIG} == "/etc/ocicrypt_keyprovider.yaml" -// If no configuration file could be found or read a null pointer is returned -func GetConfiguration() (*OcicryptConfig, error) { - var ic *OcicryptConfig - var err error - filename := os.Getenv(ENVVARNAME) - if len(filename) > 0 { - ic, err = parseConfigFile(filename) - if err != nil { - return nil, errors.Wrap(err, "Error while parsing keyprovider config file") - } - } else { - return nil, nil - } - return ic, nil -} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go deleted file mode 100644 index 7fcd2e3..0000000 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - Copyright The ocicrypt Authors. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package pkcs11 - -import ( - "fmt" - "github.com/pkg/errors" - pkcs11uri "github.com/stefanberger/go-pkcs11uri" - "gopkg.in/yaml.v2" -) - -// Pkcs11KeyFile describes the format of the pkcs11 (private) key file. -// It also carries pkcs11 module related environment variables that are transferred to the -// Pkcs11URI object and activated when the pkcs11 module is used. -type Pkcs11KeyFile struct { - Pkcs11 struct { - Uri string `yaml:"uri"` - } `yaml:"pkcs11"` - Module struct { - Env map[string]string `yaml:"env,omitempty"` - } `yaml:"module"` -} - -// Pkcs11KeyFileObject is a representation of the Pkcs11KeyFile with the pkcs11 URI as an object -type Pkcs11KeyFileObject struct { - Uri *pkcs11uri.Pkcs11URI -} - -// ParsePkcs11Uri parses a pkcs11 URI -func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) { - p11uri := pkcs11uri.New() - err := p11uri.Parse(uri) - if err != nil { - return nil, errors.Wrapf(err, "Could not parse Pkcs11URI from file") - } - return p11uri, err -} - -// ParsePkcs11KeyFile parses a pkcs11 key file holding a pkcs11 URI describing a private key. 
-// The file has the following yaml format: -// pkcs11: -// - uri : -// An error is returned if the pkcs11 URI is malformed -func ParsePkcs11KeyFile(yamlstr []byte) (*Pkcs11KeyFileObject, error) { - p11keyfile := Pkcs11KeyFile{} - - err := yaml.Unmarshal([]byte(yamlstr), &p11keyfile) - if err != nil { - return nil, errors.Wrapf(err, "Could not unmarshal pkcs11 keyfile") - } - - p11uri, err := ParsePkcs11Uri(p11keyfile.Pkcs11.Uri) - if err != nil { - return nil, err - } - p11uri.SetEnvMap(p11keyfile.Module.Env) - - return &Pkcs11KeyFileObject{Uri: p11uri}, err -} - -// IsPkcs11PrivateKey checks whether the given YAML represents a Pkcs11 private key -func IsPkcs11PrivateKey(yamlstr []byte) bool { - _, err := ParsePkcs11KeyFile(yamlstr) - return err == nil -} - -// IsPkcs11PublicKey checks whether the given YAML represents a Pkcs11 public key -func IsPkcs11PublicKey(yamlstr []byte) bool { - _, err := ParsePkcs11KeyFile(yamlstr) - return err == nil -} - -// Pkcs11Config describes the layout of a pkcs11 config file -// The file has the following yaml format: -// module-directories: -// - /usr/lib64/pkcs11/ -// allowd-module-paths -// - /usr/lib64/pkcs11/libsofthsm2.so -type Pkcs11Config struct { - ModuleDirectories []string `yaml:"module-directories"` - AllowedModulePaths []string `yaml:"allowed-module-paths"` -} - -// GetDefaultModuleDirectories returns module directories covering -// a variety of Linux distros -func GetDefaultModuleDirectories() []string { - dirs := []string{ - "/usr/lib64/pkcs11/", // Fedora,RHEL,openSUSE - "/usr/lib/pkcs11/", // Fedora,ArchLinux - "/usr/local/lib/pkcs11/", - "/usr/lib/softhsm/", // Debian,Ubuntu - } - - // Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|s390x)-linux-gnu/ - hosttype, ostype, q := getHostAndOsType() - if len(hosttype) > 0 { - dir := fmt.Sprintf("/usr/lib/%s-%s-%s/", hosttype, ostype, q) - dirs = append(dirs, dir) - } - return dirs -} - -// GetDefaultModuleDirectoresFormatted returns the default module 
directories formatted for YAML -func GetDefaultModuleDirectoriesYaml(indent string) string { - res := "" - - for _, dir := range GetDefaultModuleDirectories() { - res += indent + "- " + dir + "\n" - } - return res -} - -// ParsePkcs11ConfigFile parses a pkcs11 config file hat influences the module search behavior -// as well as the set of modules that users are allowed to use -func ParsePkcs11ConfigFile(yamlstr []byte) (*Pkcs11Config, error) { - p11conf := Pkcs11Config{} - - err := yaml.Unmarshal([]byte(yamlstr), &p11conf) - if err != nil { - return &p11conf, errors.Wrapf(err, "Could not parse Pkcs11Config") - } - return &p11conf, nil -} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go deleted file mode 100644 index 448e88c..0000000 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go +++ /dev/null @@ -1,487 +0,0 @@ -// +build cgo - -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package pkcs11 - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "fmt" - "hash" - "net/url" - "os" - "strconv" - "strings" - - "github.com/miekg/pkcs11" - "github.com/pkg/errors" - pkcs11uri "github.com/stefanberger/go-pkcs11uri" -) - -var ( - // OAEPLabel defines the label we use for OAEP encryption; this cannot be changed - OAEPLabel = []byte("") - // OAEPDefaultHash defines the default hash used for OAEP encryption; this cannot be changed - OAEPDefaultHash = "sha1" - - // OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM - OAEPSha1Params = &pkcs11.OAEPParams{ - HashAlg: pkcs11.CKM_SHA_1, - MGF: pkcs11.CKG_MGF1_SHA1, - SourceType: pkcs11.CKZ_DATA_SPECIFIED, - SourceData: OAEPLabel, - } - // OAEPSha256Params describes the OAEP parameters with sha256 hash algorithm - OAEPSha256Params = &pkcs11.OAEPParams{ - HashAlg: pkcs11.CKM_SHA256, - MGF: pkcs11.CKG_MGF1_SHA256, - SourceType: pkcs11.CKZ_DATA_SPECIFIED, - SourceData: OAEPLabel, - } -) - -// rsaPublicEncryptOAEP encrypts the given plaintext with the given *rsa.PublicKey; the -// environment variable OCICRYPT_OAEP_HASHALG can be set to 'sha1' to force usage of sha1 for OAEP (SoftHSM). 
-// This function is needed by clients who are using a public key file for pkcs11 encryption -func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, string, error) { - var ( - hashfunc hash.Hash - hashalg string - ) - - oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG") - // The default is 'sha1' - switch strings.ToLower(oaephash) { - case "sha1", "": - hashfunc = sha1.New() - hashalg = "sha1" - case "sha256": - hashfunc = sha256.New() - hashalg = "sha256" - default: - return nil, "", errors.Errorf("Unsupported OAEP hash '%s'", oaephash) - } - ciphertext, err := rsa.EncryptOAEP(hashfunc, rand.Reader, pubKey, plaintext, OAEPLabel) - if err != nil { - return nil, "", errors.Wrapf(err, "rss.EncryptOAEP failed") - } - - return ciphertext, hashalg, nil -} - -// pkcs11UriGetLoginParameters gets the parameters necessary for login from the Pkcs11URI -// PIN and module are mandatory; slot-id is optional and if not found -1 will be returned -// For a privateKeyOperation a PIN is required and if none is given, this function will return an error -func pkcs11UriGetLoginParameters(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (string, string, int64, error) { - var ( - pin string - err error - ) - if privateKeyOperation { - if !p11uri.HasPIN() { - return "", "", 0, errors.New("Missing PIN for private key operation") - } - } - // some devices require a PIN to find a *public* key object, others don't - pin, _ = p11uri.GetPIN() - - module, err := p11uri.GetModule() - if err != nil { - return "", "", 0, errors.Wrap(err, "No module available in pkcs11 URI") - } - - slotid := int64(-1) - - slot, ok := p11uri.GetPathAttribute("slot-id", false) - if ok { - slotid, err = strconv.ParseInt(slot, 10, 64) - if err != nil { - return "", "", 0, errors.Wrap(err, "slot-id is not a valid number") - } - if slotid < 0 { - return "", "", 0, fmt.Errorf("slot-id is a negative number") - } - if uint64(slotid) > 0xffffffff { - return "", "", 0, fmt.Errorf("slot-id is larger than 
32 bit") - } - } - - return pin, module, slotid, nil -} - -// pkcs11UriGetKeyIdAndLabel gets the key label by retrieving the value of the 'object' attribute -func pkcs11UriGetKeyIdAndLabel(p11uri *pkcs11uri.Pkcs11URI) (string, string, error) { - keyid, ok2 := p11uri.GetPathAttribute("id", false) - label, ok1 := p11uri.GetPathAttribute("object", false) - if !ok1 && !ok2 { - return "", "", errors.New("Neither 'id' nor 'object' attributes were found in pkcs11 URI") - } - return keyid, label, nil -} - -// pkcs11OpenSession opens a session with a pkcs11 device at the given slot and logs in with the given PIN -func pkcs11OpenSession(p11ctx *pkcs11.Ctx, slotid uint, pin string) (session pkcs11.SessionHandle, err error) { - session, err = p11ctx.OpenSession(uint(slotid), pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION) - if err != nil { - return 0, errors.Wrapf(err, "OpenSession to slot %d failed", slotid) - } - if len(pin) > 0 { - err = p11ctx.Login(session, pkcs11.CKU_USER, pin) - if err != nil { - _ = p11ctx.CloseSession(session) - return 0, errors.Wrap(err, "Could not login to device") - } - } - return session, nil -} - -// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (share libary) and to get -// the PIN to use for login; if the URI contains a slot-id, the given slot-id will be used, otherwise -// one slot after the other will be attempted and the first one where login succeeds will be used -func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx *pkcs11.Ctx, session pkcs11.SessionHandle, err error) { - pin, module, slotid, err := pkcs11UriGetLoginParameters(p11uri, privateKeyOperation) - if err != nil { - return nil, 0, err - } - - p11ctx := pkcs11.New(module) - if p11ctx == nil { - return nil, 0, errors.New("Please check module path, input is: " + module) - } - - err = p11ctx.Initialize() - if err != nil { - p11Err := err.(pkcs11.Error) - if p11Err != pkcs11.CKR_CRYPTOKI_ALREADY_INITIALIZED { - return nil, 0, 
errors.Wrap(err, "Initialize failed") - } - } - - if slotid >= 0 { - session, err := pkcs11OpenSession(p11ctx, uint(slotid), pin) - return p11ctx, session, err - } else { - slots, err := p11ctx.GetSlotList(true) - if err != nil { - return nil, 0, errors.Wrap(err, "GetSlotList failed") - } - - tokenlabel, ok := p11uri.GetPathAttribute("token", false) - if !ok { - return nil, 0, errors.New("Missing 'token' attribute since 'slot-id' was not given") - } - - for _, slot := range slots { - ti, err := p11ctx.GetTokenInfo(slot) - if err != nil || ti.Label != tokenlabel { - continue - } - - session, err = pkcs11OpenSession(p11ctx, slot, pin) - if err == nil { - return p11ctx, session, err - } - } - if len(pin) > 0 { - return nil, 0, errors.New("Could not create session to any slot and/or log in") - } - return nil, 0, errors.New("Could not create session to any slot") - } -} - -func pkcs11Logout(ctx *pkcs11.Ctx, session pkcs11.SessionHandle) { - _ = ctx.Logout(session) - _ = ctx.CloseSession(session) - _ = ctx.Finalize() - ctx.Destroy() -} - -// findObject finds an object of the given class with the given keyid and/or label -func findObject(p11ctx *pkcs11.Ctx, session pkcs11.SessionHandle, class uint, keyid, label string) (pkcs11.ObjectHandle, error) { - msg := "" - - template := []*pkcs11.Attribute{ - pkcs11.NewAttribute(pkcs11.CKA_CLASS, class), - } - if len(label) > 0 { - template = append(template, pkcs11.NewAttribute(pkcs11.CKA_LABEL, label)) - msg = fmt.Sprintf("label '%s'", label) - } - if len(keyid) > 0 { - template = append(template, pkcs11.NewAttribute(pkcs11.CKA_ID, keyid)) - if len(msg) > 0 { - msg += " and " - } - msg += url.PathEscape(keyid) - } - - if err := p11ctx.FindObjectsInit(session, template); err != nil { - return 0, errors.Wrap(err, "FindObjectsInit failed") - } - - obj, _, err := p11ctx.FindObjects(session, 100) - if err != nil { - return 0, errors.Wrap(err, "FindObjects failed") - } - - if err := p11ctx.FindObjectsFinal(session); err != nil { - 
return 0, errors.Wrap(err, "FindObjectsFinal failed") - } - if len(obj) > 1 { - return 0, errors.Errorf("There are too many (=%d) keys with %s", len(obj), msg) - } else if len(obj) == 1 { - return obj[0], nil - } - - return 0, errors.Errorf("Could not find any object with %s", msg) -} - -// publicEncryptOAEP uses a public key described by a pkcs11 URI to OAEP encrypt the given plaintext -func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, string, error) { - oldenv, err := setEnvVars(pubKey.Uri.GetEnvMap()) - if err != nil { - return nil, "", err - } - defer restoreEnv(oldenv) - - p11ctx, session, err := pkcs11UriLogin(pubKey.Uri, false) - if err != nil { - return nil, "", err - } - defer pkcs11Logout(p11ctx, session) - - keyid, label, err := pkcs11UriGetKeyIdAndLabel(pubKey.Uri) - if err != nil { - return nil, "", err - } - - p11PubKey, err := findObject(p11ctx, session, pkcs11.CKO_PUBLIC_KEY, keyid, label) - if err != nil { - return nil, "", err - } - - var hashalg string - - var oaep *pkcs11.OAEPParams - oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG") - // the default is sha1 - switch strings.ToLower(oaephash) { - case "sha1", "": - oaep = OAEPSha1Params - hashalg = "sha1" - case "sha256": - oaep = OAEPSha256Params - hashalg = "sha256" - default: - return nil, "", errors.Errorf("Unsupported OAEP hash '%s'", oaephash) - } - - err = p11ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PubKey) - if err != nil { - return nil, "", errors.Wrap(err, "EncryptInit error") - } - - ciphertext, err := p11ctx.Encrypt(session, plaintext) - if err != nil { - return nil, "", errors.Wrap(err, "Encrypt failed") - } - return ciphertext, hashalg, nil -} - -// privateDecryptOAEP uses a pkcs11 URI describing a private key to OAEP decrypt a ciphertext -func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hashalg string) ([]byte, error) { - oldenv, err := 
setEnvVars(privKeyObj.Uri.GetEnvMap()) - if err != nil { - return nil, err - } - defer restoreEnv(oldenv) - - p11ctx, session, err := pkcs11UriLogin(privKeyObj.Uri, true) - if err != nil { - return nil, err - } - defer pkcs11Logout(p11ctx, session) - - keyid, label, err := pkcs11UriGetKeyIdAndLabel(privKeyObj.Uri) - if err != nil { - return nil, err - } - - p11PrivKey, err := findObject(p11ctx, session, pkcs11.CKO_PRIVATE_KEY, keyid, label) - if err != nil { - return nil, err - } - - var oaep *pkcs11.OAEPParams - - // the default is sha1 - switch hashalg { - case "sha1", "": - oaep = OAEPSha1Params - case "sha256": - oaep = OAEPSha256Params - default: - return nil, errors.Errorf("Unsupported hash algorithm '%s' for decryption", hashalg) - } - - err = p11ctx.DecryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PrivKey) - if err != nil { - return nil, errors.Wrapf(err, "DecryptInit failed") - } - plaintext, err := p11ctx.Decrypt(session, ciphertext) - if err != nil { - return nil, errors.Wrapf(err, "Decrypt failed") - } - return plaintext, err -} - -// -// The following part deals with the JSON formatted message for multiple pkcs11 recipients -// - -// Pkcs11Blob holds the encrypted blobs for all recipients; this is what we will put into the image's annotations -type Pkcs11Blob struct { - Version uint `json:"version"` - Recipients []Pkcs11Recipient `json:"recipients"` -} - -// Pkcs11Recipient holds the b64-encoded and encrypted blob for a particular recipient -type Pkcs11Recipient struct { - Version uint `json:"version"` - Blob string `json:"blob"` - Hash string `json:"hash,omitempty"` -} - -// EncryptMultiple encrypts for one or multiple pkcs11 devices; the public keys passed to this function -// may either be *rsa.PublicKey or *pkcs11uri.Pkcs11URI; the returned byte array is a JSON string of the -// following format: -// { -// recipients: [ // recipient list -// { -// "version": 0, -// "blob": , -// "hash": -// } , -// { 
-// "version": 0, -// "blob": , -// "hash": -// } , -// [...] -// ] -// } -func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { - var ( - ciphertext []byte - err error - pkcs11blob Pkcs11Blob = Pkcs11Blob{Version: 0} - hashalg string - ) - - for _, pubKey := range pubKeys { - switch pkey := pubKey.(type) { - case *rsa.PublicKey: - ciphertext, hashalg, err = rsaPublicEncryptOAEP(pkey, data) - case *Pkcs11KeyFileObject: - ciphertext, hashalg, err = publicEncryptOAEP(pkey, data) - default: - err = errors.Errorf("Unsupported key object type for pkcs11 public key") - } - if err != nil { - return nil, err - } - - if hashalg == OAEPDefaultHash { - hashalg = "" - } - recipient := Pkcs11Recipient{ - Version: 0, - Blob: base64.StdEncoding.EncodeToString(ciphertext), - Hash: hashalg, - } - - pkcs11blob.Recipients = append(pkcs11blob.Recipients, recipient) - } - return json.Marshal(&pkcs11blob) -} - -// Decrypt tries to decrypt one of the recipients' blobs using a pkcs11 private key. -// The input pkcs11blobstr is a string with the following format: -// { -// recipients: [ // recipient list -// { -// "version": 0, -// "blob": , -// "hash": -// } , -// { -// "version": 0, -// "blob": , -// "hash": -// } , -// [...] 
-// } -func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { - pkcs11blob := Pkcs11Blob{} - err := json.Unmarshal(pkcs11blobstr, &pkcs11blob) - if err != nil { - return nil, errors.Wrapf(err, "Could not parse Pkcs11Blob") - } - switch pkcs11blob.Version { - case 0: - // latest supported version - default: - return nil, errors.Errorf("Found Pkcs11Blob with version %d but maximum supported version is 0.", pkcs11blob.Version) - } - // since we do trial and error, collect all encountered errors - errs := "" - - for _, recipient := range pkcs11blob.Recipients { - switch recipient.Version { - case 0: - // last supported version - default: - return nil, errors.Errorf("Found Pkcs11Recipient with version %d but maximum supported version is 0.", recipient.Version) - } - - ciphertext, err := base64.StdEncoding.DecodeString(recipient.Blob) - if err != nil || len(ciphertext) == 0 { - // This should never happen... we skip over decoding issues - errs += fmt.Sprintf("Base64 decoding failed: %s\n", err) - continue - } - // try all keys until one works - for _, privKeyObj := range privKeyObjs { - plaintext, err := privateDecryptOAEP(privKeyObj, ciphertext, recipient.Hash) - if err == nil { - return plaintext, nil - } - if uri, err2 := privKeyObj.Uri.Format(); err2 == nil { - errs += fmt.Sprintf("%s : %s\n", uri, err) - } else { - errs += fmt.Sprintf("%s\n", err) - } - } - } - - return nil, errors.Errorf("Could not find a pkcs11 key for decryption:\n%s", errs) -} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go deleted file mode 100644 index 6edf752..0000000 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build !cgo - -/* - Copyright The ocicrypt Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pkcs11 - -import ( - "github.com/pkg/errors" -) - -func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { - return nil, errors.Errorf("ocicrypt pkcs11 not supported on this build") -} - -func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { - return nil, errors.Errorf("ocicrypt pkcs11 not supported on this build") -} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go deleted file mode 100644 index 306e372..0000000 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go +++ /dev/null @@ -1,114 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package pkcs11 - -import ( - "os" - "runtime" - "strings" - "sync" - - "github.com/pkg/errors" -) - -var ( - envLock sync.Mutex -) - -// setEnvVars sets the environment variables given in the map and locks the environment from -// modification with the same function; if successful, you *must* call restoreEnv with the return -// value from this function -func setEnvVars(env map[string]string) ([]string, error) { - envLock.Lock() - - if len(env) == 0 { - return nil, nil - } - - oldenv := os.Environ() - - for k, v := range env { - err := os.Setenv(k, v) - if err != nil { - restoreEnv(oldenv) - return nil, errors.Wrapf(err, "Could not set environment variable '%s' to '%s'", k, v) - } - } - - return oldenv, nil -} - -func arrayToMap(elements []string) map[string]string { - o := make(map[string]string) - - for _, element := range elements { - p := strings.SplitN(element, "=", 2) - if len(p) == 2 { - o[p[0]] = p[1] - } - } - - return o -} - -// restoreEnv restores the environment to be exactly as given in the array of strings -// and unlocks the lock -func restoreEnv(envs []string) { - if envs != nil && len(envs) >= 0 { - target := arrayToMap(envs) - curr := arrayToMap(os.Environ()) - - for nc, vc := range curr { - vt, ok := target[nc] - if !ok { - os.Unsetenv(nc) - } else if vc == vt { - delete(target, nc) - } - } - - for nt, vt := range target { - os.Setenv(nt, vt) - } - } - - envLock.Unlock() -} - -func getHostAndOsType() (string, string, string) { - ht := "" - ot := "" - st := "" - switch runtime.GOOS { - case "linux": - ot = "linux" - st = "gnu" - switch runtime.GOARCH { - case "arm": - ht = "arm" - case "arm64": - ht = "aarch64" - case "amd64": - ht = "x86_64" - case "ppc64le": - ht = "powerpc64le" - case "s390x": - ht = "s390x" - } - } - return ht, ot, st -} diff --git a/vendor/github.com/containers/ocicrypt/encryption.go b/vendor/github.com/containers/ocicrypt/encryption.go deleted file mode 100644 index f5142cc..0000000 --- 
a/vendor/github.com/containers/ocicrypt/encryption.go +++ /dev/null @@ -1,350 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ocicrypt - -import ( - "encoding/base64" - "encoding/json" - "fmt" - keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" - "github.com/containers/ocicrypt/keywrap/keyprovider" - "io" - "strings" - - "github.com/containers/ocicrypt/blockcipher" - "github.com/containers/ocicrypt/config" - "github.com/containers/ocicrypt/keywrap" - "github.com/containers/ocicrypt/keywrap/jwe" - "github.com/containers/ocicrypt/keywrap/pgp" - "github.com/containers/ocicrypt/keywrap/pkcs11" - "github.com/containers/ocicrypt/keywrap/pkcs7" - "github.com/opencontainers/go-digest" - log "github.com/sirupsen/logrus" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// EncryptLayerFinalizer is a finalizer run to return the annotations to set for -// the encrypted layer -type EncryptLayerFinalizer func() (map[string]string, error) - -func init() { - keyWrappers = make(map[string]keywrap.KeyWrapper) - keyWrapperAnnotations = make(map[string]string) - RegisterKeyWrapper("pgp", pgp.NewKeyWrapper()) - RegisterKeyWrapper("jwe", jwe.NewKeyWrapper()) - RegisterKeyWrapper("pkcs7", pkcs7.NewKeyWrapper()) - RegisterKeyWrapper("pkcs11", pkcs11.NewKeyWrapper()) - ic, err := keyproviderconfig.GetConfiguration() - if err != nil { - log.Error(err) - } else if ic != nil { - 
for provider, attrs := range ic.KeyProviderConfig { - RegisterKeyWrapper("provider."+provider, keyprovider.NewKeyWrapper(provider, attrs)) - } - } -} - -var keyWrappers map[string]keywrap.KeyWrapper -var keyWrapperAnnotations map[string]string - -// RegisterKeyWrapper allows to register key wrappers by their encryption scheme -func RegisterKeyWrapper(scheme string, iface keywrap.KeyWrapper) { - keyWrappers[scheme] = iface - keyWrapperAnnotations[iface.GetAnnotationID()] = scheme -} - -// GetKeyWrapper looks up the encryptor interface given an encryption scheme (gpg, jwe) -func GetKeyWrapper(scheme string) keywrap.KeyWrapper { - return keyWrappers[scheme] -} - -// GetWrappedKeysMap returns a map of wrappedKeys as values in a -// map with the encryption scheme(s) as the key(s) -func GetWrappedKeysMap(desc ocispec.Descriptor) map[string]string { - wrappedKeysMap := make(map[string]string) - - for annotationsID, scheme := range keyWrapperAnnotations { - if annotation, ok := desc.Annotations[annotationsID]; ok { - wrappedKeysMap[scheme] = annotation - } - } - return wrappedKeysMap -} - -// EncryptLayer encrypts the layer by running one encryptor after the other -func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error) { - var ( - encLayerReader io.Reader - err error - encrypted bool - bcFin blockcipher.Finalizer - privOptsData []byte - pubOptsData []byte - ) - - if ec == nil { - return nil, nil, errors.New("EncryptConfig must not be nil") - } - - for annotationsID := range keyWrapperAnnotations { - annotation := desc.Annotations[annotationsID] - if annotation != "" { - privOptsData, err = decryptLayerKeyOptsData(&ec.DecryptConfig, desc) - if err != nil { - return nil, nil, err - } - pubOptsData, err = getLayerPubOpts(desc) - if err != nil { - return nil, nil, err - } - // already encrypted! 
- encrypted = true - } - } - - if !encrypted { - encLayerReader, bcFin, err = commonEncryptLayer(encOrPlainLayerReader, desc.Digest, blockcipher.AES256CTR) - if err != nil { - return nil, nil, err - } - } - - encLayerFinalizer := func() (map[string]string, error) { - // If layer was already encrypted, bcFin should be nil, use existing optsData - if bcFin != nil { - opts, err := bcFin() - if err != nil { - return nil, err - } - privOptsData, err = json.Marshal(opts.Private) - if err != nil { - return nil, errors.Wrapf(err, "could not JSON marshal opts") - } - pubOptsData, err = json.Marshal(opts.Public) - if err != nil { - return nil, errors.Wrapf(err, "could not JSON marshal opts") - } - } - - newAnnotations := make(map[string]string) - keysWrapped := false - for annotationsID, scheme := range keyWrapperAnnotations { - b64Annotations := desc.Annotations[annotationsID] - keywrapper := GetKeyWrapper(scheme) - b64Annotations, err = preWrapKeys(keywrapper, ec, b64Annotations, privOptsData) - if err != nil { - return nil, err - } - if b64Annotations != "" { - keysWrapped = true - newAnnotations[annotationsID] = b64Annotations - } - } - - if !keysWrapped { - return nil, errors.New("no wrapped keys produced by encryption") - } - newAnnotations["org.opencontainers.image.enc.pubopts"] = base64.StdEncoding.EncodeToString(pubOptsData) - - if len(newAnnotations) == 0 { - return nil, errors.New("no encryptor found to handle encryption") - } - - return newAnnotations, err - } - - // if nothing was encrypted, we just return encLayer = nil - return encLayerReader, encLayerFinalizer, err - -} - -// preWrapKeys calls WrapKeys and handles the base64 encoding and concatenation of the -// annotation data -func preWrapKeys(keywrapper keywrap.KeyWrapper, ec *config.EncryptConfig, b64Annotations string, optsData []byte) (string, error) { - newAnnotation, err := keywrapper.WrapKeys(ec, optsData) - if err != nil || len(newAnnotation) == 0 { - return b64Annotations, err - } - 
b64newAnnotation := base64.StdEncoding.EncodeToString(newAnnotation) - if b64Annotations == "" { - return b64newAnnotation, nil - } - return b64Annotations + "," + b64newAnnotation, nil -} - -// DecryptLayer decrypts a layer trying one keywrap.KeyWrapper after the other to see whether it -// can apply the provided private key -// If unwrapOnly is set we will only try to decrypt the layer encryption key and return -func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error) { - if dc == nil { - return nil, "", errors.New("DecryptConfig must not be nil") - } - privOptsData, err := decryptLayerKeyOptsData(dc, desc) - if err != nil || unwrapOnly { - return nil, "", err - } - - var pubOptsData []byte - pubOptsData, err = getLayerPubOpts(desc) - if err != nil { - return nil, "", err - } - - return commonDecryptLayer(encLayerReader, privOptsData, pubOptsData) -} - -func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor) ([]byte, error) { - privKeyGiven := false - errs := "" - for annotationsID, scheme := range keyWrapperAnnotations { - b64Annotation := desc.Annotations[annotationsID] - if b64Annotation != "" { - keywrapper := GetKeyWrapper(scheme) - - if keywrapper.NoPossibleKeys(dc.Parameters) { - continue - } - - if len(keywrapper.GetPrivateKeys(dc.Parameters)) > 0 { - privKeyGiven = true - } - optsData, err := preUnwrapKey(keywrapper, dc, b64Annotation) - if err != nil { - // try next keywrap.KeyWrapper - errs += fmt.Sprintf("%s\n", err) - continue - } - if optsData == nil { - // try next keywrap.KeyWrapper - continue - } - return optsData, nil - } - } - if !privKeyGiven { - return nil, errors.New("missing private key needed for decryption") - } - return nil, errors.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs) -} - -func getLayerPubOpts(desc ocispec.Descriptor) ([]byte, error) { - pubOptsString := 
desc.Annotations["org.opencontainers.image.enc.pubopts"] - if pubOptsString == "" { - return json.Marshal(blockcipher.PublicLayerBlockCipherOptions{}) - } - return base64.StdEncoding.DecodeString(pubOptsString) -} - -// preUnwrapKey decodes the comma separated base64 strings and calls the Unwrap function -// of the given keywrapper with it and returns the result in case the Unwrap functions -// does not return an error. If all attempts fail, an error is returned. -func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64Annotations string) ([]byte, error) { - if b64Annotations == "" { - return nil, nil - } - errs := "" - for _, b64Annotation := range strings.Split(b64Annotations, ",") { - annotation, err := base64.StdEncoding.DecodeString(b64Annotation) - if err != nil { - return nil, errors.New("could not base64 decode the annotation") - } - optsData, err := keywrapper.UnwrapKey(dc, annotation) - if err != nil { - errs += fmt.Sprintf("- %s\n", err) - continue - } - return optsData, nil - } - return nil, errors.Errorf("no suitable key found for decrypting layer key:\n%s", errs) -} - -// commonEncryptLayer is a function to encrypt the plain layer using a new random -// symmetric key and return the LayerBlockCipherHandler's JSON in string form for -// later use during decryption -func commonEncryptLayer(plainLayerReader io.Reader, d digest.Digest, typ blockcipher.LayerCipherType) (io.Reader, blockcipher.Finalizer, error) { - lbch, err := blockcipher.NewLayerBlockCipherHandler() - if err != nil { - return nil, nil, err - } - - encLayerReader, bcFin, err := lbch.Encrypt(plainLayerReader, typ) - if err != nil { - return nil, nil, err - } - - newBcFin := func() (blockcipher.LayerBlockCipherOptions, error) { - lbco, err := bcFin() - if err != nil { - return blockcipher.LayerBlockCipherOptions{}, err - } - lbco.Private.Digest = d - return lbco, nil - } - - return encLayerReader, newBcFin, err -} - -// commonDecryptLayer decrypts an encrypted layer 
previously encrypted with commonEncryptLayer -// by passing along the optsData -func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsData []byte) (io.Reader, digest.Digest, error) { - privOpts := blockcipher.PrivateLayerBlockCipherOptions{} - err := json.Unmarshal(privOptsData, &privOpts) - if err != nil { - return nil, "", errors.Wrapf(err, "could not JSON unmarshal privOptsData") - } - - lbch, err := blockcipher.NewLayerBlockCipherHandler() - if err != nil { - return nil, "", err - } - - pubOpts := blockcipher.PublicLayerBlockCipherOptions{} - if len(pubOptsData) > 0 { - err := json.Unmarshal(pubOptsData, &pubOpts) - if err != nil { - return nil, "", errors.Wrapf(err, "could not JSON unmarshal pubOptsData") - } - } - - opts := blockcipher.LayerBlockCipherOptions{ - Private: privOpts, - Public: pubOpts, - } - - plainLayerReader, opts, err := lbch.Decrypt(encLayerReader, opts) - if err != nil { - return nil, "", err - } - - return plainLayerReader, opts.Private.Digest, nil -} - -// FilterOutAnnotations filters out the annotations belonging to the image encryption 'namespace' -// and returns a map with those taken out -func FilterOutAnnotations(annotations map[string]string) map[string]string { - a := make(map[string]string) - if len(annotations) > 0 { - for k, v := range annotations { - if strings.HasPrefix(k, "org.opencontainers.image.enc.") { - continue - } - a[k] = v - } - } - return a -} diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go deleted file mode 100644 index b9d5553..0000000 --- a/vendor/github.com/containers/ocicrypt/gpg.go +++ /dev/null @@ -1,425 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ocicrypt - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "regexp" - "strconv" - "strings" - - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/term" -) - -// GPGVersion enum representing the GPG client version to use. -type GPGVersion int - -const ( - // GPGv2 signifies gpgv2+ - GPGv2 GPGVersion = iota - // GPGv1 signifies gpgv1+ - GPGv1 - // GPGVersionUndetermined signifies gpg client version undetermined - GPGVersionUndetermined -) - -// GPGClient defines an interface for wrapping the gpg command line tools -type GPGClient interface { - // ReadGPGPubRingFile gets the byte sequence of the gpg public keyring - ReadGPGPubRingFile() ([]byte, error) - // GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase - GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) - // GetSecretKeyDetails gets the details of a secret key - GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) - // GetKeyDetails gets the details of a public key - GetKeyDetails(keyid uint64) ([]byte, bool, error) - // ResolveRecipients resolves PGP key ids to user names - ResolveRecipients([]string) []string -} - -// gpgClient contains generic gpg client information -type gpgClient struct { - gpgHomeDir string -} - -// gpgv2Client is a gpg2 client -type gpgv2Client struct { - gpgClient -} - -// gpgv1Client is a gpg client -type gpgv1Client struct { - gpgClient -} - -// GuessGPGVersion guesses the version of gpg. Defaults to gpg2 if exists, if -// not defaults to regular gpg. 
-func GuessGPGVersion() GPGVersion { - if err := exec.Command("gpg2", "--version").Run(); err == nil { - return GPGv2 - } else if err := exec.Command("gpg", "--version").Run(); err == nil { - return GPGv1 - } else { - return GPGVersionUndetermined - } -} - -// NewGPGClient creates a new GPGClient object representing the given version -// and using the given home directory -func NewGPGClient(gpgVersion, gpgHomeDir string) (GPGClient, error) { - v := new(GPGVersion) - switch gpgVersion { - case "v1": - *v = GPGv1 - case "v2": - *v = GPGv2 - default: - v = nil - } - return newGPGClient(v, gpgHomeDir) -} - -func newGPGClient(version *GPGVersion, homedir string) (GPGClient, error) { - var gpgVersion GPGVersion - if version != nil { - gpgVersion = *version - } else { - gpgVersion = GuessGPGVersion() - } - - switch gpgVersion { - case GPGv1: - return &gpgv1Client{ - gpgClient: gpgClient{gpgHomeDir: homedir}, - }, nil - case GPGv2: - return &gpgv2Client{ - gpgClient: gpgClient{gpgHomeDir: homedir}, - }, nil - case GPGVersionUndetermined: - return nil, fmt.Errorf("unable to determine GPG version") - default: - return nil, fmt.Errorf("unhandled case: NewGPGClient") - } -} - -// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase -func (gc *gpgv2Client) GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) { - var args []string - - if gc.gpgHomeDir != "" { - args = append(args, []string{"--homedir", gc.gpgHomeDir}...) - } - - rfile, wfile, err := os.Pipe() - if err != nil { - return nil, errors.Wrapf(err, "could not create pipe") - } - defer func() { - rfile.Close() - wfile.Close() - }() - // fill pipe in background - go func(passphrase string) { - _, _ = wfile.Write([]byte(passphrase)) - wfile.Close() - }(passphrase) - - args = append(args, []string{"--pinentry-mode", "loopback", "--batch", "--passphrase-fd", fmt.Sprintf("%d", 3), "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) - - cmd := exec.Command("gpg2", args...) 
- cmd.ExtraFiles = []*os.File{rfile} - - return runGPGGetOutput(cmd) -} - -// ReadGPGPubRingFile reads the GPG public key ring file -func (gc *gpgv2Client) ReadGPGPubRingFile() ([]byte, error) { - var args []string - - if gc.gpgHomeDir != "" { - args = append(args, []string{"--homedir", gc.gpgHomeDir}...) - } - args = append(args, []string{"--batch", "--export"}...) - - cmd := exec.Command("gpg2", args...) - - return runGPGGetOutput(cmd) -} - -func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { - var args []string - - if gc.gpgHomeDir != "" { - args = []string{"--homedir", gc.gpgHomeDir} - } - args = append(args, option, fmt.Sprintf("0x%x", keyid)) - - cmd := exec.Command("gpg2", args...) - - keydata, err := runGPGGetOutput(cmd) - return keydata, err == nil, err -} - -// GetSecretKeyDetails retrieves the secret key details of key with keyid. -// returns a byte array of the details and a bool if the key exists -func (gc *gpgv2Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { - return gc.getKeyDetails("-K", keyid) -} - -// GetKeyDetails retrieves the public key details of key with keyid. -// returns a byte array of the details and a bool if the key exists -func (gc *gpgv2Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) { - return gc.getKeyDetails("-k", keyid) -} - -// ResolveRecipients converts PGP keyids to email addresses, if possible -func (gc *gpgv2Client) ResolveRecipients(recipients []string) []string { - return resolveRecipients(gc, recipients) -} - -// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase -func (gc *gpgv1Client) GetGPGPrivateKey(keyid uint64, _ string) ([]byte, error) { - var args []string - - if gc.gpgHomeDir != "" { - args = append(args, []string{"--homedir", gc.gpgHomeDir}...) - } - args = append(args, []string{"--batch", "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) - - cmd := exec.Command("gpg", args...) 
- - return runGPGGetOutput(cmd) -} - -// ReadGPGPubRingFile reads the GPG public key ring file -func (gc *gpgv1Client) ReadGPGPubRingFile() ([]byte, error) { - var args []string - - if gc.gpgHomeDir != "" { - args = append(args, []string{"--homedir", gc.gpgHomeDir}...) - } - args = append(args, []string{"--batch", "--export"}...) - - cmd := exec.Command("gpg", args...) - - return runGPGGetOutput(cmd) -} - -func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { - var args []string - - if gc.gpgHomeDir != "" { - args = []string{"--homedir", gc.gpgHomeDir} - } - args = append(args, option, fmt.Sprintf("0x%x", keyid)) - - cmd := exec.Command("gpg", args...) - - keydata, err := runGPGGetOutput(cmd) - - return keydata, err == nil, err -} - -// GetSecretKeyDetails retrieves the secret key details of key with keyid. -// returns a byte array of the details and a bool if the key exists -func (gc *gpgv1Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { - return gc.getKeyDetails("-K", keyid) -} - -// GetKeyDetails retrieves the public key details of key with keyid. 
-// returns a byte array of the details and a bool if the key exists -func (gc *gpgv1Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) { - return gc.getKeyDetails("-k", keyid) -} - -// ResolveRecipients converts PGP keyids to email addresses, if possible -func (gc *gpgv1Client) ResolveRecipients(recipients []string) []string { - return resolveRecipients(gc, recipients) -} - -// runGPGGetOutput runs the GPG commandline and returns stdout as byte array -// and any stderr in the error -func runGPGGetOutput(cmd *exec.Cmd) ([]byte, error) { - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, err - } - if err := cmd.Start(); err != nil { - return nil, err - } - - stdoutstr, err2 := ioutil.ReadAll(stdout) - stderrstr, _ := ioutil.ReadAll(stderr) - - if err := cmd.Wait(); err != nil { - return nil, fmt.Errorf("error from %s: %s", cmd.Path, string(stderrstr)) - } - - return stdoutstr, err2 -} - -// resolveRecipients walks the list of recipients and attempts to convert -// all keyIds to email addresses; if something goes wrong during the -// conversion of a recipient, the original string is returned for that -// recpient -func resolveRecipients(gc GPGClient, recipients []string) []string { - var result []string - - for _, recipient := range recipients { - keyID, err := strconv.ParseUint(recipient, 0, 64) - if err != nil { - result = append(result, recipient) - } else { - details, found, _ := gc.GetKeyDetails(keyID) - if !found { - result = append(result, recipient) - } else { - email := extractEmailFromDetails(details) - if email == "" { - result = append(result, recipient) - } else { - result = append(result, email) - } - } - } - } - return result -} - -var emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P.+)>`) - -func extractEmailFromDetails(details []byte) string { - loc := emailPattern.FindSubmatchIndex(details) - if len(loc) == 0 { - return "" - } - return 
string(emailPattern.Expand(nil, []byte("$email"), details, loc)) -} - -// uint64ToStringArray converts an array of uint64's to an array of strings -// by applying a format string to each uint64 -func uint64ToStringArray(format string, in []uint64) []string { - var ret []string - - for _, v := range in { - ret = append(ret, fmt.Sprintf(format, v)) - } - return ret -} - -// GPGGetPrivateKey walks the list of layerInfos and tries to decrypt the -// wrapped symmetric keys. For this it determines whether a private key is -// in the GPGVault or on this system and prompts for the passwords for those -// that are available. If we do not find a private key on the system for -// getting to the symmetric key of a layer then an error is generated. -func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault GPGVault, mustFindKey bool) (gpgPrivKeys [][]byte, gpgPrivKeysPwds [][]byte, err error) { - // PrivateKeyData describes a private key - type PrivateKeyData struct { - KeyData []byte - KeyDataPassword []byte - } - var pkd PrivateKeyData - keyIDPasswordMap := make(map[uint64]PrivateKeyData) - - for _, desc := range descs { - for scheme, b64pgpPackets := range GetWrappedKeysMap(desc) { - if scheme != "pgp" { - continue - } - keywrapper := GetKeyWrapper(scheme) - if keywrapper == nil { - return nil, nil, errors.Errorf("could not get KeyWrapper for %s\n", scheme) - } - keyIds, err := keywrapper.GetKeyIdsFromPacket(b64pgpPackets) - if err != nil { - return nil, nil, err - } - - found := false - for _, keyid := range keyIds { - // do we have this key? 
-- first check the vault - if gpgVault != nil { - _, keydata := gpgVault.GetGPGPrivateKey(keyid) - if len(keydata) > 0 { - pkd = PrivateKeyData{ - KeyData: keydata, - KeyDataPassword: nil, // password not supported in this case - } - keyIDPasswordMap[keyid] = pkd - found = true - break - } - } else if gpgClient != nil { - // check the local system's gpg installation - keyinfo, haveKey, _ := gpgClient.GetSecretKeyDetails(keyid) - // this may fail if the key is not here; we ignore the error - if !haveKey { - // key not on this system - continue - } - - _, found = keyIDPasswordMap[keyid] - if !found { - fmt.Printf("Passphrase required for Key id 0x%x: \n%v", keyid, string(keyinfo)) - fmt.Printf("Enter passphrase for key with Id 0x%x: ", keyid) - - password, err := term.ReadPassword(int(os.Stdin.Fd())) - fmt.Printf("\n") - if err != nil { - return nil, nil, err - } - keydata, err := gpgClient.GetGPGPrivateKey(keyid, string(password)) - if err != nil { - return nil, nil, err - } - pkd = PrivateKeyData{ - KeyData: keydata, - KeyDataPassword: password, - } - keyIDPasswordMap[keyid] = pkd - found = true - } - break - } else { - return nil, nil, errors.New("no GPGVault or GPGClient passed") - } - } - if !found && len(b64pgpPackets) > 0 && mustFindKey { - ids := uint64ToStringArray("0x%x", keyIds) - - return nil, nil, errors.Errorf("missing key for decryption of layer %x of %s. Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", ")) - } - } - } - - for _, pkd := range keyIDPasswordMap { - gpgPrivKeys = append(gpgPrivKeys, pkd.KeyData) - gpgPrivKeysPwds = append(gpgPrivKeysPwds, pkd.KeyDataPassword) - } - - return gpgPrivKeys, gpgPrivKeysPwds, nil -} diff --git a/vendor/github.com/containers/ocicrypt/gpgvault.go b/vendor/github.com/containers/ocicrypt/gpgvault.go deleted file mode 100644 index dd9a100..0000000 --- a/vendor/github.com/containers/ocicrypt/gpgvault.go +++ /dev/null @@ -1,100 +0,0 @@ -/* - Copyright The ocicrypt Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ocicrypt - -import ( - "bytes" - "io/ioutil" - - "github.com/pkg/errors" - "golang.org/x/crypto/openpgp" - "golang.org/x/crypto/openpgp/packet" -) - -// GPGVault defines an interface for wrapping multiple secret key rings -type GPGVault interface { - // AddSecretKeyRingData adds a secret keyring via its raw byte array - AddSecretKeyRingData(gpgSecretKeyRingData []byte) error - // AddSecretKeyRingDataArray adds secret keyring via its raw byte arrays - AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error - // AddSecretKeyRingFiles adds secret keyrings given their filenames - AddSecretKeyRingFiles(filenames []string) error - // GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase - GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) -} - -// gpgVault wraps an array of gpgSecretKeyRing -type gpgVault struct { - entityLists []openpgp.EntityList - keyDataList [][]byte // the raw data original passed in -} - -// NewGPGVault creates an empty GPGVault -func NewGPGVault() GPGVault { - return &gpgVault{} -} - -// AddSecretKeyRingData adds a secret keyring's to the gpgVault; the raw byte -// array read from the file must be passed and will be parsed by this function -func (g *gpgVault) AddSecretKeyRingData(gpgSecretKeyRingData []byte) error { - // read the private keys - r := bytes.NewReader(gpgSecretKeyRingData) - entityList, err := openpgp.ReadKeyRing(r) - if err != nil { - return 
errors.Wrapf(err, "could not read keyring") - } - g.entityLists = append(g.entityLists, entityList) - g.keyDataList = append(g.keyDataList, gpgSecretKeyRingData) - return nil -} - -// AddSecretKeyRingDataArray adds secret keyrings to the gpgVault; the raw byte -// arrays read from files must be passed -func (g *gpgVault) AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error { - for _, gpgSecretKeyRingData := range gpgSecretKeyRingDataArray { - if err := g.AddSecretKeyRingData(gpgSecretKeyRingData); err != nil { - return err - } - } - return nil -} - -// AddSecretKeyRingFiles adds the secret key rings given their filenames -func (g *gpgVault) AddSecretKeyRingFiles(filenames []string) error { - for _, filename := range filenames { - gpgSecretKeyRingData, err := ioutil.ReadFile(filename) - if err != nil { - return err - } - err = g.AddSecretKeyRingData(gpgSecretKeyRingData) - if err != nil { - return err - } - } - return nil -} - -// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase -func (g *gpgVault) GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) { - for i, el := range g.entityLists { - decKeys := el.KeysByIdUsage(keyid, packet.KeyFlagEncryptCommunications) - if len(decKeys) > 0 { - return decKeys, g.keyDataList[i] - } - } - return nil, nil -} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go deleted file mode 100644 index 41d0f1b..0000000 --- a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package jwe - -import ( - "crypto/ecdsa" - - "github.com/containers/ocicrypt/config" - "github.com/containers/ocicrypt/keywrap" - "github.com/containers/ocicrypt/utils" - "github.com/pkg/errors" - jose "gopkg.in/square/go-jose.v2" -) - -type jweKeyWrapper struct { -} - -func (kw *jweKeyWrapper) GetAnnotationID() string { - return "org.opencontainers.image.enc.keys.jwe" -} - -// NewKeyWrapper returns a new key wrapping interface using jwe -func NewKeyWrapper() keywrap.KeyWrapper { - return &jweKeyWrapper{} -} - -// WrapKeys wraps the session key for recpients and encrypts the optsData, which -// describe the symmetric key used for encrypting the layer -func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { - var joseRecipients []jose.Recipient - - err := addPubKeys(&joseRecipients, ec.Parameters["pubkeys"]) - if err != nil { - return nil, err - } - // no recipients is not an error... 
- if len(joseRecipients) == 0 { - return nil, nil - } - - encrypter, err := jose.NewMultiEncrypter(jose.A256GCM, joseRecipients, nil) - if err != nil { - return nil, errors.Wrapf(err, "jose.NewMultiEncrypter failed") - } - jwe, err := encrypter.Encrypt(optsData) - if err != nil { - return nil, errors.Wrapf(err, "JWE Encrypt failed") - } - return []byte(jwe.FullSerialize()), nil -} - -func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) { - jwe, err := jose.ParseEncrypted(string(jweString)) - if err != nil { - return nil, errors.New("jose.ParseEncrypted failed") - } - - privKeys := kw.GetPrivateKeys(dc.Parameters) - if len(privKeys) == 0 { - return nil, errors.New("No private keys found for JWE decryption") - } - privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters) - if len(privKeysPasswords) != len(privKeys) { - return nil, errors.New("Private key password array length must be same as that of private keys") - } - - for idx, privKey := range privKeys { - key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "JWE") - if err != nil { - return nil, err - } - _, _, plain, err := jwe.DecryptMulti(key) - if err == nil { - return plain, nil - } - } - return nil, errors.New("JWE: No suitable private key found for decryption") -} - -func (kw *jweKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { - return len(kw.GetPrivateKeys(dcparameters)) == 0 -} - -func (kw *jweKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { - return dcparameters["privkeys"] -} - -func (kw *jweKeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte { - return dcparameters["privkeys-passwords"] -} - -func (kw *jweKeyWrapper) GetKeyIdsFromPacket(b64jwes string) ([]uint64, error) { - return nil, nil -} - -func (kw *jweKeyWrapper) GetRecipients(b64jwes string) ([]string, error) { - return []string{"[jwe]"}, nil -} - -func addPubKeys(joseRecipients *[]jose.Recipient, pubKeys 
[][]byte) error { - if len(pubKeys) == 0 { - return nil - } - for _, pubKey := range pubKeys { - key, err := utils.ParsePublicKey(pubKey, "JWE") - if err != nil { - return err - } - - alg := jose.RSA_OAEP - switch key.(type) { - case *ecdsa.PublicKey: - alg = jose.ECDH_ES_A256KW - } - - *joseRecipients = append(*joseRecipients, jose.Recipient{ - Algorithm: alg, - Key: key, - }) - } - return nil -} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go deleted file mode 100644 index 3b4c47e..0000000 --- a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go +++ /dev/null @@ -1,242 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package keyprovider - -import ( - "context" - "encoding/json" - "github.com/containers/ocicrypt/config" - keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" - "github.com/containers/ocicrypt/keywrap" - "github.com/containers/ocicrypt/utils" - keyproviderpb "github.com/containers/ocicrypt/utils/keyprovider" - "github.com/pkg/errors" - log "github.com/sirupsen/logrus" - "google.golang.org/grpc" -) - -type keyProviderKeyWrapper struct { - provider string - attrs keyproviderconfig.KeyProviderAttrs -} - -func (kw *keyProviderKeyWrapper) GetAnnotationID() string { - return "org.opencontainers.image.enc.keys.provider." 
+ kw.provider -} - -// NewKeyWrapper returns a new key wrapping interface using keyprovider -func NewKeyWrapper(p string, a keyproviderconfig.KeyProviderAttrs) keywrap.KeyWrapper { - return &keyProviderKeyWrapper{provider: p, attrs: a} -} - -type KeyProviderKeyWrapProtocolOperation string - -var ( - OpKeyWrap KeyProviderKeyWrapProtocolOperation = "keywrap" - OpKeyUnwrap KeyProviderKeyWrapProtocolOperation = "keyunwrap" -) - -// KeyProviderKeyWrapProtocolInput defines the input to the key provider binary or grpc method. -type KeyProviderKeyWrapProtocolInput struct { - // Operation is either "keywrap" or "keyunwrap" - Operation KeyProviderKeyWrapProtocolOperation `json:"op"` - // KeyWrapParams encodes the arguments to key wrap if operation is set to wrap - KeyWrapParams KeyWrapParams `json:"keywrapparams,omitempty"` - // KeyUnwrapParams encodes the arguments to key unwrap if operation is set to unwrap - KeyUnwrapParams KeyUnwrapParams `json:"keyunwrapparams,omitempty"` -} - -// KeyProviderKeyWrapProtocolOutput defines the output of the key provider binary or grpc method. 
-type KeyProviderKeyWrapProtocolOutput struct { - // KeyWrapResult encodes the results to key wrap if operation is to wrap - KeyWrapResults KeyWrapResults `json:"keywrapresults,omitempty"` - // KeyUnwrapResult encodes the result to key unwrap if operation is to unwrap - KeyUnwrapResults KeyUnwrapResults `json:"keyunwrapresults,omitempty"` -} - -type KeyWrapParams struct { - Ec *config.EncryptConfig `json:"ec"` - OptsData []byte `json:"optsdata"` -} - -type KeyUnwrapParams struct { - Dc *config.DecryptConfig `json:"dc"` - Annotation []byte `json:"annotation"` -} - -type KeyUnwrapResults struct { - OptsData []byte `json:"optsdata"` -} - -type KeyWrapResults struct { - Annotation []byte `json:"annotation"` -} - -var runner utils.CommandExecuter - -func init() { - runner = utils.Runner{} -} - -// WrapKeys calls appropriate binary executable/grpc server for wrapping the session key for recipients and gets encrypted optsData, which -// describe the symmetric key used for encrypting the layer -func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { - - input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ - Operation: OpKeyWrap, - KeyWrapParams: KeyWrapParams{ - Ec: ec, - OptsData: optsData, - }, - }) - - if err != nil { - return nil, err - } - - if _, ok := ec.Parameters[kw.provider]; ok { - if kw.attrs.Command != nil { - protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command) - if err != nil { - return nil, errors.Wrap(err, "error while retrieving keyprovider protocol command output") - } - return protocolOuput.KeyWrapResults.Annotation, nil - } else if kw.attrs.Grpc != "" { - protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyWrap) - if err != nil { - return nil, errors.Wrap(err, "error while retrieving keyprovider protocol grpc output") - } - - return protocolOuput.KeyWrapResults.Annotation, nil - } else { - return nil, errors.New("Unsupported keyprovider invocation. 
Supported invocation methods are grpc and cmd") - } - } - - return nil, nil -} - -// UnwrapKey calls appropriate binary executable/grpc server for unwrapping the session key based on the protocol given in annotation for recipients and gets decrypted optsData, -// which describe the symmetric key used for decrypting the layer -func (kw *keyProviderKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { - input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ - Operation: OpKeyUnwrap, - KeyUnwrapParams: KeyUnwrapParams{ - Dc: dc, - Annotation: jsonString, - }, - }) - if err != nil { - return nil, err - } - - if kw.attrs.Command != nil { - protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command) - if err != nil { - // If err is not nil, then ignore it and continue with rest of the given keyproviders - return nil, err - } - - return protocolOuput.KeyUnwrapResults.OptsData, nil - } else if kw.attrs.Grpc != "" { - protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyUnwrap) - if err != nil { - // If err is not nil, then ignore it and continue with rest of the given keyproviders - return nil, err - } - - return protocolOuput.KeyUnwrapResults.OptsData, nil - } else { - return nil, errors.New("Unsupported keyprovider invocation. 
Supported invocation methods are grpc and cmd") - } -} - -func getProviderGRPCOutput(input []byte, connString string, operation KeyProviderKeyWrapProtocolOperation) (*KeyProviderKeyWrapProtocolOutput, error) { - var protocolOuput KeyProviderKeyWrapProtocolOutput - var grpcOutput *keyproviderpb.KeyProviderKeyWrapProtocolOutput - cc, err := grpc.Dial(connString, grpc.WithInsecure()) - if err != nil { - return nil, errors.Wrap(err, "error while dialing rpc server") - } - defer func() { - derr := cc.Close() - if derr != nil { - log.WithError(derr).Error("Error closing grpc socket") - } - }() - - client := keyproviderpb.NewKeyProviderServiceClient(cc) - req := &keyproviderpb.KeyProviderKeyWrapProtocolInput{ - KeyProviderKeyWrapProtocolInput: input, - } - - if operation == OpKeyWrap { - grpcOutput, err = client.WrapKey(context.Background(), req) - if err != nil { - return nil, errors.Wrap(err, "Error from grpc method") - } - } else if operation == OpKeyUnwrap { - grpcOutput, err = client.UnWrapKey(context.Background(), req) - if err != nil { - return nil, errors.Wrap(err, "Error from grpc method") - } - } else { - return nil, errors.New("Unsupported operation") - } - - respBytes := grpcOutput.GetKeyProviderKeyWrapProtocolOutput() - err = json.Unmarshal(respBytes, &protocolOuput) - if err != nil { - return nil, errors.Wrap(err, "Error while unmarshalling grpc method output") - } - - return &protocolOuput, nil -} - -func getProviderCommandOutput(input []byte, command *keyproviderconfig.Command) (*KeyProviderKeyWrapProtocolOutput, error) { - var protocolOuput KeyProviderKeyWrapProtocolOutput - // Convert interface to command structure - respBytes, err := runner.Exec(command.Path, command.Args, input) - if err != nil { - return nil, err - } - err = json.Unmarshal(respBytes, &protocolOuput) - if err != nil { - return nil, errors.Wrap(err, "Error while unmarshalling binary executable command output") - } - return &protocolOuput, nil -} - -// Return false as it is not 
applicable to keyprovider protocol -func (kw *keyProviderKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { - return false -} - -// Return nil as it is not applicable to keyprovider protocol -func (kw *keyProviderKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { - return nil -} - -// Return nil as it is not applicable to keyprovider protocol -func (kw *keyProviderKeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { - return nil, nil -} - -// Return nil as it is not applicable to keyprovider protocol -func (kw *keyProviderKeyWrapper) GetRecipients(_ string) ([]string, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go b/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go deleted file mode 100644 index ed25e7d..0000000 --- a/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go +++ /dev/null @@ -1,48 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package keywrap - -import ( - "github.com/containers/ocicrypt/config" -) - -// KeyWrapper is the interface used for wrapping keys using -// a specific encryption technology (pgp, jwe) -type KeyWrapper interface { - WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) - UnwrapKey(dc *config.DecryptConfig, annotation []byte) ([]byte, error) - GetAnnotationID() string - - // NoPossibleKeys returns true if there is no possibility of performing - // decryption for parameters provided. - NoPossibleKeys(dcparameters map[string][][]byte) bool - - // GetPrivateKeys (optional) gets the array of private keys. It is an optional implementation - // as in some key services, a private key may not be exportable (i.e. HSM) - // If not implemented, return nil - GetPrivateKeys(dcparameters map[string][][]byte) [][]byte - - // GetKeyIdsFromPacket (optional) gets a list of key IDs. This is optional as some encryption - // schemes may not have a notion of key IDs - // If not implemented, return the nil slice - GetKeyIdsFromPacket(packet string) ([]uint64, error) - - // GetRecipients (optional) gets a list of recipients. It is optional due to the validity of - // recipients in a particular encryptiong scheme - // If not implemented, return the nil slice - GetRecipients(packet string) ([]string, error) -} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go deleted file mode 100644 index 275a3d8..0000000 --- a/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go +++ /dev/null @@ -1,273 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pgp - -import ( - "bytes" - "crypto" - "crypto/rand" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/mail" - "strconv" - "strings" - - "github.com/containers/ocicrypt/config" - "github.com/containers/ocicrypt/keywrap" - "github.com/pkg/errors" - "golang.org/x/crypto/openpgp" - "golang.org/x/crypto/openpgp/packet" -) - -type gpgKeyWrapper struct { -} - -// NewKeyWrapper returns a new key wrapping interface for pgp -func NewKeyWrapper() keywrap.KeyWrapper { - return &gpgKeyWrapper{} -} - -var ( - // GPGDefaultEncryptConfig is the default configuration for layer encryption/decryption - GPGDefaultEncryptConfig = &packet.Config{ - Rand: rand.Reader, - DefaultHash: crypto.SHA256, - DefaultCipher: packet.CipherAES256, - CompressionConfig: &packet.CompressionConfig{Level: 0}, // No compression - RSABits: 2048, - } -) - -func (kw *gpgKeyWrapper) GetAnnotationID() string { - return "org.opencontainers.image.enc.keys.pgp" -} - -// WrapKeys wraps the session key for recpients and encrypts the optsData, which -// describe the symmetric key used for encrypting the layer -func (kw *gpgKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { - ciphertext := new(bytes.Buffer) - el, err := kw.createEntityList(ec) - if err != nil { - return nil, errors.Wrap(err, "unable to create entity list") - } - if len(el) == 0 { - // nothing to do -- not an error - return nil, nil - } - - plaintextWriter, err := openpgp.Encrypt(ciphertext, - el, /*EntityList*/ - nil, /* Sign*/ - nil, /* FileHint */ - GPGDefaultEncryptConfig) - if err != nil { - 
return nil, err - } - - if _, err = plaintextWriter.Write(optsData); err != nil { - return nil, err - } else if err = plaintextWriter.Close(); err != nil { - return nil, err - } - return ciphertext.Bytes(), err -} - -// UnwrapKey unwraps the symmetric key with which the layer is encrypted -// This symmetric key is encrypted in the PGP payload. -func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) ([]byte, error) { - pgpPrivateKeys, pgpPrivateKeysPwd, err := kw.getKeyParameters(dc.Parameters) - if err != nil { - return nil, err - } - - for idx, pgpPrivateKey := range pgpPrivateKeys { - r := bytes.NewBuffer(pgpPrivateKey) - entityList, err := openpgp.ReadKeyRing(r) - if err != nil { - return nil, errors.Wrap(err, "unable to parse private keys") - } - - var prompt openpgp.PromptFunction - if len(pgpPrivateKeysPwd) > idx { - responded := false - prompt = func(keys []openpgp.Key, symmetric bool) ([]byte, error) { - if responded { - return nil, fmt.Errorf("don't seem to have the right password") - } - responded = true - for _, key := range keys { - if key.PrivateKey != nil { - _ = key.PrivateKey.Decrypt(pgpPrivateKeysPwd[idx]) - } - } - return pgpPrivateKeysPwd[idx], nil - } - } - - r = bytes.NewBuffer(pgpPacket) - md, err := openpgp.ReadMessage(r, entityList, prompt, GPGDefaultEncryptConfig) - if err != nil { - continue - } - // we get the plain key options back - optsData, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - continue - } - return optsData, nil - } - return nil, errors.New("PGP: No suitable key found to unwrap key") -} - -// GetKeyIdsFromWrappedKeys converts the base64 encoded PGPPacket to uint64 keyIds -func (kw *gpgKeyWrapper) GetKeyIdsFromPacket(b64pgpPackets string) ([]uint64, error) { - - var keyids []uint64 - for _, b64pgpPacket := range strings.Split(b64pgpPackets, ",") { - pgpPacket, err := base64.StdEncoding.DecodeString(b64pgpPacket) - if err != nil { - return nil, errors.Wrapf(err, "could not decode base64 
encoded PGP packet") - } - newids, err := kw.getKeyIDs(pgpPacket) - if err != nil { - return nil, err - } - keyids = append(keyids, newids...) - } - return keyids, nil -} - -// getKeyIDs parses a PGPPacket and gets the list of recipients' key IDs -func (kw *gpgKeyWrapper) getKeyIDs(pgpPacket []byte) ([]uint64, error) { - var keyids []uint64 - - kbuf := bytes.NewBuffer(pgpPacket) - packets := packet.NewReader(kbuf) -ParsePackets: - for { - p, err := packets.Next() - if err == io.EOF { - break ParsePackets - } - if err != nil { - return []uint64{}, errors.Wrapf(err, "packets.Next() failed") - } - switch p := p.(type) { - case *packet.EncryptedKey: - keyids = append(keyids, p.KeyId) - case *packet.SymmetricallyEncrypted: - break ParsePackets - } - } - return keyids, nil -} - -// GetRecipients converts the wrappedKeys to an array of recipients -func (kw *gpgKeyWrapper) GetRecipients(b64pgpPackets string) ([]string, error) { - keyIds, err := kw.GetKeyIdsFromPacket(b64pgpPackets) - if err != nil { - return nil, err - } - var array []string - for _, keyid := range keyIds { - array = append(array, "0x"+strconv.FormatUint(keyid, 16)) - } - return array, nil -} - -func (kw *gpgKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { - return len(kw.GetPrivateKeys(dcparameters)) == 0 -} - -func (kw *gpgKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { - return dcparameters["gpg-privatekeys"] -} - -func (kw *gpgKeyWrapper) getKeyParameters(dcparameters map[string][][]byte) ([][]byte, [][]byte, error) { - - privKeys := kw.GetPrivateKeys(dcparameters) - if len(privKeys) == 0 { - return nil, nil, errors.New("GPG: Missing private key parameter") - } - - return privKeys, dcparameters["gpg-privatekeys-passwords"], nil -} - -// createEntityList creates the opengpg EntityList by reading the KeyRing -// first and then filtering out recipients' keys -func (kw *gpgKeyWrapper) createEntityList(ec *config.EncryptConfig) (openpgp.EntityList, error) { - 
pgpPubringFile := ec.Parameters["gpg-pubkeyringfile"] - if len(pgpPubringFile) == 0 { - return nil, nil - } - r := bytes.NewReader(pgpPubringFile[0]) - - entityList, err := openpgp.ReadKeyRing(r) - if err != nil { - return nil, err - } - - gpgRecipients := ec.Parameters["gpg-recipients"] - if len(gpgRecipients) == 0 { - return nil, nil - } - - rSet := make(map[string]int) - for _, r := range gpgRecipients { - rSet[string(r)] = 0 - } - - var filteredList openpgp.EntityList - for _, entity := range entityList { - for k := range entity.Identities { - addr, err := mail.ParseAddress(k) - if err != nil { - return nil, err - } - for _, r := range gpgRecipients { - recp := string(r) - if strings.Compare(addr.Name, recp) == 0 || strings.Compare(addr.Address, recp) == 0 { - filteredList = append(filteredList, entity) - rSet[recp] = rSet[recp] + 1 - } - } - } - } - - // make sure we found keys for all the Recipients... - var buffer bytes.Buffer - notFound := false - buffer.WriteString("PGP: No key found for the following recipients: ") - - for k, v := range rSet { - if v == 0 { - if notFound { - buffer.WriteString(", ") - } - buffer.WriteString(k) - notFound = true - } - } - - if notFound { - return nil, errors.New(buffer.String()) - } - - return filteredList, nil -} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go deleted file mode 100644 index 803b908..0000000 --- a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pkcs11 - -import ( - "github.com/containers/ocicrypt/config" - "github.com/containers/ocicrypt/crypto/pkcs11" - "github.com/containers/ocicrypt/keywrap" - "github.com/containers/ocicrypt/utils" - - "github.com/pkg/errors" -) - -type pkcs11KeyWrapper struct { -} - -func (kw *pkcs11KeyWrapper) GetAnnotationID() string { - return "org.opencontainers.image.enc.keys.pkcs11" -} - -// NewKeyWrapper returns a new key wrapping interface using pkcs11 -func NewKeyWrapper() keywrap.KeyWrapper { - return &pkcs11KeyWrapper{} -} - -// WrapKeys wraps the session key for recpients and encrypts the optsData, which -// describe the symmetric key used for encrypting the layer -func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { - pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, append(ec.Parameters["pkcs11-pubkeys"], ec.Parameters["pkcs11-yamls"]...)) - if err != nil { - return nil, err - } - // no recipients is not an error... 
- if len(pkcs11Recipients) == 0 { - return nil, nil - } - - jsonString, err := pkcs11.EncryptMultiple(pkcs11Recipients, optsData) - if err != nil { - return nil, errors.Wrapf(err, "PKCS11 EncryptMulitple failed") - } - return jsonString, nil -} - -func (kw *pkcs11KeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { - var pkcs11PrivKeys []*pkcs11.Pkcs11KeyFileObject - - privKeys := kw.GetPrivateKeys(dc.Parameters) - if len(privKeys) == 0 { - return nil, errors.New("No private keys found for PKCS11 decryption") - } - - p11conf, err := p11confFromParameters(dc.Parameters) - if err != nil { - return nil, err - } - - for _, privKey := range privKeys { - key, err := utils.ParsePrivateKey(privKey, nil, "PKCS11") - if err != nil { - return nil, err - } - switch pkcs11PrivKey := key.(type) { - case *pkcs11.Pkcs11KeyFileObject: - if p11conf != nil { - pkcs11PrivKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) - pkcs11PrivKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) - } - pkcs11PrivKeys = append(pkcs11PrivKeys, pkcs11PrivKey) - default: - continue - } - } - - plaintext, err := pkcs11.Decrypt(pkcs11PrivKeys, jsonString) - if err == nil { - return plaintext, nil - } - - return nil, errors.Wrapf(err, "PKCS11: No suitable private key found for decryption") -} - -func (kw *pkcs11KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { - return len(kw.GetPrivateKeys(dcparameters)) == 0 -} - -func (kw *pkcs11KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { - return dcparameters["pkcs11-yamls"] -} - -func (kw *pkcs11KeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { - return nil, nil -} - -func (kw *pkcs11KeyWrapper) GetRecipients(_ string) ([]string, error) { - return []string{"[pkcs11]"}, nil -} - -func addPubKeys(dc *config.DecryptConfig, pubKeys [][]byte) ([]interface{}, error) { - var pkcs11Keys []interface{} - - if len(pubKeys) == 0 { - return pkcs11Keys, nil - } - - p11conf, err := 
p11confFromParameters(dc.Parameters) - if err != nil { - return nil, err - } - - for _, pubKey := range pubKeys { - key, err := utils.ParsePublicKey(pubKey, "PKCS11") - if err != nil { - return nil, err - } - switch pkcs11PubKey := key.(type) { - case *pkcs11.Pkcs11KeyFileObject: - if p11conf != nil { - pkcs11PubKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) - pkcs11PubKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) - } - } - pkcs11Keys = append(pkcs11Keys, key) - } - return pkcs11Keys, nil -} - -func p11confFromParameters(dcparameters map[string][][]byte) (*pkcs11.Pkcs11Config, error){ - if _, ok := dcparameters["pkcs11-config"]; ok { - return pkcs11.ParsePkcs11ConfigFile(dcparameters["pkcs11-config"][0]) - } - return nil, nil -} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go deleted file mode 100644 index 1feae46..0000000 --- a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package pkcs7 - -import ( - "crypto" - "crypto/x509" - - "github.com/containers/ocicrypt/config" - "github.com/containers/ocicrypt/keywrap" - "github.com/containers/ocicrypt/utils" - "github.com/pkg/errors" - "go.mozilla.org/pkcs7" -) - -type pkcs7KeyWrapper struct { -} - -// NewKeyWrapper returns a new key wrapping interface using jwe -func NewKeyWrapper() keywrap.KeyWrapper { - return &pkcs7KeyWrapper{} -} - -func (kw *pkcs7KeyWrapper) GetAnnotationID() string { - return "org.opencontainers.image.enc.keys.pkcs7" -} - -// WrapKeys wraps the session key for recpients and encrypts the optsData, which -// describe the symmetric key used for encrypting the layer -func (kw *pkcs7KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { - x509Certs, err := collectX509s(ec.Parameters["x509s"]) - if err != nil { - return nil, err - } - // no recipients is not an error... - if len(x509Certs) == 0 { - return nil, nil - } - - pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM - return pkcs7.Encrypt(optsData, x509Certs) -} - -func collectX509s(x509s [][]byte) ([]*x509.Certificate, error) { - if len(x509s) == 0 { - return nil, nil - } - var x509Certs []*x509.Certificate - for _, x509 := range x509s { - x509Cert, err := utils.ParseCertificate(x509, "PKCS7") - if err != nil { - return nil, err - } - x509Certs = append(x509Certs, x509Cert) - } - return x509Certs, nil -} - -func (kw *pkcs7KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { - return len(kw.GetPrivateKeys(dcparameters)) == 0 -} - -func (kw *pkcs7KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { - return dcparameters["privkeys"] -} - -func (kw *pkcs7KeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte { - return dcparameters["privkeys-passwords"] -} - -// UnwrapKey unwraps the symmetric key with which the layer is encrypted -// This symmetric key is encrypted in the PKCS7 payload. 
-func (kw *pkcs7KeyWrapper) UnwrapKey(dc *config.DecryptConfig, pkcs7Packet []byte) ([]byte, error) { - privKeys := kw.GetPrivateKeys(dc.Parameters) - if len(privKeys) == 0 { - return nil, errors.New("no private keys found for PKCS7 decryption") - } - privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters) - if len(privKeysPasswords) != len(privKeys) { - return nil, errors.New("private key password array length must be same as that of private keys") - } - - x509Certs, err := collectX509s(dc.Parameters["x509s"]) - if err != nil { - return nil, err - } - if len(x509Certs) == 0 { - return nil, errors.New("no x509 certificates found needed for PKCS7 decryption") - } - - p7, err := pkcs7.Parse(pkcs7Packet) - if err != nil { - return nil, errors.Wrapf(err, "could not parse PKCS7 packet") - } - - for idx, privKey := range privKeys { - key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "PKCS7") - if err != nil { - return nil, err - } - for _, x509Cert := range x509Certs { - optsData, err := p7.Decrypt(x509Cert, crypto.PrivateKey(key)) - if err != nil { - continue - } - return optsData, nil - } - } - return nil, errors.New("PKCS7: No suitable private key found for decryption") -} - -// GetKeyIdsFromWrappedKeys converts the base64 encoded Packet to uint64 keyIds; -// We cannot do this with pkcs7 -func (kw *pkcs7KeyWrapper) GetKeyIdsFromPacket(b64pkcs7Packets string) ([]uint64, error) { - return nil, nil -} - -// GetRecipients converts the wrappedKeys to an array of recipients -// We cannot do this with pkcs7 -func (kw *pkcs7KeyWrapper) GetRecipients(b64pkcs7Packets string) ([]string, error) { - return []string{"[pkcs7]"}, nil -} diff --git a/vendor/github.com/containers/ocicrypt/reader.go b/vendor/github.com/containers/ocicrypt/reader.go deleted file mode 100644 index a93eec8..0000000 --- a/vendor/github.com/containers/ocicrypt/reader.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - Copyright The ocicrypt Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ocicrypt - -import ( - "io" -) - -type readerAtReader struct { - r io.ReaderAt - off int64 -} - -// ReaderFromReaderAt takes an io.ReaderAt and returns an io.Reader -func ReaderFromReaderAt(r io.ReaderAt) io.Reader { - return &readerAtReader{ - r: r, - off: 0, - } -} - -func (rar *readerAtReader) Read(p []byte) (n int, err error) { - n, err = rar.r.ReadAt(p, rar.off) - rar.off += int64(n) - return n, err -} diff --git a/vendor/github.com/containers/ocicrypt/spec/spec.go b/vendor/github.com/containers/ocicrypt/spec/spec.go deleted file mode 100644 index 330069d..0000000 --- a/vendor/github.com/containers/ocicrypt/spec/spec.go +++ /dev/null @@ -1,12 +0,0 @@ -package spec - -const ( - // MediaTypeLayerEnc is MIME type used for encrypted layers. - MediaTypeLayerEnc = "application/vnd.oci.image.layer.v1.tar+encrypted" - // MediaTypeLayerGzipEnc is MIME type used for encrypted compressed layers. - MediaTypeLayerGzipEnc = "application/vnd.oci.image.layer.v1.tar+gzip+encrypted" - // MediaTypeLayerNonDistributableEnc is MIME type used for non distributable encrypted layers. - MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted" - // MediaTypeLayerGzipEnc is MIME type used for non distributable encrypted compressed layers. 
- MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted" -) diff --git a/vendor/github.com/containers/ocicrypt/utils/delayedreader.go b/vendor/github.com/containers/ocicrypt/utils/delayedreader.go deleted file mode 100644 index 3b939bd..0000000 --- a/vendor/github.com/containers/ocicrypt/utils/delayedreader.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package utils - -import ( - "io" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -// DelayedReader wraps a io.Reader and allows a client to use the Reader -// interface. The DelayedReader holds back some buffer to the client -// so that it can report any error that occurred on the Reader it wraps -// early to the client while it may still have held some data back. 
-type DelayedReader struct { - reader io.Reader // Reader to Read() bytes from and delay them - err error // error that occurred on the reader - buffer []byte // delay buffer - bufbytes int // number of bytes in the delay buffer to give to Read(); on '0' we return 'EOF' to caller - bufoff int // offset in the delay buffer to give to Read() -} - -// NewDelayedReader wraps a io.Reader and allocates a delay buffer of bufsize bytes -func NewDelayedReader(reader io.Reader, bufsize uint) io.Reader { - return &DelayedReader{ - reader: reader, - buffer: make([]byte, bufsize), - } -} - -// Read implements the io.Reader interface -func (dr *DelayedReader) Read(p []byte) (int, error) { - if dr.err != nil && dr.err != io.EOF { - return 0, dr.err - } - - // if we are completely drained, return io.EOF - if dr.err == io.EOF && dr.bufbytes == 0 { - return 0, io.EOF - } - - // only at the beginning we fill our delay buffer in an extra step - if dr.bufbytes < len(dr.buffer) && dr.err == nil { - dr.bufbytes, dr.err = FillBuffer(dr.reader, dr.buffer) - if dr.err != nil && dr.err != io.EOF { - return 0, dr.err - } - } - // dr.err != nil means we have EOF and can drain the delay buffer - // otherwise we need to still read from the reader - - var tmpbuf []byte - tmpbufbytes := 0 - if dr.err == nil { - tmpbuf = make([]byte, len(p)) - tmpbufbytes, dr.err = FillBuffer(dr.reader, tmpbuf) - if dr.err != nil && dr.err != io.EOF { - return 0, dr.err - } - } - - // copy out of the delay buffer into 'p' - tocopy1 := min(len(p), dr.bufbytes) - c1 := copy(p[:tocopy1], dr.buffer[dr.bufoff:]) - dr.bufoff += c1 - dr.bufbytes -= c1 - - c2 := 0 - // can p still hold more data? 
- if c1 < len(p) { - // copy out of the tmpbuf into 'p' - c2 = copy(p[tocopy1:], tmpbuf[:tmpbufbytes]) - } - - // if tmpbuf holds data we need to hold onto, copy them - // into the delay buffer - if tmpbufbytes-c2 > 0 { - // left-shift the delay buffer and append the tmpbuf's remaining data - dr.buffer = dr.buffer[dr.bufoff : dr.bufoff+dr.bufbytes] - dr.buffer = append(dr.buffer, tmpbuf[c2:tmpbufbytes]...) - dr.bufoff = 0 - dr.bufbytes = len(dr.buffer) - } - - var err error - if dr.bufbytes == 0 { - err = io.EOF - } - return c1 + c2, err -} diff --git a/vendor/github.com/containers/ocicrypt/utils/ioutils.go b/vendor/github.com/containers/ocicrypt/utils/ioutils.go deleted file mode 100644 index 078c347..0000000 --- a/vendor/github.com/containers/ocicrypt/utils/ioutils.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package utils - -import ( - "bytes" - "io" - "os/exec" - "github.com/pkg/errors" -) - -// FillBuffer fills the given buffer with as many bytes from the reader as possible. It returns -// EOF if an EOF was encountered or any other error. 
-func FillBuffer(reader io.Reader, buffer []byte) (int, error) { - n, err := io.ReadFull(reader, buffer) - if err == io.ErrUnexpectedEOF { - return n, io.EOF - } - return n, err -} - -// first argument is the command, like cat or echo, -// the second is the list of args to pass to it -type CommandExecuter interface { - Exec(string, []string, []byte) ([]byte, error) -} - -type Runner struct{} - -// ExecuteCommand is used to execute a linux command line command and return the output of the command with an error if it exists. -func (r Runner) Exec(cmdName string, args []string, input []byte) ([]byte, error) { - var out bytes.Buffer - stdInputBuffer := bytes.NewBuffer(input) - cmd := exec.Command(cmdName, args...) - cmd.Stdin = stdInputBuffer - cmd.Stdout = &out - err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "Error while running command: %s", cmdName) - } - return out.Bytes(), nil -} diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go deleted file mode 100644 index dc477d3..0000000 --- a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go +++ /dev/null @@ -1,243 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: keyprovider.proto - -package keyprovider - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type KeyProviderKeyWrapProtocolInput struct { - KeyProviderKeyWrapProtocolInput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolInput,proto3" json:"KeyProviderKeyWrapProtocolInput,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyProviderKeyWrapProtocolInput) Reset() { *m = KeyProviderKeyWrapProtocolInput{} } -func (m *KeyProviderKeyWrapProtocolInput) String() string { return proto.CompactTextString(m) } -func (*KeyProviderKeyWrapProtocolInput) ProtoMessage() {} -func (*KeyProviderKeyWrapProtocolInput) Descriptor() ([]byte, []int) { - return fileDescriptor_da74c8e785ad390c, []int{0} -} - -func (m *KeyProviderKeyWrapProtocolInput) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Unmarshal(m, b) -} -func (m *KeyProviderKeyWrapProtocolInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Marshal(b, m, deterministic) -} -func (m *KeyProviderKeyWrapProtocolInput) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Merge(m, src) -} -func (m *KeyProviderKeyWrapProtocolInput) XXX_Size() int { - return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Size(m) -} -func (m *KeyProviderKeyWrapProtocolInput) XXX_DiscardUnknown() { - xxx_messageInfo_KeyProviderKeyWrapProtocolInput.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyProviderKeyWrapProtocolInput proto.InternalMessageInfo - -func (m *KeyProviderKeyWrapProtocolInput) GetKeyProviderKeyWrapProtocolInput() []byte { - if m != nil { - return m.KeyProviderKeyWrapProtocolInput - } - return nil -} - -type KeyProviderKeyWrapProtocolOutput struct { - KeyProviderKeyWrapProtocolOutput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolOutput,proto3" json:"KeyProviderKeyWrapProtocolOutput,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyProviderKeyWrapProtocolOutput) Reset() { *m = KeyProviderKeyWrapProtocolOutput{} } -func (m *KeyProviderKeyWrapProtocolOutput) String() string { return proto.CompactTextString(m) } -func (*KeyProviderKeyWrapProtocolOutput) ProtoMessage() {} -func (*KeyProviderKeyWrapProtocolOutput) Descriptor() ([]byte, []int) { - return fileDescriptor_da74c8e785ad390c, []int{1} -} - -func (m *KeyProviderKeyWrapProtocolOutput) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Unmarshal(m, b) -} -func (m *KeyProviderKeyWrapProtocolOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Marshal(b, m, deterministic) -} -func (m *KeyProviderKeyWrapProtocolOutput) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Merge(m, src) -} -func (m *KeyProviderKeyWrapProtocolOutput) XXX_Size() int { - return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Size(m) -} -func (m *KeyProviderKeyWrapProtocolOutput) XXX_DiscardUnknown() { - xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyProviderKeyWrapProtocolOutput proto.InternalMessageInfo - -func (m *KeyProviderKeyWrapProtocolOutput) GetKeyProviderKeyWrapProtocolOutput() []byte { - if m != nil { - return m.KeyProviderKeyWrapProtocolOutput - } - return nil -} - -func init() { - proto.RegisterType((*KeyProviderKeyWrapProtocolInput)(nil), "keyprovider.keyProviderKeyWrapProtocolInput") - proto.RegisterType((*KeyProviderKeyWrapProtocolOutput)(nil), "keyprovider.keyProviderKeyWrapProtocolOutput") -} - -func init() { - proto.RegisterFile("keyprovider.proto", fileDescriptor_da74c8e785ad390c) -} - -var fileDescriptor_da74c8e785ad390c = []byte{ - // 169 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 
0x02, 0xff, 0xe2, 0x12, 0xcc, 0x4e, 0xad, 0x2c, - 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x03, 0x32, 0x4a, 0xf2, 0x85, 0xb8, 0x91, 0x84, - 0x94, 0xb2, 0xb9, 0xe4, 0x81, 0xdc, 0x00, 0x28, 0xd7, 0x3b, 0xb5, 0x32, 0xbc, 0x28, 0xb1, 0x20, - 0x00, 0xa4, 0x2e, 0x39, 0x3f, 0xc7, 0x33, 0xaf, 0xa0, 0xb4, 0x44, 0xc8, 0x83, 0x4b, 0xde, 0x1b, - 0xbf, 0x12, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x42, 0xca, 0x94, 0xf2, 0xb8, 0x14, 0x70, - 0x5b, 0xe6, 0x5f, 0x5a, 0x02, 0xb2, 0xcd, 0x8b, 0x4b, 0xc1, 0x9b, 0x80, 0x1a, 0xa8, 0x75, 0x04, - 0xd5, 0x19, 0xbd, 0x62, 0xe4, 0x12, 0x42, 0x52, 0x14, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, 0x2a, - 0x94, 0xc1, 0xc5, 0x0e, 0x52, 0x0c, 0x94, 0x11, 0xd2, 0xd1, 0x43, 0x0e, 0x1f, 0x02, 0x21, 0x21, - 0xa5, 0x4b, 0xa4, 0x6a, 0x88, 0xf5, 0x4a, 0x0c, 0x42, 0x59, 0x5c, 0x9c, 0xa1, 0x79, 0xf4, 0xb1, - 0xcb, 0x89, 0x37, 0x0a, 0x39, 0x62, 0x93, 0xd8, 0xc0, 0x91, 0x6d, 0x0c, 0x08, 0x00, 0x00, 0xff, - 0xff, 0x9a, 0x10, 0xcb, 0xf9, 0x01, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// KeyProviderServiceClient is the client API for KeyProviderService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type KeyProviderServiceClient interface { - WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) - UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) -} - -type keyProviderServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewKeyProviderServiceClient(cc grpc.ClientConnInterface) KeyProviderServiceClient { - return &keyProviderServiceClient{cc} -} - -func (c *keyProviderServiceClient) WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { - out := new(KeyProviderKeyWrapProtocolOutput) - err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/WrapKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *keyProviderServiceClient) UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { - out := new(KeyProviderKeyWrapProtocolOutput) - err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/UnWrapKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// KeyProviderServiceServer is the server API for KeyProviderService service. -type KeyProviderServiceServer interface { - WrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) - UnWrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) -} - -// UnimplementedKeyProviderServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedKeyProviderServiceServer struct { -} - -func (*UnimplementedKeyProviderServiceServer) WrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { - return nil, status.Errorf(codes.Unimplemented, "method WrapKey not implemented") -} -func (*UnimplementedKeyProviderServiceServer) UnWrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { - return nil, status.Errorf(codes.Unimplemented, "method UnWrapKey not implemented") -} - -func RegisterKeyProviderServiceServer(s *grpc.Server, srv KeyProviderServiceServer) { - s.RegisterService(&_KeyProviderService_serviceDesc, srv) -} - -func _KeyProviderService_WrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(KeyProviderKeyWrapProtocolInput) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KeyProviderServiceServer).WrapKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/keyprovider.KeyProviderService/WrapKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KeyProviderServiceServer).WrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) - } - return interceptor(ctx, in, info, handler) -} - -func _KeyProviderService_UnWrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(KeyProviderKeyWrapProtocolInput) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KeyProviderServiceServer).UnWrapKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/keyprovider.KeyProviderService/UnWrapKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(KeyProviderServiceServer).UnWrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) - } - return interceptor(ctx, in, info, handler) -} - -var _KeyProviderService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "keyprovider.KeyProviderService", - HandlerType: (*KeyProviderServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "WrapKey", - Handler: _KeyProviderService_WrapKey_Handler, - }, - { - MethodName: "UnWrapKey", - Handler: _KeyProviderService_UnWrapKey_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "keyprovider.proto", -} diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto deleted file mode 100644 index a71f0a5..0000000 --- a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package keyprovider; -option go_package = "keyprovider"; - -message keyProviderKeyWrapProtocolInput { - bytes KeyProviderKeyWrapProtocolInput = 1; -} - -message keyProviderKeyWrapProtocolOutput { - bytes KeyProviderKeyWrapProtocolOutput = 1; -} - -service KeyProviderService { - rpc WrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {}; - rpc UnWrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {}; -} \ No newline at end of file diff --git a/vendor/github.com/containers/ocicrypt/utils/testing.go b/vendor/github.com/containers/ocicrypt/utils/testing.go deleted file mode 100644 index 38633b1..0000000 --- a/vendor/github.com/containers/ocicrypt/utils/testing.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package utils - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "math/big" - "time" - - "github.com/pkg/errors" -) - -// CreateRSAKey creates an RSA key -func CreateRSAKey(bits int) (*rsa.PrivateKey, error) { - key, err := rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, errors.Wrap(err, "rsa.GenerateKey failed") - } - return key, nil -} - -// CreateRSATestKey creates an RSA key of the given size and returns -// the public and private key in PEM or DER format -func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte, error) { - key, err := CreateRSAKey(bits) - if err != nil { - return nil, nil, err - } - - pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) - if err != nil { - return nil, nil, errors.Wrap(err, "x509.MarshalPKIXPublicKey failed") - } - privData := x509.MarshalPKCS1PrivateKey(key) - - // no more encoding needed for DER - if !pemencode { - return pubData, privData, nil - } - - publicKey := pem.EncodeToMemory(&pem.Block{ - Type: "PUBLIC KEY", - Bytes: pubData, - }) - - var block *pem.Block - - typ := "RSA PRIVATE KEY" - if len(password) > 0 { - block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility - if err != nil { - return nil, nil, errors.Wrap(err, "x509.EncryptPEMBlock failed") - } - } else { - block = &pem.Block{ - Type: typ, - Bytes: privData, - } - } - - privateKey := 
pem.EncodeToMemory(block) - - return publicKey, privateKey, nil -} - -// CreateECDSATestKey creates and elliptic curve key for the given curve and returns -// the public and private key in DER format -func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) { - key, err := ecdsa.GenerateKey(curve, rand.Reader) - if err != nil { - return nil, nil, errors.Wrapf(err, "ecdsa.GenerateKey failed") - } - - pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) - if err != nil { - return nil, nil, errors.Wrapf(err, "x509.MarshalPKIXPublicKey failed") - } - - privData, err := x509.MarshalECPrivateKey(key) - if err != nil { - return nil, nil, errors.Wrapf(err, "x509.MarshalECPrivateKey failed") - } - - return pubData, privData, nil -} - -// CreateTestCA creates a root CA for testing -func CreateTestCA() (*rsa.PrivateKey, *x509.Certificate, error) { - key, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return nil, nil, errors.Wrap(err, "rsa.GenerateKey failed") - } - - ca := &x509.Certificate{ - SerialNumber: big.NewInt(1), - Subject: pkix.Name{ - CommonName: "test-ca", - }, - NotBefore: time.Now(), - NotAfter: time.Now().AddDate(1, 0, 0), - IsCA: true, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - BasicConstraintsValid: true, - } - caCert, err := certifyKey(&key.PublicKey, ca, key, ca) - - return key, caCert, err -} - -// CertifyKey certifies a public key using the given CA's private key and cert; -// The certificate template for the public key is optional -func CertifyKey(pubbytes []byte, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) { - pubKey, err := ParsePublicKey(pubbytes, "CertifyKey") - if err != nil { - return nil, err - } - return certifyKey(pubKey, template, caKey, caCert) -} - -func certifyKey(pub interface{}, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) { - if template == nil { - template = 
&x509.Certificate{ - SerialNumber: big.NewInt(1), - Subject: pkix.Name{ - CommonName: "testkey", - }, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour), - IsCA: false, - KeyUsage: x509.KeyUsageDigitalSignature, - BasicConstraintsValid: true, - } - } - - certDER, err := x509.CreateCertificate(rand.Reader, template, caCert, pub, caKey) - if err != nil { - return nil, errors.Wrap(err, "x509.CreateCertificate failed") - } - - cert, err := x509.ParseCertificate(certDER) - if err != nil { - return nil, errors.Wrap(err, "x509.ParseCertificate failed") - } - - return cert, nil -} diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go deleted file mode 100644 index 07fe6d3..0000000 --- a/vendor/github.com/containers/ocicrypt/utils/utils.go +++ /dev/null @@ -1,250 +0,0 @@ -/* - Copyright The ocicrypt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package utils - -import ( - "bytes" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "fmt" - "strings" - - "github.com/containers/ocicrypt/crypto/pkcs11" - - "github.com/pkg/errors" - "golang.org/x/crypto/openpgp" - json "gopkg.in/square/go-jose.v2" -) - -// parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key -func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) { - jwk := json.JSONWebKey{} - err := jwk.UnmarshalJSON(privKey) - if err != nil { - return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix) - } - if jwk.IsPublic() { - return nil, fmt.Errorf("%s: JWK is not a private key", prefix) - } - return &jwk, nil -} - -// parseJWKPublicKey parses the input byte array as a JWK -func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) { - jwk := json.JSONWebKey{} - err := jwk.UnmarshalJSON(privKey) - if err != nil { - return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix) - } - if !jwk.IsPublic() { - return nil, fmt.Errorf("%s: JWK is not a public key", prefix) - } - return &jwk, nil -} - -// parsePkcs11PrivateKeyYaml parses the input byte array as pkcs11 key file yaml format) -func parsePkcs11PrivateKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) { - // if the URI does not have enough attributes, we will throw an error when decrypting - return pkcs11.ParsePkcs11KeyFile(yaml) -} - -// parsePkcs11URIPublicKey parses the input byte array as a pkcs11 key file yaml -func parsePkcs11PublicKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) { - // if the URI does not have enough attributes, we will throw an error when decrypting - return pkcs11.ParsePkcs11KeyFile(yaml) -} - -// IsPasswordError checks whether an error is related to a missing or wrong -// password -func IsPasswordError(err error) bool { - if err == nil { - return false - } - msg := strings.ToLower(err.Error()) - - return 
strings.Contains(msg, "password") && - (strings.Contains(msg, "missing") || strings.Contains(msg, "wrong")) -} - -// ParsePrivateKey tries to parse a private key in DER format first and -// PEM format after, returning an error if the parsing failed -func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{}, error) { - key, err := x509.ParsePKCS8PrivateKey(privKey) - if err != nil { - key, err = x509.ParsePKCS1PrivateKey(privKey) - if err != nil { - key, err = x509.ParseECPrivateKey(privKey) - } - } - if err != nil { - block, _ := pem.Decode(privKey) - if block != nil { - var der []byte - if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility - if privKeyPassword == nil { - return nil, errors.Errorf("%s: Missing password for encrypted private key", prefix) - } - der, err = x509.DecryptPEMBlock(block, privKeyPassword) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility - if err != nil { - return nil, errors.Errorf("%s: Wrong password: could not decrypt private key", prefix) - } - } else { - der = block.Bytes - } - - key, err = x509.ParsePKCS8PrivateKey(der) - if err != nil { - key, err = x509.ParsePKCS1PrivateKey(der) - if err != nil { - return nil, errors.Wrapf(err, "%s: Could not parse private key", prefix) - } - } - } else { - key, err = parseJWKPrivateKey(privKey, prefix) - if err != nil { - key, err = parsePkcs11PrivateKeyYaml(privKey, prefix) - } - } - } - return key, err -} - -// IsPrivateKey returns true in case the given byte array represents a private key -// It returns an error if for example the password is wrong -func IsPrivateKey(data []byte, password []byte) (bool, error) { - _, err := ParsePrivateKey(data, password, "") - return err == nil, err -} - -// IsPkcs11PrivateKey returns true in case the given byte array represents a pkcs11 private key -func IsPkcs11PrivateKey(data []byte) bool { - return pkcs11.IsPkcs11PrivateKey(data) -} - -// 
ParsePublicKey tries to parse a public key in DER format first and -// PEM format after, returning an error if the parsing failed -func ParsePublicKey(pubKey []byte, prefix string) (interface{}, error) { - key, err := x509.ParsePKIXPublicKey(pubKey) - if err != nil { - block, _ := pem.Decode(pubKey) - if block != nil { - key, err = x509.ParsePKIXPublicKey(block.Bytes) - if err != nil { - return nil, errors.Wrapf(err, "%s: Could not parse public key", prefix) - } - } else { - key, err = parseJWKPublicKey(pubKey, prefix) - if err != nil { - key, err = parsePkcs11PublicKeyYaml(pubKey, prefix) - } - } - } - return key, err -} - -// IsPublicKey returns true in case the given byte array represents a public key -func IsPublicKey(data []byte) bool { - _, err := ParsePublicKey(data, "") - return err == nil -} - -// IsPkcs11PublicKey returns true in case the given byte array represents a pkcs11 public key -func IsPkcs11PublicKey(data []byte) bool { - return pkcs11.IsPkcs11PublicKey(data) -} - -// ParseCertificate tries to parse a public key in DER format first and -// PEM format after, returning an error if the parsing failed -func ParseCertificate(certBytes []byte, prefix string) (*x509.Certificate, error) { - x509Cert, err := x509.ParseCertificate(certBytes) - if err != nil { - block, _ := pem.Decode(certBytes) - if block == nil { - return nil, fmt.Errorf("%s: Could not PEM decode x509 certificate", prefix) - } - x509Cert, err = x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, errors.Wrapf(err, "%s: Could not parse x509 certificate", prefix) - } - } - return x509Cert, err -} - -// IsCertificate returns true in case the given byte array represents an x.509 certificate -func IsCertificate(data []byte) bool { - _, err := ParseCertificate(data, "") - return err == nil -} - -// IsGPGPrivateKeyRing returns true in case the given byte array represents a GPG private key ring file -func IsGPGPrivateKeyRing(data []byte) bool { - r := bytes.NewBuffer(data) - _, err 
:= openpgp.ReadKeyRing(r) - return err == nil -} - -// SortDecryptionKeys parses a list of comma separated base64 entries and sorts the data into -// a map. Each entry in the list may be either a GPG private key ring, private key, or x.509 -// certificate -func SortDecryptionKeys(b64ItemList string) (map[string][][]byte, error) { - dcparameters := make(map[string][][]byte) - - for _, b64Item := range strings.Split(b64ItemList, ",") { - var password []byte - b64Data := strings.Split(b64Item, ":") - keyData, err := base64.StdEncoding.DecodeString(b64Data[0]) - if err != nil { - return nil, errors.New("Could not base64 decode a passed decryption key") - } - if len(b64Data) == 2 { - password, err = base64.StdEncoding.DecodeString(b64Data[1]) - if err != nil { - return nil, errors.New("Could not base64 decode a passed decryption key password") - } - } - var key string - isPrivKey, err := IsPrivateKey(keyData, password) - if IsPasswordError(err) { - return nil, err - } - if isPrivKey { - key = "privkeys" - if _, ok := dcparameters["privkeys-passwords"]; !ok { - dcparameters["privkeys-passwords"] = [][]byte{password} - } else { - dcparameters["privkeys-passwords"] = append(dcparameters["privkeys-passwords"], password) - } - } else if IsCertificate(keyData) { - key = "x509s" - } else if IsGPGPrivateKeyRing(keyData) { - key = "gpg-privatekeys" - } - if key != "" { - values := dcparameters[key] - if values == nil { - dcparameters[key] = [][]byte{keyData} - } else { - dcparameters[key] = append(dcparameters[key], keyData) - } - } else { - return nil, errors.New("Unknown decryption key type") - } - } - - return dcparameters, nil -} diff --git a/vendor/github.com/coreos/go-systemd/v22/LICENSE b/vendor/github.com/coreos/go-systemd/v22/LICENSE deleted file mode 100644 index 37ec93a..0000000 --- a/vendor/github.com/coreos/go-systemd/v22/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR 
USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. 
For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file 
distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
- -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/v22/NOTICE b/vendor/github.com/coreos/go-systemd/v22/NOTICE deleted file mode 100644 index 23a0ada..0000000 --- a/vendor/github.com/coreos/go-systemd/v22/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2018 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go deleted file mode 100644 index ba4ae31..0000000 --- a/vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2014 Docker, Inc. -// Copyright 2015-2018 CoreOS, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package daemon provides a Go implementation of the sd_notify protocol. -// It can be used to inform systemd of service start-up completion, watchdog -// events, and other status changes. -// -// https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description -package daemon - -import ( - "net" - "os" -) - -const ( - // SdNotifyReady tells the service manager that service startup is finished - // or the service finished loading its configuration. - SdNotifyReady = "READY=1" - - // SdNotifyStopping tells the service manager that the service is beginning - // its shutdown. - SdNotifyStopping = "STOPPING=1" - - // SdNotifyReloading tells the service manager that this service is - // reloading its configuration. Note that you must call SdNotifyReady when - // it completed reloading. - SdNotifyReloading = "RELOADING=1" - - // SdNotifyWatchdog tells the service manager to update the watchdog - // timestamp for the service. - SdNotifyWatchdog = "WATCHDOG=1" -) - -// SdNotify sends a message to the init daemon. It is common to ignore the error. -// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET` -// will be unconditionally unset. -// -// It returns one of the following: -// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset) -// (false, err) - notification supported, but failure happened (e.g. 
error connecting to NOTIFY_SOCKET or while sending data) -// (true, nil) - notification supported, data has been sent -func SdNotify(unsetEnvironment bool, state string) (bool, error) { - socketAddr := &net.UnixAddr{ - Name: os.Getenv("NOTIFY_SOCKET"), - Net: "unixgram", - } - - // NOTIFY_SOCKET not set - if socketAddr.Name == "" { - return false, nil - } - - if unsetEnvironment { - if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil { - return false, err - } - } - - conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) - // Error connecting to NOTIFY_SOCKET - if err != nil { - return false, err - } - defer conn.Close() - - if _, err = conn.Write([]byte(state)); err != nil { - return false, err - } - return true, nil -} diff --git a/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go b/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go deleted file mode 100644 index 7a0e0d3..0000000 --- a/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package daemon - -import ( - "fmt" - "os" - "strconv" - "time" -) - -// SdWatchdogEnabled returns watchdog information for a service. -// Processes should call daemon.SdNotify(false, daemon.SdNotifyWatchdog) every -// time / 2. -// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC` and -// `WATCHDOG_PID` will be unconditionally unset. 
-// -// It returns one of the following: -// (0, nil) - watchdog isn't enabled or we aren't the watched PID. -// (0, err) - an error happened (e.g. error converting time). -// (time, nil) - watchdog is enabled and we can send ping. -// time is delay before inactive service will be killed. -func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) { - wusec := os.Getenv("WATCHDOG_USEC") - wpid := os.Getenv("WATCHDOG_PID") - if unsetEnvironment { - wusecErr := os.Unsetenv("WATCHDOG_USEC") - wpidErr := os.Unsetenv("WATCHDOG_PID") - if wusecErr != nil { - return 0, wusecErr - } - if wpidErr != nil { - return 0, wpidErr - } - } - - if wusec == "" { - return 0, nil - } - s, err := strconv.Atoi(wusec) - if err != nil { - return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err) - } - if s <= 0 { - return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number") - } - interval := time.Duration(s) * time.Microsecond - - if wpid == "" { - return interval, nil - } - p, err := strconv.Atoi(wpid) - if err != nil { - return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err) - } - if os.Getpid() != p { - return 0, nil - } - - return interval, nil -} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go deleted file mode 100644 index cff5af1..0000000 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ -package dbus - -import ( - "context" - "encoding/hex" - "fmt" - "os" - "strconv" - "strings" - "sync" - - "github.com/godbus/dbus/v5" -) - -const ( - alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` - num = `0123456789` - alphanum = alpha + num - signalBuffer = 100 -) - -// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped -func needsEscape(i int, b byte) bool { - // Escape everything that is not a-z-A-Z-0-9 - // Also escape 0-9 if it's the first character - return strings.IndexByte(alphanum, b) == -1 || - (i == 0 && strings.IndexByte(num, b) != -1) -} - -// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the -// rules that systemd uses for serializing special characters. -func PathBusEscape(path string) string { - // Special case the empty string - if len(path) == 0 { - return "_" - } - n := []byte{} - for i := 0; i < len(path); i++ { - c := path[i] - if needsEscape(i, c) { - e := fmt.Sprintf("_%x", c) - n = append(n, []byte(e)...) - } else { - n = append(n, c) - } - } - return string(n) -} - -// pathBusUnescape is the inverse of PathBusEscape. -func pathBusUnescape(path string) string { - if path == "_" { - return "" - } - n := []byte{} - for i := 0; i < len(path); i++ { - c := path[i] - if c == '_' && i+2 < len(path) { - res, err := hex.DecodeString(path[i+1 : i+3]) - if err == nil { - n = append(n, res...) - } - i += 2 - } else { - n = append(n, c) - } - } - return string(n) -} - -// Conn is a connection to systemd's dbus endpoint. 
-type Conn struct { - // sysconn/sysobj are only used to call dbus methods - sysconn *dbus.Conn - sysobj dbus.BusObject - - // sigconn/sigobj are only used to receive dbus signals - sigconn *dbus.Conn - sigobj dbus.BusObject - - jobListener struct { - jobs map[dbus.ObjectPath]chan<- string - sync.Mutex - } - subStateSubscriber struct { - updateCh chan<- *SubStateUpdate - errCh chan<- error - sync.Mutex - ignore map[dbus.ObjectPath]int64 - cleanIgnore int64 - } - propertiesSubscriber struct { - updateCh chan<- *PropertiesUpdate - errCh chan<- error - sync.Mutex - } -} - -// Deprecated: use NewWithContext instead. -func New() (*Conn, error) { - return NewWithContext(context.Background()) -} - -// NewWithContext establishes a connection to any available bus and authenticates. -// Callers should call Close() when done with the connection. -func NewWithContext(ctx context.Context) (*Conn, error) { - conn, err := NewSystemConnectionContext(ctx) - if err != nil && os.Geteuid() == 0 { - return NewSystemdConnectionContext(ctx) - } - return conn, err -} - -// Deprecated: use NewSystemConnectionContext instead. -func NewSystemConnection() (*Conn, error) { - return NewSystemConnectionContext(context.Background()) -} - -// NewSystemConnectionContext establishes a connection to the system bus and authenticates. -// Callers should call Close() when done with the connection. -func NewSystemConnectionContext(ctx context.Context) (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(ctx, dbus.SystemBusPrivate) - }) -} - -// Deprecated: use NewUserConnectionContext instead. -func NewUserConnection() (*Conn, error) { - return NewUserConnectionContext(context.Background()) -} - -// NewUserConnectionContext establishes a connection to the session bus and -// authenticates. This can be used to connect to systemd user instances. -// Callers should call Close() when done with the connection. 
-func NewUserConnectionContext(ctx context.Context) (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(ctx, dbus.SessionBusPrivate) - }) -} - -// Deprecated: use NewSystemdConnectionContext instead. -func NewSystemdConnection() (*Conn, error) { - return NewSystemdConnectionContext(context.Background()) -} - -// NewSystemdConnectionContext establishes a private, direct connection to systemd. -// This can be used for communicating with systemd without a dbus daemon. -// Callers should call Close() when done with the connection. -func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - // We skip Hello when talking directly to systemd. - return dbusAuthConnection(ctx, func(opts ...dbus.ConnOption) (*dbus.Conn, error) { - return dbus.Dial("unix:path=/run/systemd/private", opts...) - }) - }) -} - -// Close closes an established connection. -func (c *Conn) Close() { - c.sysconn.Close() - c.sigconn.Close() -} - -// NewConnection establishes a connection to a bus using a caller-supplied function. -// This allows connecting to remote buses through a user-supplied mechanism. -// The supplied function may be called multiple times, and should return independent connections. -// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, -// and any authentication should be handled by the function. 
-func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { - sysconn, err := dialBus() - if err != nil { - return nil, err - } - - sigconn, err := dialBus() - if err != nil { - sysconn.Close() - return nil, err - } - - c := &Conn{ - sysconn: sysconn, - sysobj: systemdObject(sysconn), - sigconn: sigconn, - sigobj: systemdObject(sigconn), - } - - c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) - c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) - - // Setup the listeners on jobs so that we can get completions - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") - - c.dispatch() - return c, nil -} - -// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager -// interface. The value is returned in its string representation, as defined at -// https://developer.gnome.org/glib/unstable/gvariant-text.html. -func (c *Conn) GetManagerProperty(prop string) (string, error) { - variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." 
+ prop) - if err != nil { - return "", err - } - return variant.String(), nil -} - -func dbusAuthConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := createBus(dbus.WithContext(ctx)) - if err != nil { - return nil, err - } - - // Only use EXTERNAL method, and hardcode the uid (not username) - // to avoid a username lookup (which requires a dynamically linked - // libc) - methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} - - err = conn.Auth(methods) - if err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func dbusAuthHelloConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := dbusAuthConnection(ctx, createBus) - if err != nil { - return nil, err - } - - if err = conn.Hello(); err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func systemdObject(conn *dbus.Conn) dbus.BusObject { - return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) -} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go deleted file mode 100644 index fa04afc..0000000 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go +++ /dev/null @@ -1,830 +0,0 @@ -// Copyright 2015, 2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dbus - -import ( - "context" - "errors" - "fmt" - "path" - "strconv" - - "github.com/godbus/dbus/v5" -) - -// Who can be used to specify which process to kill in the unit via the KillUnitWithTarget API -type Who string - -const ( - // All sends the signal to all processes in the unit - All Who = "all" - // Main sends the signal to the main process of the unit - Main Who = "main" - // Control sends the signal to the control process of the unit - Control Who = "control" -) - -func (c *Conn) jobComplete(signal *dbus.Signal) { - var id uint32 - var job dbus.ObjectPath - var unit string - var result string - dbus.Store(signal.Body, &id, &job, &unit, &result) - c.jobListener.Lock() - out, ok := c.jobListener.jobs[job] - if ok { - out <- result - delete(c.jobListener.jobs, job) - } - c.jobListener.Unlock() -} - -func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args ...interface{}) (int, error) { - if ch != nil { - c.jobListener.Lock() - defer c.jobListener.Unlock() - } - - var p dbus.ObjectPath - err := c.sysobj.CallWithContext(ctx, job, 0, args...).Store(&p) - if err != nil { - return 0, err - } - - if ch != nil { - c.jobListener.jobs[p] = ch - } - - // ignore error since 0 is fine if conversion fails - jobID, _ := strconv.Atoi(path.Base(string(p))) - - return jobID, nil -} - -// Deprecated: use StartUnitContext instead. -func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.StartUnitContext(context.Background(), name, mode, ch) -} - -// StartUnitContext enqueues a start job and depending jobs, if any (unless otherwise -// specified by the mode string). -// -// Takes the unit to activate, plus a mode string. The mode needs to be one of -// replace, fail, isolate, ignore-dependencies, ignore-requirements. If -// "replace" the call will start the unit and its dependencies, possibly -// replacing already queued jobs that conflict with this. 
If "fail" the call -// will start the unit and its dependencies, but will fail if this would change -// an already queued job. If "isolate" the call will start the unit in question -// and terminate all units that aren't dependencies of it. If -// "ignore-dependencies" it will start a unit but ignore all its dependencies. -// If "ignore-requirements" it will start a unit but only ignore the -// requirement dependencies. It is not recommended to make use of the latter -// two options. -// -// If the provided channel is non-nil, a result string will be sent to it upon -// job completion: one of done, canceled, timeout, failed, dependency, skipped. -// done indicates successful execution of a job. canceled indicates that a job -// has been canceled before it finished execution. timeout indicates that the -// job timeout was reached. failed indicates that the job failed. dependency -// indicates that a job this job has been depending on failed and the job hence -// has been removed too. skipped indicates that a job was skipped because it -// didn't apply to the units current state. -// -// If no error occurs, the ID of the underlying systemd job will be returned. There -// does exist the possibility for no error to be returned, but for the returned job -// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint -// should not be considered authoritative. -// -// If an error does occur, it will be returned to the user alongside a job ID of 0. -func (c *Conn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) -} - -// Deprecated: use StopUnitContext instead. -func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { - return c.StopUnitContext(context.Background(), name, mode, ch) -} - -// StopUnitContext is similar to StartUnitContext, but stops the specified unit -// rather than starting it. 
-func (c *Conn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) -} - -// Deprecated: use ReloadUnitContext instead. -func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { - return c.ReloadUnitContext(context.Background(), name, mode, ch) -} - -// ReloadUnitContext reloads a unit. Reloading is done only if the unit -// is already running, and fails otherwise. -func (c *Conn) ReloadUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) -} - -// Deprecated: use RestartUnitContext instead. -func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.RestartUnitContext(context.Background(), name, mode, ch) -} - -// RestartUnitContext restarts a service. If a service is restarted that isn't -// running it will be started. -func (c *Conn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) -} - -// Deprecated: use TryRestartUnitContext instead. -func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.TryRestartUnitContext(context.Background(), name, mode, ch) -} - -// TryRestartUnitContext is like RestartUnitContext, except that a service that -// isn't running is not affected by the restart. -func (c *Conn) TryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) -} - -// Deprecated: use ReloadOrRestartUnitContext instead. 
-func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.ReloadOrRestartUnitContext(context.Background(), name, mode, ch) -} - -// ReloadOrRestartUnitContext attempts a reload if the unit supports it and use -// a restart otherwise. -func (c *Conn) ReloadOrRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) -} - -// Deprecated: use ReloadOrTryRestartUnitContext instead. -func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.ReloadOrTryRestartUnitContext(context.Background(), name, mode, ch) -} - -// ReloadOrTryRestartUnitContext attempts a reload if the unit supports it, -// and use a "Try" flavored restart otherwise. -func (c *Conn) ReloadOrTryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) -} - -// Deprecated: use StartTransientUnitContext instead. -func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { - return c.StartTransientUnitContext(context.Background(), name, mode, properties, ch) -} - -// StartTransientUnitContext may be used to create and start a transient unit, which -// will be released as soon as it is not running or referenced anymore or the -// system is rebooted. name is the unit name including suffix, and must be -// unique. mode is the same as in StartUnitContext, properties contains properties -// of the unit. 
-func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) -} - -// Deprecated: use KillUnitContext instead. -func (c *Conn) KillUnit(name string, signal int32) { - c.KillUnitContext(context.Background(), name, signal) -} - -// KillUnitContext takes the unit name and a UNIX signal number to send. -// All of the unit's processes are killed. -func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) { - c.KillUnitWithTarget(ctx, name, All, signal) -} - -// KillUnitWithTarget is like KillUnitContext, but allows you to specify which -// process in the unit to send the signal to. -func (c *Conn) KillUnitWithTarget(ctx context.Context, name string, target Who, signal int32) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, string(target), signal).Store() -} - -// Deprecated: use ResetFailedUnitContext instead. -func (c *Conn) ResetFailedUnit(name string) error { - return c.ResetFailedUnitContext(context.Background(), name) -} - -// ResetFailedUnitContext resets the "failed" state of a specific unit. -func (c *Conn) ResetFailedUnitContext(ctx context.Context, name string) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() -} - -// Deprecated: use SystemStateContext instead. -func (c *Conn) SystemState() (*Property, error) { - return c.SystemStateContext(context.Background()) -} - -// SystemStateContext returns the systemd state. Equivalent to -// systemctl is-system-running. 
-func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) { - var err error - var prop dbus.Variant - - obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) - if err != nil { - return nil, err - } - - return &Property{Name: "SystemState", Value: prop}, nil -} - -// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface. -func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { - var err error - var props map[string]dbus.Variant - - if !path.IsValid() { - return nil, fmt.Errorf("invalid unit name: %v", path) - } - - obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) - if err != nil { - return nil, err - } - - out := make(map[string]interface{}, len(props)) - for k, v := range props { - out[k] = v.Value() - } - - return out, nil -} - -// Deprecated: use GetUnitPropertiesContext instead. -func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { - return c.GetUnitPropertiesContext(context.Background(), unit) -} - -// GetUnitPropertiesContext takes the (unescaped) unit name and returns all of -// its dbus object properties. -func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") -} - -// Deprecated: use GetUnitPathPropertiesContext instead. 
-func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { - return c.GetUnitPathPropertiesContext(context.Background(), path) -} - -// GetUnitPathPropertiesContext takes the (escaped) unit path and returns all -// of its dbus object properties. -func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) { - return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") -} - -// Deprecated: use GetAllPropertiesContext instead. -func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { - return c.GetAllPropertiesContext(context.Background(), unit) -} - -// GetAllPropertiesContext takes the (unescaped) unit name and returns all of -// its dbus object properties. -func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(ctx, path, "") -} - -func (c *Conn) getProperty(ctx context.Context, unit string, dbusInterface string, propertyName string) (*Property, error) { - var err error - var prop dbus.Variant - - path := unitPath(unit) - if !path.IsValid() { - return nil, errors.New("invalid unit name: " + unit) - } - - obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) - if err != nil { - return nil, err - } - - return &Property{Name: propertyName, Value: prop}, nil -} - -// Deprecated: use GetUnitPropertyContext instead. -func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { - return c.GetUnitPropertyContext(context.Background(), unit, propertyName) -} - -// GetUnitPropertyContext takes an (unescaped) unit name, and a property name, -// and returns the property value. 
-func (c *Conn) GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*Property, error) { - return c.getProperty(ctx, unit, "org.freedesktop.systemd1.Unit", propertyName) -} - -// Deprecated: use GetServicePropertyContext instead. -func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { - return c.GetServicePropertyContext(context.Background(), service, propertyName) -} - -// GetServiceProperty returns property for given service name and property name. -func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) { - return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName) -} - -// Deprecated: use GetUnitTypePropertiesContext instead. -func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { - return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType) -} - -// GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type. -// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope. -// Returns "dbus.Error: Unknown interface" error if the unitType is not the correct type of the unit. -func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType) -} - -// Deprecated: use SetUnitPropertiesContext instead. -func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { - return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...) -} - -// SetUnitPropertiesContext may be used to modify certain unit properties at runtime. -// Not all properties may be changed at runtime, but many resource management -// settings (primarily those in systemd.cgroup(5)) may. 
The changes are applied -// instantly, and stored on disk for future boots, unless runtime is true, in which -// case the settings only apply until the next reboot. name is the name of the unit -// to modify. properties are the settings to set, encoded as an array of property -// name and value pairs. -func (c *Conn) SetUnitPropertiesContext(ctx context.Context, name string, runtime bool, properties ...Property) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() -} - -// Deprecated: use GetUnitTypePropertyContext instead. -func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { - return c.GetUnitTypePropertyContext(context.Background(), unit, unitType, propertyName) -} - -// GetUnitTypePropertyContext takes a property name, a unit name, and a unit type, -// and returns a property value. For valid values of unitType, see GetUnitTypePropertiesContext. -func (c *Conn) GetUnitTypePropertyContext(ctx context.Context, unit string, unitType string, propertyName string) (*Property, error) { - return c.getProperty(ctx, unit, "org.freedesktop.systemd1."+unitType, propertyName) -} - -type UnitStatus struct { - Name string // The primary unit name as string - Description string // The human readable description string - LoadState string // The load state (i.e. whether the unit file has been loaded successfully) - ActiveState string // The active state (i.e. whether the unit is currently started or not) - SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) - Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. 
- Path dbus.ObjectPath // The unit object path - JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise - JobType string // The job type as string - JobPath dbus.ObjectPath // The job object path -} - -type storeFunc func(retvalues ...interface{}) error - -func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { - result := make([][]interface{}, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - status := make([]UnitStatus, len(result)) - statusInterface := make([]interface{}, len(status)) - for i := range status { - statusInterface[i] = &status[i] - } - - err = dbus.Store(resultInterface, statusInterface...) - if err != nil { - return nil, err - } - - return status, nil -} - -// Deprecated: use ListUnitsContext instead. -func (c *Conn) ListUnits() ([]UnitStatus, error) { - return c.ListUnitsContext(context.Background()) -} - -// ListUnitsContext returns an array with all currently loaded units. Note that -// units may be known by multiple names at the same time, and hence there might -// be more unit names loaded than actual units behind them. -// Also note that a unit is only loaded if it is active and/or enabled. -// Units that are both disabled and inactive will thus not be returned. -func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store) -} - -// Deprecated: use ListUnitsFilteredContext instead. -func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { - return c.ListUnitsFilteredContext(context.Background(), states) -} - -// ListUnitsFilteredContext returns an array with units filtered by state. -// It takes a list of units' statuses to filter. 
-func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) -} - -// Deprecated: use ListUnitsByPatternsContext instead. -func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { - return c.ListUnitsByPatternsContext(context.Background(), states, patterns) -} - -// ListUnitsByPatternsContext returns an array with units. -// It takes a list of units' statuses and names to filter. -// Note that units may be known by multiple names at the same time, -// and hence there might be more unit names loaded than actual units behind them. -func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) -} - -// Deprecated: use ListUnitsByNamesContext instead. -func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { - return c.ListUnitsByNamesContext(context.Background(), units) -} - -// ListUnitsByNamesContext returns an array with units. It takes a list of units' -// names and returns an UnitStatus array. Comparing to ListUnitsByPatternsContext -// method, this method returns statuses even for inactive or non-existing -// units. Input array should contain exact unit names, but not patterns. -// -// Requires systemd v230 or higher. 
-func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) -} - -type UnitFile struct { - Path string - Type string -} - -func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { - result := make([][]interface{}, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - files := make([]UnitFile, len(result)) - fileInterface := make([]interface{}, len(files)) - for i := range files { - fileInterface[i] = &files[i] - } - - err = dbus.Store(resultInterface, fileInterface...) - if err != nil { - return nil, err - } - - return files, nil -} - -// Deprecated: use ListUnitFilesContext instead. -func (c *Conn) ListUnitFiles() ([]UnitFile, error) { - return c.ListUnitFilesContext(context.Background()) -} - -// ListUnitFiles returns an array of all available units on disk. -func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) -} - -// Deprecated: use ListUnitFilesByPatternsContext instead. -func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { - return c.ListUnitFilesByPatternsContext(context.Background(), states, patterns) -} - -// ListUnitFilesByPatternsContext returns an array of all available units on disk matched the patterns. 
-func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) -} - -type LinkUnitFileChange EnableUnitFileChange - -// Deprecated: use LinkUnitFilesContext instead. -func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { - return c.LinkUnitFilesContext(context.Background(), files, runtime, force) -} - -// LinkUnitFilesContext links unit files (that are located outside of the -// usual unit search paths) into the unit search path. -// -// It takes a list of absolute paths to unit files to link and two -// booleans. -// -// The first boolean controls whether the unit shall be -// enabled for runtime only (true, /run), or persistently (false, -// /etc). -// -// The second controls whether symlinks pointing to other units shall -// be replaced if necessary. -// -// This call returns a list of the changes made. The list consists of -// structures with three strings: the type of the change (one of symlink -// or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]LinkUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) 
- if err != nil { - return nil, err - } - - return changes, nil -} - -// Deprecated: use EnableUnitFilesContext instead. -func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { - return c.EnableUnitFilesContext(context.Background(), files, runtime, force) -} - -// EnableUnitFilesContext may be used to enable one or more units in the system -// (by creating symlinks to them in /etc or /run). -// -// It takes a list of unit files to enable (either just file names or full -// absolute paths if the unit files are residing outside the usual unit -// search paths), and two booleans: the first controls whether the unit shall -// be enabled for runtime only (true, /run), or persistently (false, /etc). -// The second one controls whether symlinks pointing to other units shall -// be replaced if necessary. -// -// This call returns one boolean and an array with the changes made. The -// boolean signals whether the unit files contained any enablement -// information (i.e. an [Install]) section. The changes list consists of -// structures with three strings: the type of the change (one of symlink -// or unlink), the file name of the symlink and the destination of the -// symlink. 
-func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { - var carries_install_info bool - - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) - if err != nil { - return false, nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]EnableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return false, nil, err - } - - return carries_install_info, changes, nil -} - -type EnableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Deprecated: use DisableUnitFilesContext instead. -func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { - return c.DisableUnitFilesContext(context.Background(), files, runtime) -} - -// DisableUnitFilesContext may be used to disable one or more units in the -// system (by removing symlinks to them from /etc or /run). -// -// It takes a list of unit files to disable (either just file names or full -// absolute paths if the unit files are residing outside the usual unit -// search paths), and one boolean: whether the unit was enabled for runtime -// only (true, /run), or persistently (false, /etc). -// -// This call returns an array with the changes made. The changes list -// consists of structures with three strings: the type of the change (one of -// symlink or unlink), the file name of the symlink and the destination of the -// symlink. 
-func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]DisableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type DisableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Deprecated: use MaskUnitFilesContext instead. -func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { - return c.MaskUnitFilesContext(context.Background(), files, runtime, force) -} - -// MaskUnitFilesContext masks one or more units in the system. -// -// The files argument contains a list of units to mask (either just file names -// or full absolute paths if the unit files are residing outside the usual unit -// search paths). -// -// The runtime argument is used to specify whether the unit was enabled for -// runtime only (true, /run/systemd/..), or persistently (false, -// /etc/systemd/..). 
-func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]MaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type MaskUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Deprecated: use UnmaskUnitFilesContext instead. -func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { - return c.UnmaskUnitFilesContext(context.Background(), files, runtime) -} - -// UnmaskUnitFilesContext unmasks one or more units in the system. -// -// It takes the list of unit files to mask (either just file names or full -// absolute paths if the unit files are residing outside the usual unit search -// paths), and a boolean runtime flag to specify whether the unit was enabled -// for runtime only (true, /run/systemd/..), or persistently (false, -// /etc/systemd/..). 
-func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]UnmaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type UnmaskUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Deprecated: use ReloadContext instead. -func (c *Conn) Reload() error { - return c.ReloadContext(context.Background()) -} - -// ReloadContext instructs systemd to scan for and reload unit files. This is -// an equivalent to systemctl daemon-reload. -func (c *Conn) ReloadContext(ctx context.Context) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.Reload", 0).Store() -} - -func unitPath(name string) dbus.ObjectPath { - return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) -} - -// unitName returns the unescaped base element of the supplied escaped path. -func unitName(dpath dbus.ObjectPath) string { - return pathBusUnescape(path.Base(string(dpath))) -} - -// JobStatus holds a currently queued job definition. 
-type JobStatus struct { - Id uint32 // The numeric job id - Unit string // The primary unit name for this job - JobType string // The job type as string - Status string // The job state as string - JobPath dbus.ObjectPath // The job object path - UnitPath dbus.ObjectPath // The unit object path -} - -// Deprecated: use ListJobsContext instead. -func (c *Conn) ListJobs() ([]JobStatus, error) { - return c.ListJobsContext(context.Background()) -} - -// ListJobsContext returns an array with all currently queued jobs. -func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) { - return c.listJobsInternal(ctx) -} - -func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) { - result := make([][]interface{}, 0) - if err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store(&result); err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - status := make([]JobStatus, len(result)) - statusInterface := make([]interface{}, len(status)) - for i := range status { - statusInterface[i] = &status[i] - } - - if err := dbus.Store(resultInterface, statusInterface...); err != nil { - return nil, err - } - - return status, nil -} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go b/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go deleted file mode 100644 index fb42b62..0000000 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "github.com/godbus/dbus/v5" -) - -// From the systemd docs: -// -// The properties array of StartTransientUnit() may take many of the settings -// that may also be configured in unit files. Not all parameters are currently -// accepted though, but we plan to cover more properties with future release. -// Currently you may set the Description, Slice and all dependency types of -// units, as well as RemainAfterExit, ExecStart for service units, -// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, -// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, -// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, -// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map -// directly to their counterparts in unit files and as normal D-Bus object -// properties. The exception here is the PIDs field of scope units which is -// used for construction of the scope only and specifies the initial PIDs to -// add to the scope object. - -type Property struct { - Name string - Value dbus.Variant -} - -type PropertyCollection struct { - Name string - Properties []Property -} - -type execStart struct { - Path string // the binary path to execute - Args []string // an array with all arguments to pass to the executed command, starting with argument 0 - UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly -} - -// PropExecStart sets the ExecStart service property. 
The first argument is a -// slice with the binary path to execute followed by the arguments to pass to -// the executed command. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= -func PropExecStart(command []string, uncleanIsFailure bool) Property { - execStarts := []execStart{ - { - Path: command[0], - Args: command, - UncleanIsFailure: uncleanIsFailure, - }, - } - - return Property{ - Name: "ExecStart", - Value: dbus.MakeVariant(execStarts), - } -} - -// PropRemainAfterExit sets the RemainAfterExit service property. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= -func PropRemainAfterExit(b bool) Property { - return Property{ - Name: "RemainAfterExit", - Value: dbus.MakeVariant(b), - } -} - -// PropType sets the Type service property. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= -func PropType(t string) Property { - return Property{ - Name: "Type", - Value: dbus.MakeVariant(t), - } -} - -// PropDescription sets the Description unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= -func PropDescription(desc string) Property { - return Property{ - Name: "Description", - Value: dbus.MakeVariant(desc), - } -} - -func propDependency(name string, units []string) Property { - return Property{ - Name: name, - Value: dbus.MakeVariant(units), - } -} - -// PropRequires sets the Requires unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= -func PropRequires(units ...string) Property { - return propDependency("Requires", units) -} - -// PropRequiresOverridable sets the RequiresOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= -func PropRequiresOverridable(units ...string) Property { - return propDependency("RequiresOverridable", units) -} - -// PropRequisite sets the Requisite unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= -func PropRequisite(units ...string) Property { - return propDependency("Requisite", units) -} - -// PropRequisiteOverridable sets the RequisiteOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= -func PropRequisiteOverridable(units ...string) Property { - return propDependency("RequisiteOverridable", units) -} - -// PropWants sets the Wants unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= -func PropWants(units ...string) Property { - return propDependency("Wants", units) -} - -// PropBindsTo sets the BindsTo unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= -func PropBindsTo(units ...string) Property { - return propDependency("BindsTo", units) -} - -// PropRequiredBy sets the RequiredBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= -func PropRequiredBy(units ...string) Property { - return propDependency("RequiredBy", units) -} - -// PropRequiredByOverridable sets the RequiredByOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= -func PropRequiredByOverridable(units ...string) Property { - return propDependency("RequiredByOverridable", units) -} - -// PropWantedBy sets the WantedBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= -func PropWantedBy(units ...string) Property { - return propDependency("WantedBy", units) -} - -// PropBoundBy sets the BoundBy unit property. See -// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= -func PropBoundBy(units ...string) Property { - return propDependency("BoundBy", units) -} - -// PropConflicts sets the Conflicts unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= -func PropConflicts(units ...string) Property { - return propDependency("Conflicts", units) -} - -// PropConflictedBy sets the ConflictedBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= -func PropConflictedBy(units ...string) Property { - return propDependency("ConflictedBy", units) -} - -// PropBefore sets the Before unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= -func PropBefore(units ...string) Property { - return propDependency("Before", units) -} - -// PropAfter sets the After unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= -func PropAfter(units ...string) Property { - return propDependency("After", units) -} - -// PropOnFailure sets the OnFailure unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= -func PropOnFailure(units ...string) Property { - return propDependency("OnFailure", units) -} - -// PropTriggers sets the Triggers unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= -func PropTriggers(units ...string) Property { - return propDependency("Triggers", units) -} - -// PropTriggeredBy sets the TriggeredBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= -func PropTriggeredBy(units ...string) Property { - return propDependency("TriggeredBy", units) -} - -// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= -func PropPropagatesReloadTo(units ...string) Property { - return propDependency("PropagatesReloadTo", units) -} - -// PropRequiresMountsFor sets the RequiresMountsFor unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= -func PropRequiresMountsFor(units ...string) Property { - return propDependency("RequiresMountsFor", units) -} - -// PropSlice sets the Slice unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= -func PropSlice(slice string) Property { - return Property{ - Name: "Slice", - Value: dbus.MakeVariant(slice), - } -} - -// PropPids sets the PIDs field of scope units used in the initial construction -// of the scope only and specifies the initial PIDs to add to the scope object. -// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties -func PropPids(pids ...uint32) Property { - return Property{ - Name: "PIDs", - Value: dbus.MakeVariant(pids), - } -} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/set.go deleted file mode 100644 index 17c5d48..0000000 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/set.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dbus - -type set struct { - data map[string]bool -} - -func (s *set) Add(value string) { - s.data[value] = true -} - -func (s *set) Remove(value string) { - delete(s.data, value) -} - -func (s *set) Contains(value string) (exists bool) { - _, exists = s.data[value] - return -} - -func (s *set) Length() int { - return len(s.data) -} - -func (s *set) Values() (values []string) { - for val := range s.data { - values = append(values, val) - } - return -} - -func newSet() *set { - return &set{make(map[string]bool)} -} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go deleted file mode 100644 index 7e370fe..0000000 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "errors" - "log" - "time" - - "github.com/godbus/dbus/v5" -) - -const ( - cleanIgnoreInterval = int64(10 * time.Second) - ignoreInterval = int64(30 * time.Millisecond) -) - -// Subscribe sets up this connection to subscribe to all systemd dbus events. -// This is required before calling SubscribeUnits. When the connection closes -// systemd will automatically stop sending signals so there is no need to -// explicitly call Unsubscribe(). 
-func (c *Conn) Subscribe() error { - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") - - return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() -} - -// Unsubscribe this connection from systemd dbus events. -func (c *Conn) Unsubscribe() error { - return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() -} - -func (c *Conn) dispatch() { - ch := make(chan *dbus.Signal, signalBuffer) - - c.sigconn.Signal(ch) - - go func() { - for { - signal, ok := <-ch - if !ok { - return - } - - if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { - c.jobComplete(signal) - } - - if c.subStateSubscriber.updateCh == nil && - c.propertiesSubscriber.updateCh == nil { - continue - } - - var unitPath dbus.ObjectPath - switch signal.Name { - case "org.freedesktop.systemd1.Manager.JobRemoved": - unitName := signal.Body[2].(string) - c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) - case "org.freedesktop.systemd1.Manager.UnitNew": - unitPath = signal.Body[1].(dbus.ObjectPath) - case "org.freedesktop.DBus.Properties.PropertiesChanged": - if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { - unitPath = signal.Path - - if len(signal.Body) >= 2 { - if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { - c.sendPropertiesUpdate(unitPath, changed) - } - } - } - } - - if unitPath == dbus.ObjectPath("") { - continue - } - - c.sendSubStateUpdate(unitPath) - } - }() -} - -// SubscribeUnits returns two unbuffered channels which will receive all changed units every -// interval. Deleted units are sent as nil. 
-func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { - return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) -} - -// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer -// size of the channels, the comparison function for detecting changes and a filter -// function for cutting down on the noise that your channel receives. -func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { - old := make(map[string]*UnitStatus) - statusChan := make(chan map[string]*UnitStatus, buffer) - errChan := make(chan error, buffer) - - go func() { - for { - timerChan := time.After(interval) - - units, err := c.ListUnits() - if err == nil { - cur := make(map[string]*UnitStatus) - for i := range units { - if filterUnit != nil && filterUnit(units[i].Name) { - continue - } - cur[units[i].Name] = &units[i] - } - - // add all new or changed units - changed := make(map[string]*UnitStatus) - for n, u := range cur { - if oldU, ok := old[n]; !ok || isChanged(oldU, u) { - changed[n] = u - } - delete(old, n) - } - - // add all deleted units - for oldN := range old { - changed[oldN] = nil - } - - old = cur - - if len(changed) != 0 { - statusChan <- changed - } - } else { - errChan <- err - } - - <-timerChan - } - }() - - return statusChan, errChan -} - -type SubStateUpdate struct { - UnitName string - SubState string -} - -// SetSubStateSubscriber writes to updateCh when any unit's substate changes. -// Although this writes to updateCh on every state change, the reported state -// may be more recent than the change that generated it (due to an unavoidable -// race in the systemd dbus interface). 
That is, this method provides a good -// way to keep a current view of all units' states, but is not guaranteed to -// show every state transition they go through. Furthermore, state changes -// will only be written to the channel with non-blocking writes. If updateCh -// is full, it attempts to write an error to errCh; if errCh is full, the error -// passes silently. -func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { - if c == nil { - msg := "nil receiver" - select { - case errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } - - c.subStateSubscriber.Lock() - defer c.subStateSubscriber.Unlock() - c.subStateSubscriber.updateCh = updateCh - c.subStateSubscriber.errCh = errCh -} - -func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) { - c.subStateSubscriber.Lock() - defer c.subStateSubscriber.Unlock() - - if c.subStateSubscriber.updateCh == nil { - return - } - - isIgnored := c.shouldIgnore(unitPath) - defer c.cleanIgnore() - if isIgnored { - return - } - - info, err := c.GetUnitPathProperties(unitPath) - if err != nil { - select { - case c.subStateSubscriber.errCh <- err: - default: - log.Printf("full error channel while reporting: %s\n", err) - } - return - } - defer c.updateIgnore(unitPath, info) - - name, ok := info["Id"].(string) - if !ok { - msg := "failed to cast info.Id" - select { - case c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", err) - } - return - } - substate, ok := info["SubState"].(string) - if !ok { - msg := "failed to cast info.SubState" - select { - case c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } - - update := &SubStateUpdate{name, substate} - select { - case c.subStateSubscriber.updateCh <- update: - default: - msg := "update channel is full" - select { - case 
c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } -} - -// The ignore functions work around a wart in the systemd dbus interface. -// Requesting the properties of an unloaded unit will cause systemd to send a -// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's -// properties on UnitNew (as that's the only indication of a new unit coming up -// for the first time), we would enter an infinite loop if we did not attempt -// to detect and ignore these spurious signals. The signal themselves are -// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an -// unloaded unit's signals for a short time after requesting its properties. -// This means that we will miss e.g. a transient unit being restarted -// *immediately* upon failure and also a transient unit being started -// immediately after requesting its status (with systemctl status, for example, -// because this causes a UnitNew signal to be sent which then causes us to fetch -// the properties). 
- -func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { - t, ok := c.subStateSubscriber.ignore[path] - return ok && t >= time.Now().UnixNano() -} - -func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { - loadState, ok := info["LoadState"].(string) - if !ok { - return - } - - // unit is unloaded - it will trigger bad systemd dbus behavior - if loadState == "not-found" { - c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval - } -} - -// without this, ignore would grow unboundedly over time -func (c *Conn) cleanIgnore() { - now := time.Now().UnixNano() - if c.subStateSubscriber.cleanIgnore < now { - c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval - - for p, t := range c.subStateSubscriber.ignore { - if t < now { - delete(c.subStateSubscriber.ignore, p) - } - } - } -} - -// PropertiesUpdate holds a map of a unit's changed properties -type PropertiesUpdate struct { - UnitName string - Changed map[string]dbus.Variant -} - -// SetPropertiesSubscriber writes to updateCh when any unit's properties -// change. Every property change reported by systemd will be sent; that is, no -// transitions will be "missed" (as they might be with SetSubStateSubscriber). -// However, state changes will only be written to the channel with non-blocking -// writes. If updateCh is full, it attempts to write an error to errCh; if -// errCh is full, the error passes silently. 
-func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { - c.propertiesSubscriber.Lock() - defer c.propertiesSubscriber.Unlock() - c.propertiesSubscriber.updateCh = updateCh - c.propertiesSubscriber.errCh = errCh -} - -// we don't need to worry about shouldIgnore() here because -// sendPropertiesUpdate doesn't call GetProperties() -func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { - c.propertiesSubscriber.Lock() - defer c.propertiesSubscriber.Unlock() - - if c.propertiesSubscriber.updateCh == nil { - return - } - - update := &PropertiesUpdate{unitName(unitPath), changedProps} - - select { - case c.propertiesSubscriber.updateCh <- update: - default: - msg := "update channel is full" - select { - case c.propertiesSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } -} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go deleted file mode 100644 index 5b408d5..0000000 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dbus - -import ( - "time" -) - -// SubscriptionSet returns a subscription set which is like conn.Subscribe but -// can filter to only return events for a set of units. -type SubscriptionSet struct { - *set - conn *Conn -} - -func (s *SubscriptionSet) filter(unit string) bool { - return !s.Contains(unit) -} - -// Subscribe starts listening for dbus events for all of the units in the set. -// Returns channels identical to conn.SubscribeUnits. -func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { - // TODO: Make fully evented by using systemd 209 with properties changed values - return s.conn.SubscribeUnitsCustom(time.Second, 0, - mismatchUnitStatus, - func(unit string) bool { return s.filter(unit) }, - ) -} - -// NewSubscriptionSet returns a new subscription set. -func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { - return &SubscriptionSet{newSet(), conn} -} - -// mismatchUnitStatus returns true if the provided UnitStatus objects -// are not equivalent. false is returned if the objects are equivalent. -// Only the Name, Description and state-related fields are used in -// the comparison. 
-func mismatchUnitStatus(u1, u2 *UnitStatus) bool { - return u1.Name != u2.Name || - u1.Description != u2.Description || - u1.LoadState != u2.LoadState || - u1.ActiveState != u2.ActiveState || - u1.SubState != u2.SubState -} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md deleted file mode 100644 index 1cade6c..0000000 --- a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Brian Goff - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go deleted file mode 100644 index b480056..0000000 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +++ /dev/null @@ -1,14 +0,0 @@ -package md2man - -import ( - "github.com/russross/blackfriday/v2" -) - -// Render converts a markdown document into a roff formatted document. 
-func Render(doc []byte) []byte { - renderer := NewRoffRenderer() - - return blackfriday.Run(doc, - []blackfriday.Option{blackfriday.WithRenderer(renderer), - blackfriday.WithExtensions(renderer.GetExtensions())}...) -} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go deleted file mode 100644 index 0668a66..0000000 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ /dev/null @@ -1,345 +0,0 @@ -package md2man - -import ( - "fmt" - "io" - "os" - "strings" - - "github.com/russross/blackfriday/v2" -) - -// roffRenderer implements the blackfriday.Renderer interface for creating -// roff format (manpages) from markdown text -type roffRenderer struct { - extensions blackfriday.Extensions - listCounters []int - firstHeader bool - defineTerm bool - listDepth int -} - -const ( - titleHeader = ".TH " - topLevelHeader = "\n\n.SH " - secondLevelHdr = "\n.SH " - otherHeader = "\n.SS " - crTag = "\n" - emphTag = "\\fI" - emphCloseTag = "\\fP" - strongTag = "\\fB" - strongCloseTag = "\\fP" - breakTag = "\n.br\n" - paraTag = "\n.PP\n" - hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" - linkTag = "\n\\[la]" - linkCloseTag = "\\[ra]" - codespanTag = "\\fB\\fC" - codespanCloseTag = "\\fR" - codeTag = "\n.PP\n.RS\n\n.nf\n" - codeCloseTag = "\n.fi\n.RE\n" - quoteTag = "\n.PP\n.RS\n" - quoteCloseTag = "\n.RE\n" - listTag = "\n.RS\n" - listCloseTag = "\n.RE\n" - arglistTag = "\n.TP\n" - tableStart = "\n.TS\nallbox;\n" - tableEnd = ".TE\n" - tableCellStart = "T{\n" - tableCellEnd = "\nT}\n" -) - -// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents -// from markdown -func NewRoffRenderer() *roffRenderer { // nolint: golint - var extensions blackfriday.Extensions - - extensions |= blackfriday.NoIntraEmphasis - extensions |= blackfriday.Tables - extensions |= blackfriday.FencedCode - extensions |= blackfriday.SpaceHeadings - extensions |= blackfriday.Footnotes - extensions |= 
blackfriday.Titleblock - extensions |= blackfriday.DefinitionLists - return &roffRenderer{ - extensions: extensions, - } -} - -// GetExtensions returns the list of extensions used by this renderer implementation -func (r *roffRenderer) GetExtensions() blackfriday.Extensions { - return r.extensions -} - -// RenderHeader handles outputting the header at document start -func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { - // disable hyphenation - out(w, ".nh\n") -} - -// RenderFooter handles outputting the footer at the document end; the roff -// renderer has no footer information -func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { -} - -// RenderNode is called for each node in a markdown document; based on the node -// type the equivalent roff output is sent to the writer -func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { - - var walkAction = blackfriday.GoToNext - - switch node.Type { - case blackfriday.Text: - r.handleText(w, node, entering) - case blackfriday.Softbreak: - out(w, crTag) - case blackfriday.Hardbreak: - out(w, breakTag) - case blackfriday.Emph: - if entering { - out(w, emphTag) - } else { - out(w, emphCloseTag) - } - case blackfriday.Strong: - if entering { - out(w, strongTag) - } else { - out(w, strongCloseTag) - } - case blackfriday.Link: - if !entering { - out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag) - } - case blackfriday.Image: - // ignore images - walkAction = blackfriday.SkipChildren - case blackfriday.Code: - out(w, codespanTag) - escapeSpecialChars(w, node.Literal) - out(w, codespanCloseTag) - case blackfriday.Document: - break - case blackfriday.Paragraph: - // roff .PP markers break lists - if r.listDepth > 0 { - return blackfriday.GoToNext - } - if entering { - out(w, paraTag) - } else { - out(w, crTag) - } - case blackfriday.BlockQuote: - if entering { - out(w, quoteTag) - } else { - out(w, quoteCloseTag) - } - 
case blackfriday.Heading: - r.handleHeading(w, node, entering) - case blackfriday.HorizontalRule: - out(w, hruleTag) - case blackfriday.List: - r.handleList(w, node, entering) - case blackfriday.Item: - r.handleItem(w, node, entering) - case blackfriday.CodeBlock: - out(w, codeTag) - escapeSpecialChars(w, node.Literal) - out(w, codeCloseTag) - case blackfriday.Table: - r.handleTable(w, node, entering) - case blackfriday.TableCell: - r.handleTableCell(w, node, entering) - case blackfriday.TableHead: - case blackfriday.TableBody: - case blackfriday.TableRow: - // no action as cell entries do all the nroff formatting - return blackfriday.GoToNext - default: - fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) - } - return walkAction -} - -func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - // handle special roff table cell text encapsulation - if node.Parent.Type == blackfriday.TableCell { - if len(node.Literal) > 30 { - start = tableCellStart - end = tableCellEnd - } else { - // end rows that aren't terminated by "tableCellEnd" with a cr if end of row - if node.Parent.Next == nil && !node.Parent.IsHeader { - end = crTag - } - } - } - out(w, start) - escapeSpecialChars(w, node.Literal) - out(w, end) -} - -func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - switch node.Level { - case 1: - if !r.firstHeader { - out(w, titleHeader) - r.firstHeader = true - break - } - out(w, topLevelHeader) - case 2: - out(w, secondLevelHdr) - default: - out(w, otherHeader) - } - } -} - -func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { - openTag := listTag - closeTag := listCloseTag - if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // tags for definition lists handled within Item node - openTag = "" - closeTag = "" - } - if entering { - r.listDepth++ - if 
node.ListFlags&blackfriday.ListTypeOrdered != 0 { - r.listCounters = append(r.listCounters, 1) - } - out(w, openTag) - } else { - if node.ListFlags&blackfriday.ListTypeOrdered != 0 { - r.listCounters = r.listCounters[:len(r.listCounters)-1] - } - out(w, closeTag) - r.listDepth-- - } -} - -func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - if node.ListFlags&blackfriday.ListTypeOrdered != 0 { - out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) - r.listCounters[len(r.listCounters)-1]++ - } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // state machine for handling terms and following definitions - // since blackfriday does not distinguish them properly, nor - // does it seperate them into separate lists as it should - if !r.defineTerm { - out(w, arglistTag) - r.defineTerm = true - } else { - r.defineTerm = false - } - } else { - out(w, ".IP \\(bu 2\n") - } - } else { - out(w, "\n") - } -} - -func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - out(w, tableStart) - //call walker to count cells (and rows?) 
so format section can be produced - columns := countColumns(node) - out(w, strings.Repeat("l ", columns)+"\n") - out(w, strings.Repeat("l ", columns)+".\n") - } else { - out(w, tableEnd) - } -} - -func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - if node.IsHeader { - start = codespanTag - end = codespanCloseTag - } - if entering { - if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { - out(w, "\t"+start) - } else { - out(w, start) - } - } else { - // need to carriage return if we are at the end of the header row - if node.IsHeader && node.Next == nil { - end = end + crTag - } - out(w, end) - } -} - -// because roff format requires knowing the column count before outputting any table -// data we need to walk a table tree and count the columns -func countColumns(node *blackfriday.Node) int { - var columns int - - node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { - switch node.Type { - case blackfriday.TableRow: - if !entering { - return blackfriday.Terminate - } - case blackfriday.TableCell: - if entering { - columns++ - } - default: - } - return blackfriday.GoToNext - }) - return columns -} - -func out(w io.Writer, output string) { - io.WriteString(w, output) // nolint: errcheck -} - -func needsBackslash(c byte) bool { - for _, r := range []byte("-_&\\~") { - if c == r { - return true - } - } - return false -} - -func escapeSpecialChars(w io.Writer, text []byte) { - for i := 0; i < len(text); i++ { - // escape initial apostrophe or period - if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { - out(w, "\\&") - } - - // directly copy normal characters - org := i - - for i < len(text) && !needsBackslash(text[i]) { - i++ - } - if i > org { - w.Write(text[org:i]) // nolint: errcheck - } - - // escape a character - if i >= len(text) { - break - } - - w.Write([]byte{'\\', text[i]}) // nolint: errcheck - } -} diff --git 
a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96..0000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 7929947..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. - flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations. -var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. 
- ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. -func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. - t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. 
- flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. - vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. - for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. 
-// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce9..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. 
-var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. 
However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. 
-func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. 
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. 
-func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. -func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. 
- DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. - DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. 
Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. 
See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. 
It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. 
The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. 
- -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) 
- spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. 
- - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... 
| - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. 
- -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index f78d89f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. 
- cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. 
- nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. 
- case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. 
It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. 
-func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) 
-} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. 
- for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. - if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. 
It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. 
- if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/docker/go-events/.gitignore b/vendor/github.com/docker/go-events/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/vendor/github.com/docker/go-events/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/docker/go-events/CONTRIBUTING.md b/vendor/github.com/docker/go-events/CONTRIBUTING.md deleted file mode 100644 index d813af7..0000000 --- a/vendor/github.com/docker/go-events/CONTRIBUTING.md +++ /dev/null @@ -1,70 +0,0 @@ -# Contributing to Docker open source projects - -Want to hack on go-events? Awesome! Here are instructions to get you started. - -go-events is part of the [Docker](https://www.docker.com) project, and -follows the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. 
- -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -For an in-depth description of our contribution process, visit the -contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-events/LICENSE b/vendor/github.com/docker/go-events/LICENSE deleted file mode 100644 index 6d630cf..0000000 --- a/vendor/github.com/docker/go-events/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-events/MAINTAINERS b/vendor/github.com/docker/go-events/MAINTAINERS deleted file mode 100644 index e414d82..0000000 --- a/vendor/github.com/docker/go-events/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-events maintainers file -# -# This file describes who runs the docker/go-events project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "aaronlehmann", - "aluzzardi", - "lk4d4", - "stevvooe", - ] - -[people] - -# A reference list of all people associated with the project. 
-# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.aluzzardi] - Name = "Andrea Luzzardi" - Email = "al@docker.com" - GitHub = "aluzzardi" - - [people.lk4d4] - Name = "Alexander Morozov" - Email = "lk4d4@docker.com" - GitHub = "lk4d4" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/vendor/github.com/docker/go-events/README.md b/vendor/github.com/docker/go-events/README.md deleted file mode 100644 index 0acafc2..0000000 --- a/vendor/github.com/docker/go-events/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# Docker Events Package - -[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events) -[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events) - -The Docker `events` package implements a composable event distribution package -for Go. - -Originally created to implement the [notifications in Docker Registry -2](https://github.com/docker/distribution/blob/master/docs/notifications.md), -we've found the pattern to be useful in other applications. This package is -most of the same code with slightly updated interfaces. Much of the internals -have been made available. - -## Usage - -The `events` package centers around a `Sink` type. Events are written with -calls to `Sink.Write(event Event)`. Sinks can be wired up in various -configurations to achieve interesting behavior. - -The canonical example is that employed by the -[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications) -package. Let's say we have a type `httpSink` where we'd like to queue -notifications. 
As a rule, it should send a single http request and return an -error if it fails: - -```go -func (h *httpSink) Write(event Event) error { - p, err := json.Marshal(event) - if err != nil { - return err - } - body := bytes.NewReader(p) - resp, err := h.client.Post(h.url, "application/json", body) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.Status != 200 { - return errors.New("unexpected status") - } - - return nil -} - -// implement (*httpSink).Close() -``` - -With just that, we can start using components from this package. One can call -`(*httpSink).Write` to send events as the body of a post request to a -configured URL. - -### Retries - -HTTP can be unreliable. The first feature we'd like is to have some retry: - -```go -hs := newHTTPSink(/*...*/) -retry := NewRetryingSink(hs, NewBreaker(5, time.Second)) -``` - -We now have a sink that will retry events against the `httpSink` until they -succeed. The retry will backoff for one second after 5 consecutive failures -using the breaker strategy. - -### Queues - -This isn't quite enough. We we want a sink that doesn't block while we are -waiting for events to be sent. Let's add a `Queue`: - -```go -queue := NewQueue(retry) -``` - -Now, we have an unbounded queue that will work through all events sent with -`(*Queue).Write`. Events can be added asynchronously to the queue without -blocking the current execution path. This is ideal for use in an http request. - -### Broadcast - -It usually turns out that you want to send to more than one listener. We can -use `Broadcaster` to support this: - -```go -var broadcast = NewBroadcaster() // make it available somewhere in your application. -broadcast.Add(queue) // add your queue! -broadcast.Add(queue2) // and another! -``` - -With the above, we can now call `broadcast.Write` in our http handlers and have -all the events distributed to each queue. Because the events are queued, not -listener blocks another. 
- -### Extending - -For the most part, the above is sufficient for a lot of applications. However, -extending the above functionality can be done implementing your own `Sink`. The -behavior and semantics of the sink can be completely dependent on the -application requirements. The interface is provided below for reference: - -```go -type Sink { - Write(Event) error - Close() error -} -``` - -Application behavior can be controlled by how `Write` behaves. The examples -above are designed to queue the message and return as quickly as possible. -Other implementations may block until the event is committed to durable -storage. - -## Copyright and license - -Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License, -Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/docker/go-events/broadcast.go b/vendor/github.com/docker/go-events/broadcast.go deleted file mode 100644 index 5120078..0000000 --- a/vendor/github.com/docker/go-events/broadcast.go +++ /dev/null @@ -1,178 +0,0 @@ -package events - -import ( - "fmt" - "sync" - - "github.com/sirupsen/logrus" -) - -// Broadcaster sends events to multiple, reliable Sinks. The goal of this -// component is to dispatch events to configured endpoints. Reliability can be -// provided by wrapping incoming sinks. -type Broadcaster struct { - sinks []Sink - events chan Event - adds chan configureRequest - removes chan configureRequest - - shutdown chan struct{} - closed chan struct{} - once sync.Once -} - -// NewBroadcaster appends one or more sinks to the list of sinks. The -// broadcaster behavior will be affected by the properties of the sink. -// Generally, the sink should accept all messages and deal with reliability on -// its own. Use of EventQueue and RetryingSink should be used here. 
-func NewBroadcaster(sinks ...Sink) *Broadcaster { - b := Broadcaster{ - sinks: sinks, - events: make(chan Event), - adds: make(chan configureRequest), - removes: make(chan configureRequest), - shutdown: make(chan struct{}), - closed: make(chan struct{}), - } - - // Start the broadcaster - go b.run() - - return &b -} - -// Write accepts an event to be dispatched to all sinks. This method will never -// fail and should never block (hopefully!). The caller cedes the memory to the -// broadcaster and should not modify it after calling write. -func (b *Broadcaster) Write(event Event) error { - select { - case b.events <- event: - case <-b.closed: - return ErrSinkClosed - } - return nil -} - -// Add the sink to the broadcaster. -// -// The provided sink must be comparable with equality. Typically, this just -// works with a regular pointer type. -func (b *Broadcaster) Add(sink Sink) error { - return b.configure(b.adds, sink) -} - -// Remove the provided sink. -func (b *Broadcaster) Remove(sink Sink) error { - return b.configure(b.removes, sink) -} - -type configureRequest struct { - sink Sink - response chan error -} - -func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error { - response := make(chan error, 1) - - for { - select { - case ch <- configureRequest{ - sink: sink, - response: response}: - ch = nil - case err := <-response: - return err - case <-b.closed: - return ErrSinkClosed - } - } -} - -// Close the broadcaster, ensuring that all messages are flushed to the -// underlying sink before returning. -func (b *Broadcaster) Close() error { - b.once.Do(func() { - close(b.shutdown) - }) - - <-b.closed - return nil -} - -// run is the main broadcast loop, started when the broadcaster is created. -// Under normal conditions, it waits for events on the event channel. After -// Close is called, this goroutine will exit. 
-func (b *Broadcaster) run() { - defer close(b.closed) - remove := func(target Sink) { - for i, sink := range b.sinks { - if sink == target { - b.sinks = append(b.sinks[:i], b.sinks[i+1:]...) - break - } - } - } - - for { - select { - case event := <-b.events: - for _, sink := range b.sinks { - if err := sink.Write(event); err != nil { - if err == ErrSinkClosed { - // remove closed sinks - remove(sink) - continue - } - logrus.WithField("event", event).WithField("events.sink", sink).WithError(err). - Errorf("broadcaster: dropping event") - } - } - case request := <-b.adds: - // while we have to iterate for add/remove, common iteration for - // send is faster against slice. - - var found bool - for _, sink := range b.sinks { - if request.sink == sink { - found = true - break - } - } - - if !found { - b.sinks = append(b.sinks, request.sink) - } - // b.sinks[request.sink] = struct{}{} - request.response <- nil - case request := <-b.removes: - remove(request.sink) - request.response <- nil - case <-b.shutdown: - // close all the underlying sinks - for _, sink := range b.sinks { - if err := sink.Close(); err != nil && err != ErrSinkClosed { - logrus.WithField("events.sink", sink).WithError(err). - Errorf("broadcaster: closing sink failed") - } - } - return - } - } -} - -func (b *Broadcaster) String() string { - // Serialize copy of this broadcaster without the sync.Once, to avoid - // a data race. - - b2 := map[string]interface{}{ - "sinks": b.sinks, - "events": b.events, - "adds": b.adds, - "removes": b.removes, - - "shutdown": b.shutdown, - "closed": b.closed, - } - - return fmt.Sprint(b2) -} diff --git a/vendor/github.com/docker/go-events/channel.go b/vendor/github.com/docker/go-events/channel.go deleted file mode 100644 index 802cf51..0000000 --- a/vendor/github.com/docker/go-events/channel.go +++ /dev/null @@ -1,61 +0,0 @@ -package events - -import ( - "fmt" - "sync" -) - -// Channel provides a sink that can be listened on. 
The writer and channel -// listener must operate in separate goroutines. -// -// Consumers should listen on Channel.C until Closed is closed. -type Channel struct { - C chan Event - - closed chan struct{} - once sync.Once -} - -// NewChannel returns a channel. If buffer is zero, the channel is -// unbuffered. -func NewChannel(buffer int) *Channel { - return &Channel{ - C: make(chan Event, buffer), - closed: make(chan struct{}), - } -} - -// Done returns a channel that will always proceed once the sink is closed. -func (ch *Channel) Done() chan struct{} { - return ch.closed -} - -// Write the event to the channel. Must be called in a separate goroutine from -// the listener. -func (ch *Channel) Write(event Event) error { - select { - case ch.C <- event: - return nil - case <-ch.closed: - return ErrSinkClosed - } -} - -// Close the channel sink. -func (ch *Channel) Close() error { - ch.once.Do(func() { - close(ch.closed) - }) - - return nil -} - -func (ch *Channel) String() string { - // Serialize a copy of the Channel that doesn't contain the sync.Once, - // to avoid a data race. - ch2 := map[string]interface{}{ - "C": ch.C, - "closed": ch.closed, - } - return fmt.Sprint(ch2) -} diff --git a/vendor/github.com/docker/go-events/errors.go b/vendor/github.com/docker/go-events/errors.go deleted file mode 100644 index 56db7c2..0000000 --- a/vendor/github.com/docker/go-events/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package events - -import "fmt" - -var ( - // ErrSinkClosed is returned if a write is issued to a sink that has been - // closed. If encountered, the error should be considered terminal and - // retries will not be successful. 
- ErrSinkClosed = fmt.Errorf("events: sink closed") -) diff --git a/vendor/github.com/docker/go-events/event.go b/vendor/github.com/docker/go-events/event.go deleted file mode 100644 index f0f1d9e..0000000 --- a/vendor/github.com/docker/go-events/event.go +++ /dev/null @@ -1,15 +0,0 @@ -package events - -// Event marks items that can be sent as events. -type Event interface{} - -// Sink accepts and sends events. -type Sink interface { - // Write an event to the Sink. If no error is returned, the caller will - // assume that all events have been committed to the sink. If an error is - // received, the caller may retry sending the event. - Write(event Event) error - - // Close the sink, possibly waiting for pending events to flush. - Close() error -} diff --git a/vendor/github.com/docker/go-events/filter.go b/vendor/github.com/docker/go-events/filter.go deleted file mode 100644 index e6c0eb6..0000000 --- a/vendor/github.com/docker/go-events/filter.go +++ /dev/null @@ -1,52 +0,0 @@ -package events - -// Matcher matches events. -type Matcher interface { - Match(event Event) bool -} - -// MatcherFunc implements matcher with just a function. -type MatcherFunc func(event Event) bool - -// Match calls the wrapped function. -func (fn MatcherFunc) Match(event Event) bool { - return fn(event) -} - -// Filter provides an event sink that sends only events that are accepted by a -// Matcher. No methods on filter are goroutine safe. -type Filter struct { - dst Sink - matcher Matcher - closed bool -} - -// NewFilter returns a new filter that will send to events to dst that return -// true for Matcher. -func NewFilter(dst Sink, matcher Matcher) Sink { - return &Filter{dst: dst, matcher: matcher} -} - -// Write an event to the filter. -func (f *Filter) Write(event Event) error { - if f.closed { - return ErrSinkClosed - } - - if f.matcher.Match(event) { - return f.dst.Write(event) - } - - return nil -} - -// Close the filter and allow no more events to pass through. 
-func (f *Filter) Close() error { - // TODO(stevvooe): Not all sinks should have Close. - if f.closed { - return nil - } - - f.closed = true - return f.dst.Close() -} diff --git a/vendor/github.com/docker/go-events/queue.go b/vendor/github.com/docker/go-events/queue.go deleted file mode 100644 index 4bb770a..0000000 --- a/vendor/github.com/docker/go-events/queue.go +++ /dev/null @@ -1,111 +0,0 @@ -package events - -import ( - "container/list" - "sync" - - "github.com/sirupsen/logrus" -) - -// Queue accepts all messages into a queue for asynchronous consumption -// by a sink. It is unbounded and thread safe but the sink must be reliable or -// events will be dropped. -type Queue struct { - dst Sink - events *list.List - cond *sync.Cond - mu sync.Mutex - closed bool -} - -// NewQueue returns a queue to the provided Sink dst. -func NewQueue(dst Sink) *Queue { - eq := Queue{ - dst: dst, - events: list.New(), - } - - eq.cond = sync.NewCond(&eq.mu) - go eq.run() - return &eq -} - -// Write accepts the events into the queue, only failing if the queue has -// been closed. -func (eq *Queue) Write(event Event) error { - eq.mu.Lock() - defer eq.mu.Unlock() - - if eq.closed { - return ErrSinkClosed - } - - eq.events.PushBack(event) - eq.cond.Signal() // signal waiters - - return nil -} - -// Close shutsdown the event queue, flushing -func (eq *Queue) Close() error { - eq.mu.Lock() - defer eq.mu.Unlock() - - if eq.closed { - return nil - } - - // set closed flag - eq.closed = true - eq.cond.Signal() // signal flushes queue - eq.cond.Wait() // wait for signal from last flush - return eq.dst.Close() -} - -// run is the main goroutine to flush events to the target sink. -func (eq *Queue) run() { - for { - event := eq.next() - - if event == nil { - return // nil block means event queue is closed. - } - - if err := eq.dst.Write(event); err != nil { - // TODO(aaronl): Dropping events could be bad depending - // on the application. 
We should have a way of - // communicating this condition. However, logging - // at a log level above debug may not be appropriate. - // Eventually, go-events should not use logrus at all, - // and should bubble up conditions like this through - // error values. - logrus.WithFields(logrus.Fields{ - "event": event, - "sink": eq.dst, - }).WithError(err).Debug("eventqueue: dropped event") - } - } -} - -// next encompasses the critical section of the run loop. When the queue is -// empty, it will block on the condition. If new data arrives, it will wake -// and return a block. When closed, a nil slice will be returned. -func (eq *Queue) next() Event { - eq.mu.Lock() - defer eq.mu.Unlock() - - for eq.events.Len() < 1 { - if eq.closed { - eq.cond.Broadcast() - return nil - } - - eq.cond.Wait() - } - - front := eq.events.Front() - block := front.Value.(Event) - eq.events.Remove(front) - - return block -} diff --git a/vendor/github.com/docker/go-events/retry.go b/vendor/github.com/docker/go-events/retry.go deleted file mode 100644 index b7f0a54..0000000 --- a/vendor/github.com/docker/go-events/retry.go +++ /dev/null @@ -1,260 +0,0 @@ -package events - -import ( - "fmt" - "math/rand" - "sync" - "sync/atomic" - "time" - - "github.com/sirupsen/logrus" -) - -// RetryingSink retries the write until success or an ErrSinkClosed is -// returned. Underlying sink must have p > 0 of succeeding or the sink will -// block. Retry is configured with a RetryStrategy. Concurrent calls to a -// retrying sink are serialized through the sink, meaning that if one is -// in-flight, another will not proceed. -type RetryingSink struct { - sink Sink - strategy RetryStrategy - closed chan struct{} - once sync.Once -} - -// NewRetryingSink returns a sink that will retry writes to a sink, backing -// off on failure. Parameters threshold and backoff adjust the behavior of the -// circuit breaker. 
-func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink { - rs := &RetryingSink{ - sink: sink, - strategy: strategy, - closed: make(chan struct{}), - } - - return rs -} - -// Write attempts to flush the events to the downstream sink until it succeeds -// or the sink is closed. -func (rs *RetryingSink) Write(event Event) error { - logger := logrus.WithField("event", event) - -retry: - select { - case <-rs.closed: - return ErrSinkClosed - default: - } - - if backoff := rs.strategy.Proceed(event); backoff > 0 { - select { - case <-time.After(backoff): - // TODO(stevvooe): This branch holds up the next try. Before, we - // would simply break to the "retry" label and then possibly wait - // again. However, this requires all retry strategies to have a - // large probability of probing the sync for success, rather than - // just backing off and sending the request. - case <-rs.closed: - return ErrSinkClosed - } - } - - if err := rs.sink.Write(event); err != nil { - if err == ErrSinkClosed { - // terminal! - return err - } - - logger := logger.WithError(err) // shadow!! - - if rs.strategy.Failure(event, err) { - logger.Errorf("retryingsink: dropped event") - return nil - } - - logger.Errorf("retryingsink: error writing event, retrying") - goto retry - } - - rs.strategy.Success(event) - return nil -} - -// Close closes the sink and the underlying sink. -func (rs *RetryingSink) Close() error { - rs.once.Do(func() { - close(rs.closed) - }) - - return nil -} - -func (rs *RetryingSink) String() string { - // Serialize a copy of the RetryingSink without the sync.Once, to avoid - // a data race. - rs2 := map[string]interface{}{ - "sink": rs.sink, - "strategy": rs.strategy, - "closed": rs.closed, - } - return fmt.Sprint(rs2) -} - -// RetryStrategy defines a strategy for retrying event sink writes. -// -// All methods should be goroutine safe. -type RetryStrategy interface { - // Proceed is called before every event send. 
If proceed returns a - // positive, non-zero integer, the retryer will back off by the provided - // duration. - // - // An event is provided, by may be ignored. - Proceed(event Event) time.Duration - - // Failure reports a failure to the strategy. If this method returns true, - // the event should be dropped. - Failure(event Event, err error) bool - - // Success should be called when an event is sent successfully. - Success(event Event) -} - -// Breaker implements a circuit breaker retry strategy. -// -// The current implementation never drops events. -type Breaker struct { - threshold int - recent int - last time.Time - backoff time.Duration // time after which we retry after failure. - mu sync.Mutex -} - -var _ RetryStrategy = &Breaker{} - -// NewBreaker returns a breaker that will backoff after the threshold has been -// tripped. A Breaker is thread safe and may be shared by many goroutines. -func NewBreaker(threshold int, backoff time.Duration) *Breaker { - return &Breaker{ - threshold: threshold, - backoff: backoff, - } -} - -// Proceed checks the failures against the threshold. -func (b *Breaker) Proceed(event Event) time.Duration { - b.mu.Lock() - defer b.mu.Unlock() - - if b.recent < b.threshold { - return 0 - } - - return b.last.Add(b.backoff).Sub(time.Now()) -} - -// Success resets the breaker. -func (b *Breaker) Success(event Event) { - b.mu.Lock() - defer b.mu.Unlock() - - b.recent = 0 - b.last = time.Time{} -} - -// Failure records the failure and latest failure time. -func (b *Breaker) Failure(event Event, err error) bool { - b.mu.Lock() - defer b.mu.Unlock() - - b.recent++ - b.last = time.Now().UTC() - return false // never drop events. -} - -var ( - // DefaultExponentialBackoffConfig provides a default configuration for - // exponential backoff. - DefaultExponentialBackoffConfig = ExponentialBackoffConfig{ - Base: time.Second, - Factor: time.Second, - Max: 20 * time.Second, - } -) - -// ExponentialBackoffConfig configures backoff parameters. 
-// -// Note that these parameters operate on the upper bound for choosing a random -// value. For example, at Base=1s, a random value in [0,1s) will be chosen for -// the backoff value. -type ExponentialBackoffConfig struct { - // Base is the minimum bound for backing off after failure. - Base time.Duration - - // Factor sets the amount of time by which the backoff grows with each - // failure. - Factor time.Duration - - // Max is the absolute maxiumum bound for a single backoff. - Max time.Duration -} - -// ExponentialBackoff implements random backoff with exponentially increasing -// bounds as the number consecutive failures increase. -type ExponentialBackoff struct { - failures uint64 // consecutive failure counter (needs to be 64-bit aligned) - config ExponentialBackoffConfig -} - -// NewExponentialBackoff returns an exponential backoff strategy with the -// desired config. If config is nil, the default is returned. -func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff { - return &ExponentialBackoff{ - config: config, - } -} - -// Proceed returns the next randomly bound exponential backoff time. -func (b *ExponentialBackoff) Proceed(event Event) time.Duration { - return b.backoff(atomic.LoadUint64(&b.failures)) -} - -// Success resets the failures counter. -func (b *ExponentialBackoff) Success(event Event) { - atomic.StoreUint64(&b.failures, 0) -} - -// Failure increments the failure counter. -func (b *ExponentialBackoff) Failure(event Event, err error) bool { - atomic.AddUint64(&b.failures, 1) - return false -} - -// backoff calculates the amount of time to wait based on the number of -// consecutive failures. -func (b *ExponentialBackoff) backoff(failures uint64) time.Duration { - if failures <= 0 { - // proceed normally when there are no failures. 
- return 0 - } - - factor := b.config.Factor - if factor <= 0 { - factor = DefaultExponentialBackoffConfig.Factor - } - - backoff := b.config.Base + factor*time.Duration(1<<(failures-1)) - - max := b.config.Max - if max <= 0 { - max = DefaultExponentialBackoffConfig.Max - } - - if backoff > max || backoff < 0 { - backoff = max - } - - // Choose a uniformly distributed value from [0, backoff). - return time.Duration(rand.Int63n(int64(backoff))) -} diff --git a/vendor/github.com/docker/go-metrics/CONTRIBUTING.md b/vendor/github.com/docker/go-metrics/CONTRIBUTING.md deleted file mode 100644 index b8a512c..0000000 --- a/vendor/github.com/docker/go-metrics/CONTRIBUTING.md +++ /dev/null @@ -1,55 +0,0 @@ -# Contributing - -## Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-metrics/LICENSE b/vendor/github.com/docker/go-metrics/LICENSE deleted file mode 100644 index 8f3fee6..0000000 --- a/vendor/github.com/docker/go-metrics/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-metrics/LICENSE.docs b/vendor/github.com/docker/go-metrics/LICENSE.docs deleted file mode 100644 index e26cd4f..0000000 --- a/vendor/github.com/docker/go-metrics/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. 
The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. 
More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. 
Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. 
Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. 
The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. 
Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. 
You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. 
You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. 
However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. 
No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/go-metrics/NOTICE b/vendor/github.com/docker/go-metrics/NOTICE deleted file mode 100644 index 8915f02..0000000 --- a/vendor/github.com/docker/go-metrics/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. 
-It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md deleted file mode 100644 index a9e947c..0000000 --- a/vendor/github.com/docker/go-metrics/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics) - -This package is small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects. - -## Best Practices - -This packages is meant to be used for collecting metrics in Docker projects. -It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected. -If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/). - -The following are a few Docker specific rules that will help you name and work with metrics in your project. - -1. Namespace and Subsystem - -This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics. - -```go -ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{ - "version": dockerversion.Version, - "commit": dockerversion.GitCommit, -}) -``` - -In the example above we are creating metrics for the Docker engine's daemon package. -`engine` would be the namespace in this example where `daemon` is the subsystem or package where we are collecting the metrics. 
- -A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting. - -2. Declaring your Metrics - -Try to keep all your metric declarations in one file. -This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created. - -3. Use labels instead of multiple metrics - -Labels allow you to define one metric such as the time it takes to perform a certain action on an object. -If we wanted to collect timings on various container actions such as create, start, and delete then we can define one metric called `container_actions` and use labels to specify the type of action. - - -```go -containerActions = ns.NewLabeledTimer("container_actions", "The number of milliseconds it takes to process each container action", "action") -``` - -The last parameter is the label name or key. -When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for. - -```go -containerActions.WithValues("create").UpdateSince(start) -``` - -4. Always use a unit - -The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with. -For a timer, the standard unit is seconds and a counter's standard unit is a total. -For gauges you must provide the unit. -This package provides a standard set of units for use within the Docker projects. - -```go -Nanoseconds Unit = "nanoseconds" -Seconds Unit = "seconds" -Bytes Unit = "bytes" -Total Unit = "total" -``` - -If you need to use a unit but it is not defined in the package please open a PR to add it but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds. - -## Docs - -Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics). 
- -## HTTP Metrics - -To instrument a http handler, you can wrap the code like this: - -```go -namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"}) -httpMetrics := namespace.NewDefaultHttpMetrics() -metrics.Register(namespace) -instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler) -``` -Note: The `handler` label must be provided when a new namespace is created. - -## Additional Metrics - -Additional metrics are also defined here that are not available in the prometheus client. -If you need a custom metrics and it is generic enough to be used by multiple projects, define it here. - - -## Copyright and license - -Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/vendor/github.com/docker/go-metrics/counter.go b/vendor/github.com/docker/go-metrics/counter.go deleted file mode 100644 index fe36316..0000000 --- a/vendor/github.com/docker/go-metrics/counter.go +++ /dev/null @@ -1,52 +0,0 @@ -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Counter is a metrics that can only increment its current count -type Counter interface { - // Inc adds Sum(vs) to the counter. Sum(vs) must be positive. - // - // If len(vs) == 0, increments the counter by 1. - Inc(vs ...float64) -} - -// LabeledCounter is counter that must have labels populated before use. 
-type LabeledCounter interface { - WithValues(vs ...string) Counter -} - -type labeledCounter struct { - pc *prometheus.CounterVec -} - -func (lc *labeledCounter) WithValues(vs ...string) Counter { - return &counter{pc: lc.pc.WithLabelValues(vs...)} -} - -func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) { - lc.pc.Describe(ch) -} - -func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) { - lc.pc.Collect(ch) -} - -type counter struct { - pc prometheus.Counter -} - -func (c *counter) Inc(vs ...float64) { - if len(vs) == 0 { - c.pc.Inc() - } - - c.pc.Add(sumFloat64(vs...)) -} - -func (c *counter) Describe(ch chan<- *prometheus.Desc) { - c.pc.Describe(ch) -} - -func (c *counter) Collect(ch chan<- prometheus.Metric) { - c.pc.Collect(ch) -} diff --git a/vendor/github.com/docker/go-metrics/docs.go b/vendor/github.com/docker/go-metrics/docs.go deleted file mode 100644 index 8fbdfc6..0000000 --- a/vendor/github.com/docker/go-metrics/docs.go +++ /dev/null @@ -1,3 +0,0 @@ -// This package is small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects. - -package metrics diff --git a/vendor/github.com/docker/go-metrics/gauge.go b/vendor/github.com/docker/go-metrics/gauge.go deleted file mode 100644 index 74296e8..0000000 --- a/vendor/github.com/docker/go-metrics/gauge.go +++ /dev/null @@ -1,72 +0,0 @@ -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Gauge is a metric that allows incrementing and decrementing a value -type Gauge interface { - Inc(...float64) - Dec(...float64) - - // Add adds the provided value to the gauge's current value - Add(float64) - - // Set replaces the gauge's current value with the provided value - Set(float64) -} - -// LabeledGauge describes a gauge the must have values populated before use. 
-type LabeledGauge interface { - WithValues(labels ...string) Gauge -} - -type labeledGauge struct { - pg *prometheus.GaugeVec -} - -func (lg *labeledGauge) WithValues(labels ...string) Gauge { - return &gauge{pg: lg.pg.WithLabelValues(labels...)} -} - -func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) { - lg.pg.Describe(c) -} - -func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) { - lg.pg.Collect(c) -} - -type gauge struct { - pg prometheus.Gauge -} - -func (g *gauge) Inc(vs ...float64) { - if len(vs) == 0 { - g.pg.Inc() - } - - g.Add(sumFloat64(vs...)) -} - -func (g *gauge) Dec(vs ...float64) { - if len(vs) == 0 { - g.pg.Dec() - } - - g.Add(-sumFloat64(vs...)) -} - -func (g *gauge) Add(v float64) { - g.pg.Add(v) -} - -func (g *gauge) Set(v float64) { - g.pg.Set(v) -} - -func (g *gauge) Describe(c chan<- *prometheus.Desc) { - g.pg.Describe(c) -} - -func (g *gauge) Collect(c chan<- prometheus.Metric) { - g.pg.Collect(c) -} diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go deleted file mode 100644 index 05601e9..0000000 --- a/vendor/github.com/docker/go-metrics/handler.go +++ /dev/null @@ -1,74 +0,0 @@ -package metrics - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -// HTTPHandlerOpts describes a set of configurable options of http metrics -type HTTPHandlerOpts struct { - DurationBuckets []float64 - RequestSizeBuckets []float64 - ResponseSizeBuckets []float64 -} - -const ( - InstrumentHandlerResponseSize = iota - InstrumentHandlerRequestSize - InstrumentHandlerDuration - InstrumentHandlerCounter - InstrumentHandlerInFlight -) - -type HTTPMetric struct { - prometheus.Collector - handlerType int -} - -var ( - defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60} - defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G - 
defaultResponseSizeBuckets = defaultRequestSizeBuckets -) - -// Handler returns the global http.Handler that provides the prometheus -// metrics format on GET requests. This handler is no longer instrumented. -func Handler() http.Handler { - return promhttp.Handler() -} - -func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(metrics, handler.ServeHTTP) -} - -func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc { - var handler http.Handler - handler = http.HandlerFunc(handlerFunc) - for _, metric := range metrics { - switch metric.handlerType { - case InstrumentHandlerResponseSize: - if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { - handler = promhttp.InstrumentHandlerResponseSize(collector, handler) - } - case InstrumentHandlerRequestSize: - if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { - handler = promhttp.InstrumentHandlerRequestSize(collector, handler) - } - case InstrumentHandlerDuration: - if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { - handler = promhttp.InstrumentHandlerDuration(collector, handler) - } - case InstrumentHandlerCounter: - if collector, ok := metric.Collector.(*prometheus.CounterVec); ok { - handler = promhttp.InstrumentHandlerCounter(collector, handler) - } - case InstrumentHandlerInFlight: - if collector, ok := metric.Collector.(prometheus.Gauge); ok { - handler = promhttp.InstrumentHandlerInFlight(collector, handler) - } - } - } - return handler.ServeHTTP -} diff --git a/vendor/github.com/docker/go-metrics/helpers.go b/vendor/github.com/docker/go-metrics/helpers.go deleted file mode 100644 index 68b7f51..0000000 --- a/vendor/github.com/docker/go-metrics/helpers.go +++ /dev/null @@ -1,10 +0,0 @@ -package metrics - -func sumFloat64(vs ...float64) float64 { - var sum float64 - for _, v := range vs { - sum += v - } - - return sum -} diff --git 
a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go deleted file mode 100644 index 7983154..0000000 --- a/vendor/github.com/docker/go-metrics/namespace.go +++ /dev/null @@ -1,315 +0,0 @@ -package metrics - -import ( - "fmt" - "sync" - - "github.com/prometheus/client_golang/prometheus" -) - -type Labels map[string]string - -// NewNamespace returns a namespaces that is responsible for managing a collection of -// metrics for a particual namespace and subsystem -// -// labels allows const labels to be added to all metrics created in this namespace -// and are commonly used for data like application version and git commit -func NewNamespace(name, subsystem string, labels Labels) *Namespace { - if labels == nil { - labels = make(map[string]string) - } - return &Namespace{ - name: name, - subsystem: subsystem, - labels: labels, - } -} - -// Namespace describes a set of metrics that share a namespace and subsystem. -type Namespace struct { - name string - subsystem string - labels Labels - mu sync.Mutex - metrics []prometheus.Collector -} - -// WithConstLabels returns a namespace with the provided set of labels merged -// with the existing constant labels on the namespace. -// -// Only metrics created with the returned namespace will get the new constant -// labels. The returned namespace must be registered separately. 
-func (n *Namespace) WithConstLabels(labels Labels) *Namespace { - n.mu.Lock() - ns := &Namespace{ - name: n.name, - subsystem: n.subsystem, - labels: mergeLabels(n.labels, labels), - } - n.mu.Unlock() - return ns -} - -func (n *Namespace) NewCounter(name, help string) Counter { - c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))} - n.Add(c) - return c -} - -func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter { - c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)} - n.Add(c) - return c -} - -func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { - return prometheus.CounterOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, Total), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) NewTimer(name, help string) Timer { - t := &timer{ - m: prometheus.NewHistogram(n.newTimerOpts(name, help)), - } - n.Add(t) - return t -} - -func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer { - t := &labeledTimer{ - m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels), - } - n.Add(t) - return t -} - -func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { - return prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, Seconds), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge { - g := &gauge{ - pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)), - } - n.Add(g) - return g -} - -func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge { - g := &labeledGauge{ - pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels), - } - n.Add(g) - return g -} - -func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts { - return 
prometheus.GaugeOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, unit), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) Describe(ch chan<- *prometheus.Desc) { - n.mu.Lock() - defer n.mu.Unlock() - - for _, metric := range n.metrics { - metric.Describe(ch) - } -} - -func (n *Namespace) Collect(ch chan<- prometheus.Metric) { - n.mu.Lock() - defer n.mu.Unlock() - - for _, metric := range n.metrics { - metric.Collect(ch) - } -} - -func (n *Namespace) Add(collector prometheus.Collector) { - n.mu.Lock() - n.metrics = append(n.metrics, collector) - n.mu.Unlock() -} - -func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { - name = makeName(name, unit) - namespace := n.name - if n.subsystem != "" { - namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) - } - name = fmt.Sprintf("%s_%s", namespace, name) - return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels)) -} - -// mergeLabels merges two or more labels objects into a single map, favoring -// the later labels. 
-func mergeLabels(lbs ...Labels) Labels { - merged := make(Labels) - - for _, target := range lbs { - for k, v := range target { - merged[k] = v - } - } - - return merged -} - -func makeName(name string, unit Unit) string { - if unit == "" { - return name - } - - return fmt.Sprintf("%s_%s", name, unit) -} - -func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric { - return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ - DurationBuckets: defaultDurationBuckets, - RequestSizeBuckets: defaultResponseSizeBuckets, - ResponseSizeBuckets: defaultResponseSizeBuckets, - }) -} - -func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric { - return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ - DurationBuckets: durationBuckets, - RequestSizeBuckets: requestSizeBuckets, - ResponseSizeBuckets: responseSizeBuckets, - }) -} - -func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric { - var httpMetrics []*HTTPMetric - inFlightMetric := n.NewInFlightGaugeMetric(handlerName) - requestTotalMetric := n.NewRequestTotalMetric(handlerName) - requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets) - requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets) - responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets) - httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric) - return httpMetrics -} - -func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric { - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - metric := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "in_flight_requests", - Help: "The in-flight HTTP requests", - ConstLabels: prometheus.Labels(labels), - }) - httpMetric := 
&HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerInFlight, - } - n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric { - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - metric := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: prometheus.Labels(labels), - }, - []string{"code", "method"}, - ) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerCounter, - } - n.Add(httpMetric) - return httpMetric -} -func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("DurationBuckets must be provided") - } - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "request_duration_seconds", - Help: "The HTTP request latencies in seconds.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metric := prometheus.NewHistogramVec(opts, []string{"method"}) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerDuration, - } - n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("RequestSizeBuckets must be provided") - } - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "request_size_bytes", - Help: "The HTTP request sizes in bytes.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metric := prometheus.NewHistogramVec(opts, []string{}) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerRequestSize, - } - 
n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("ResponseSizeBuckets must be provided") - } - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "response_size_bytes", - Help: "The HTTP response sizes in bytes.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metrics := prometheus.NewHistogramVec(opts, []string{}) - httpMetric := &HTTPMetric{ - Collector: metrics, - handlerType: InstrumentHandlerResponseSize, - } - n.Add(httpMetric) - return httpMetric -} diff --git a/vendor/github.com/docker/go-metrics/register.go b/vendor/github.com/docker/go-metrics/register.go deleted file mode 100644 index 708358d..0000000 --- a/vendor/github.com/docker/go-metrics/register.go +++ /dev/null @@ -1,15 +0,0 @@ -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Register adds all the metrics in the provided namespace to the global -// metrics registry -func Register(n *Namespace) { - prometheus.MustRegister(n) -} - -// Deregister removes all the metrics in the provided namespace from the -// global metrics registry -func Deregister(n *Namespace) { - prometheus.Unregister(n) -} diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go deleted file mode 100644 index 824c987..0000000 --- a/vendor/github.com/docker/go-metrics/timer.go +++ /dev/null @@ -1,85 +0,0 @@ -package metrics - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -// StartTimer begins a timer observation at the callsite. When the target -// operation is completed, the caller should call the return done func(). 
-func StartTimer(timer Timer) (done func()) { - start := time.Now() - return func() { - timer.Update(time.Since(start)) - } -} - -// Timer is a metric that allows collecting the duration of an action in seconds -type Timer interface { - // Update records an observation, duration, and converts to the target - // units. - Update(duration time.Duration) - - // UpdateSince will add the duration from the provided starting time to the - // timer's summary with the precisions that was used in creation of the timer - UpdateSince(time.Time) -} - -// LabeledTimer is a timer that must have label values populated before use. -type LabeledTimer interface { - WithValues(labels ...string) *labeledTimerObserver -} - -type labeledTimer struct { - m *prometheus.HistogramVec -} - -type labeledTimerObserver struct { - m prometheus.Observer -} - -func (lbo *labeledTimerObserver) Update(duration time.Duration) { - lbo.m.Observe(duration.Seconds()) -} - -func (lbo *labeledTimerObserver) UpdateSince(since time.Time) { - lbo.m.Observe(time.Since(since).Seconds()) -} - -func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver { - return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)} -} - -func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { - lt.m.Describe(c) -} - -func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { - lt.m.Collect(c) -} - -type timer struct { - m prometheus.Observer -} - -func (t *timer) Update(duration time.Duration) { - t.m.Observe(duration.Seconds()) -} - -func (t *timer) UpdateSince(since time.Time) { - t.m.Observe(time.Since(since).Seconds()) -} - -func (t *timer) Describe(c chan<- *prometheus.Desc) { - c <- t.m.(prometheus.Metric).Desc() -} - -func (t *timer) Collect(c chan<- prometheus.Metric) { - // Are there any observers that don't implement Collector? It is really - // unclear what the point of the upstream change was, but we'll let this - // panic if we get an observer that doesn't implement collector. 
In this - // case, we should almost always see metricVec objects, so this should - // never panic. - t.m.(prometheus.Collector).Collect(c) -} diff --git a/vendor/github.com/docker/go-metrics/unit.go b/vendor/github.com/docker/go-metrics/unit.go deleted file mode 100644 index c96622f..0000000 --- a/vendor/github.com/docker/go-metrics/unit.go +++ /dev/null @@ -1,12 +0,0 @@ -package metrics - -// Unit represents the type or precision of a metric that is appended to -// the metrics fully qualified name -type Unit string - -const ( - Nanoseconds Unit = "nanoseconds" - Seconds Unit = "seconds" - Bytes Unit = "bytes" - Total Unit = "total" -) diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d7..0000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. 
The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. 
diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE deleted file mode 100644 index b55b37b..0000000 --- a/vendor/github.com/docker/go-units/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 4aac7c7..0000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-units maintainers file -# -# This file describes who runs the docker/go-units project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "akihirosuda", - "dnephin", - "thajeztah", - "vdemeester", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.akihirosuda] - Name = "Akihiro Suda" - Email = "akihiro.suda.cz@hco.ntt.co.jp" - GitHub = "AkihiroSuda" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 4f70a4e..0000000 --- a/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,16 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. - -go-units is licensed under the Apache License, Version 2.0. -See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index af9d605..0000000 --- a/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get golang.org/x/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . 
| tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go deleted file mode 100644 index 48dd874..0000000 --- a/vendor/github.com/docker/go-units/duration.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). -func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds == 1 { - return "1 second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours() + 0.5); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*2 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go deleted file mode 100644 index 85f6ab0..0000000 --- a/vendor/github.com/docker/go-units/size.go +++ /dev/null @@ -1,108 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = 
unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { - i := 0 - unitsLimit := len(_map) - 1 - for size >= base && i < unitsLimit { - size = size / base - i++ - } - return size, _map[i] -} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - size, unit := getSizeAndUnit(size, base, _map) - return fmt.Sprintf(format, size, unit) -} - -// HumanSizeWithPrecision allows the size to be in any precision, -// instead of 4 digit precision used in units.HumanSize. -func HumanSizeWithPrecision(size float64, precision int) string { - size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g%s", precision, size, unit) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return HumanSizeWithPrecision(size, 4) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). 
-func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 4 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseFloat(matches[1], 64) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[3]) - if mul, ok := uMap[unitPrefix]; ok { - size *= float64(mul) - } - - return int64(size), nil -} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go deleted file mode 100644 index fca0400..0000000 --- a/vendor/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,123 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. 
- // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. -func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - var ( - soft int64 - hard = &soft // default to soft in case no hard was set - temp int64 - err error - ) - switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { - case 2: - temp, err = strconv.ParseInt(limitVals[1], 10, 64) - if err != nil { - return nil, err - } - hard = &temp - fallthrough - case 1: - soft, err = strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - if *hard != -1 { - if soft == -1 { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", 
*hard) - } - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) - } - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. -func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/vendor/github.com/docker/spdystream/CONTRIBUTING.md b/vendor/github.com/docker/spdystream/CONTRIBUTING.md deleted file mode 100644 index d4eddcc..0000000 --- a/vendor/github.com/docker/spdystream/CONTRIBUTING.md +++ /dev/null @@ -1,13 +0,0 @@ -# Contributing to SpdyStream - -Want to hack on spdystream? Awesome! Here are instructions to get you -started. - -SpdyStream is a part of the [Docker](https://docker.io) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read -[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md). - -Happy hacking! diff --git a/vendor/github.com/docker/spdystream/LICENSE b/vendor/github.com/docker/spdystream/LICENSE deleted file mode 100644 index 9e4bd4d..0000000 --- a/vendor/github.com/docker/spdystream/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright 2014-2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs deleted file mode 100644 index e26cd4f..0000000 --- a/vendor/github.com/docker/spdystream/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. 
The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. 
More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. 
Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. 
Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. 
The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. 
Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. 
You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. 
You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. 
However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. 
No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/spdystream/MAINTAINERS b/vendor/github.com/docker/spdystream/MAINTAINERS deleted file mode 100644 index 14e2633..0000000 --- a/vendor/github.com/docker/spdystream/MAINTAINERS +++ /dev/null @@ -1,28 +0,0 @@ -# Spdystream maintainers file -# -# This file describes who runs the docker/spdystream project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. 
-# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "dmcgowan", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@docker.com" - GitHub = "dmcgowan" diff --git a/vendor/github.com/docker/spdystream/README.md b/vendor/github.com/docker/spdystream/README.md deleted file mode 100644 index 11cccd0..0000000 --- a/vendor/github.com/docker/spdystream/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# SpdyStream - -A multiplexed stream library using spdy - -## Usage - -Client example (connecting to mirroring server without auth) - -```go -package main - -import ( - "fmt" - "github.com/docker/spdystream" - "net" - "net/http" -) - -func main() { - conn, err := net.Dial("tcp", "localhost:8080") - if err != nil { - panic(err) - } - spdyConn, err := spdystream.NewConnection(conn, false) - if err != nil { - panic(err) - } - go spdyConn.Serve(spdystream.NoOpStreamHandler) - stream, err := spdyConn.CreateStream(http.Header{}, nil, false) - if err != nil { - panic(err) - } - - stream.Wait() - - fmt.Fprint(stream, "Writing to stream") - - buf := make([]byte, 25) - stream.Read(buf) - fmt.Println(string(buf)) - - stream.Close() -} -``` - -Server example (mirroring server without auth) - -```go -package main - -import ( - "github.com/docker/spdystream" - "net" -) - -func main() { - listener, err := net.Listen("tcp", "localhost:8080") - if err != nil { - panic(err) - } - for { - conn, err := listener.Accept() - if err != nil { - panic(err) - } - spdyConn, err := spdystream.NewConnection(conn, true) - if err != nil { - panic(err) - } - go spdyConn.Serve(spdystream.MirrorStreamHandler) - } -} -``` - -## Copyright and 
license - -Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go deleted file mode 100644 index 6031a0d..0000000 --- a/vendor/github.com/docker/spdystream/connection.go +++ /dev/null @@ -1,958 +0,0 @@ -package spdystream - -import ( - "errors" - "fmt" - "io" - "net" - "net/http" - "sync" - "time" - - "github.com/docker/spdystream/spdy" -) - -var ( - ErrInvalidStreamId = errors.New("Invalid stream id") - ErrTimeout = errors.New("Timeout occured") - ErrReset = errors.New("Stream reset") - ErrWriteClosedStream = errors.New("Write on closed stream") -) - -const ( - FRAME_WORKERS = 5 - QUEUE_SIZE = 50 -) - -type StreamHandler func(stream *Stream) - -type AuthHandler func(header http.Header, slot uint8, parent uint32) bool - -type idleAwareFramer struct { - f *spdy.Framer - conn *Connection - writeLock sync.Mutex - resetChan chan struct{} - setTimeoutLock sync.Mutex - setTimeoutChan chan time.Duration - timeout time.Duration -} - -func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer { - iaf := &idleAwareFramer{ - f: framer, - resetChan: make(chan struct{}, 2), - // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about - // the same time the connection is being closed - setTimeoutChan: make(chan time.Duration, 1), - } - return iaf -} - -func (i *idleAwareFramer) monitor() { - var ( - timer *time.Timer - expired <-chan time.Time - resetChan = i.resetChan - setTimeoutChan = i.setTimeoutChan - ) -Loop: - for { - select { - case timeout 
:= <-i.setTimeoutChan: - i.timeout = timeout - if timeout == 0 { - if timer != nil { - timer.Stop() - } - } else { - if timer == nil { - timer = time.NewTimer(timeout) - expired = timer.C - } else { - timer.Reset(timeout) - } - } - case <-resetChan: - if timer != nil && i.timeout > 0 { - timer.Reset(i.timeout) - } - case <-expired: - i.conn.streamCond.L.Lock() - streams := i.conn.streams - i.conn.streams = make(map[spdy.StreamId]*Stream) - i.conn.streamCond.Broadcast() - i.conn.streamCond.L.Unlock() - go func() { - for _, stream := range streams { - stream.resetStream() - } - i.conn.Close() - }() - case <-i.conn.closeChan: - if timer != nil { - timer.Stop() - } - - // Start a goroutine to drain resetChan. This is needed because we've seen - // some unit tests with large numbers of goroutines get into a situation - // where resetChan fills up, at least 1 call to Write() is still trying to - // send to resetChan, the connection gets closed, and this case statement - // attempts to grab the write lock that Write() already has, causing a - // deadlock. - // - // See https://github.com/docker/spdystream/issues/49 for more details. 
- go func() { - for _ = range resetChan { - } - }() - - go func() { - for _ = range setTimeoutChan { - } - }() - - i.writeLock.Lock() - close(resetChan) - i.resetChan = nil - i.writeLock.Unlock() - - i.setTimeoutLock.Lock() - close(i.setTimeoutChan) - i.setTimeoutChan = nil - i.setTimeoutLock.Unlock() - - break Loop - } - } - - // Drain resetChan - for _ = range resetChan { - } -} - -func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error { - i.writeLock.Lock() - defer i.writeLock.Unlock() - if i.resetChan == nil { - return io.EOF - } - err := i.f.WriteFrame(frame) - if err != nil { - return err - } - - i.resetChan <- struct{}{} - - return nil -} - -func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) { - frame, err := i.f.ReadFrame() - if err != nil { - return nil, err - } - - // resetChan should never be closed since it is only closed - // when the connection has closed its closeChan. This closure - // only occurs after all Reads have finished - // TODO (dmcgowan): refactor relationship into connection - i.resetChan <- struct{}{} - - return frame, nil -} - -func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) { - i.setTimeoutLock.Lock() - defer i.setTimeoutLock.Unlock() - - if i.setTimeoutChan == nil { - return - } - - i.setTimeoutChan <- timeout -} - -type Connection struct { - conn net.Conn - framer *idleAwareFramer - - closeChan chan bool - goneAway bool - lastStreamChan chan<- *Stream - goAwayTimeout time.Duration - closeTimeout time.Duration - - streamLock *sync.RWMutex - streamCond *sync.Cond - streams map[spdy.StreamId]*Stream - - nextIdLock sync.Mutex - receiveIdLock sync.Mutex - nextStreamId spdy.StreamId - receivedStreamId spdy.StreamId - - pingIdLock sync.Mutex - pingId uint32 - pingChans map[uint32]chan error - - shutdownLock sync.Mutex - shutdownChan chan error - hasShutdown bool - - // for testing https://github.com/docker/spdystream/pull/56 - dataFrameHandler func(*spdy.DataFrame) error -} - -// NewConnection creates a new spdy 
connection from an existing -// network connection. -func NewConnection(conn net.Conn, server bool) (*Connection, error) { - framer, framerErr := spdy.NewFramer(conn, conn) - if framerErr != nil { - return nil, framerErr - } - idleAwareFramer := newIdleAwareFramer(framer) - var sid spdy.StreamId - var rid spdy.StreamId - var pid uint32 - if server { - sid = 2 - rid = 1 - pid = 2 - } else { - sid = 1 - rid = 2 - pid = 1 - } - - streamLock := new(sync.RWMutex) - streamCond := sync.NewCond(streamLock) - - session := &Connection{ - conn: conn, - framer: idleAwareFramer, - - closeChan: make(chan bool), - goAwayTimeout: time.Duration(0), - closeTimeout: time.Duration(0), - - streamLock: streamLock, - streamCond: streamCond, - streams: make(map[spdy.StreamId]*Stream), - nextStreamId: sid, - receivedStreamId: rid, - - pingId: pid, - pingChans: make(map[uint32]chan error), - - shutdownChan: make(chan error), - } - session.dataFrameHandler = session.handleDataFrame - idleAwareFramer.conn = session - go idleAwareFramer.monitor() - - return session, nil -} - -// Ping sends a ping frame across the connection and -// returns the response time -func (s *Connection) Ping() (time.Duration, error) { - pid := s.pingId - s.pingIdLock.Lock() - if s.pingId > 0x7ffffffe { - s.pingId = s.pingId - 0x7ffffffe - } else { - s.pingId = s.pingId + 2 - } - s.pingIdLock.Unlock() - pingChan := make(chan error) - s.pingChans[pid] = pingChan - defer delete(s.pingChans, pid) - - frame := &spdy.PingFrame{Id: pid} - startTime := time.Now() - writeErr := s.framer.WriteFrame(frame) - if writeErr != nil { - return time.Duration(0), writeErr - } - select { - case <-s.closeChan: - return time.Duration(0), errors.New("connection closed") - case err, ok := <-pingChan: - if ok && err != nil { - return time.Duration(0), err - } - break - } - return time.Now().Sub(startTime), nil -} - -// Serve handles frames sent from the server, including reply frames -// which are needed to fully initiate connections. 
Both clients and servers -// should call Serve in a separate goroutine before creating streams. -func (s *Connection) Serve(newHandler StreamHandler) { - // use a WaitGroup to wait for all frames to be drained after receiving - // go-away. - var wg sync.WaitGroup - - // Parition queues to ensure stream frames are handled - // by the same worker, ensuring order is maintained - frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS) - for i := 0; i < FRAME_WORKERS; i++ { - frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE) - - // Ensure frame queue is drained when connection is closed - go func(frameQueue *PriorityFrameQueue) { - <-s.closeChan - frameQueue.Drain() - }(frameQueues[i]) - - wg.Add(1) - go func(frameQueue *PriorityFrameQueue) { - // let the WaitGroup know this worker is done - defer wg.Done() - - s.frameHandler(frameQueue, newHandler) - }(frameQueues[i]) - } - - var ( - partitionRoundRobin int - goAwayFrame *spdy.GoAwayFrame - ) -Loop: - for { - readFrame, err := s.framer.ReadFrame() - if err != nil { - if err != io.EOF { - fmt.Errorf("frame read error: %s", err) - } else { - debugMessage("(%p) EOF received", s) - } - break - } - var priority uint8 - var partition int - switch frame := readFrame.(type) { - case *spdy.SynStreamFrame: - if s.checkStreamFrame(frame) { - priority = frame.Priority - partition = int(frame.StreamId % FRAME_WORKERS) - debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId) - s.addStreamFrame(frame) - } else { - debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId) - continue - } - case *spdy.SynReplyFrame: - priority = s.getStreamPriority(frame.StreamId) - partition = int(frame.StreamId % FRAME_WORKERS) - case *spdy.DataFrame: - priority = s.getStreamPriority(frame.StreamId) - partition = int(frame.StreamId % FRAME_WORKERS) - case *spdy.RstStreamFrame: - priority = s.getStreamPriority(frame.StreamId) - partition = int(frame.StreamId % FRAME_WORKERS) - case *spdy.HeadersFrame: - priority = 
s.getStreamPriority(frame.StreamId) - partition = int(frame.StreamId % FRAME_WORKERS) - case *spdy.PingFrame: - priority = 0 - partition = partitionRoundRobin - partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS - case *spdy.GoAwayFrame: - // hold on to the go away frame and exit the loop - goAwayFrame = frame - break Loop - default: - priority = 7 - partition = partitionRoundRobin - partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS - } - frameQueues[partition].Push(readFrame, priority) - } - close(s.closeChan) - - // wait for all frame handler workers to indicate they've drained their queues - // before handling the go away frame - wg.Wait() - - if goAwayFrame != nil { - s.handleGoAwayFrame(goAwayFrame) - } - - // now it's safe to close remote channels and empty s.streams - s.streamCond.L.Lock() - // notify streams that they're now closed, which will - // unblock any stream Read() calls - for _, stream := range s.streams { - stream.closeRemoteChannels() - } - s.streams = make(map[spdy.StreamId]*Stream) - s.streamCond.Broadcast() - s.streamCond.L.Unlock() -} - -func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) { - for { - popFrame := frameQueue.Pop() - if popFrame == nil { - return - } - - var frameErr error - switch frame := popFrame.(type) { - case *spdy.SynStreamFrame: - frameErr = s.handleStreamFrame(frame, newHandler) - case *spdy.SynReplyFrame: - frameErr = s.handleReplyFrame(frame) - case *spdy.DataFrame: - frameErr = s.dataFrameHandler(frame) - case *spdy.RstStreamFrame: - frameErr = s.handleResetFrame(frame) - case *spdy.HeadersFrame: - frameErr = s.handleHeaderFrame(frame) - case *spdy.PingFrame: - frameErr = s.handlePingFrame(frame) - case *spdy.GoAwayFrame: - frameErr = s.handleGoAwayFrame(frame) - default: - frameErr = fmt.Errorf("unhandled frame type: %T", frame) - } - - if frameErr != nil { - fmt.Errorf("frame handling error: %s", frameErr) - } - } -} - -func (s *Connection) 
getStreamPriority(streamId spdy.StreamId) uint8 { - stream, streamOk := s.getStream(streamId) - if !streamOk { - return 7 - } - return stream.priority -} - -func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) { - var parent *Stream - if frame.AssociatedToStreamId != spdy.StreamId(0) { - parent, _ = s.getStream(frame.AssociatedToStreamId) - } - - stream := &Stream{ - streamId: frame.StreamId, - parent: parent, - conn: s, - startChan: make(chan error), - headers: frame.Headers, - finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00, - replyCond: sync.NewCond(new(sync.Mutex)), - dataChan: make(chan []byte), - headerChan: make(chan http.Header), - closeChan: make(chan bool), - } - if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 { - stream.closeRemoteChannels() - } - - s.addStream(stream) -} - -// checkStreamFrame checks to see if a stream frame is allowed. -// If the stream is invalid, then a reset frame with protocol error -// will be returned. -func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool { - s.receiveIdLock.Lock() - defer s.receiveIdLock.Unlock() - if s.goneAway { - return false - } - validationErr := s.validateStreamId(frame.StreamId) - if validationErr != nil { - go func() { - resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId) - if resetErr != nil { - fmt.Errorf("reset error: %s", resetErr) - } - }() - return false - } - return true -} - -func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error { - stream, ok := s.getStream(frame.StreamId) - if !ok { - return fmt.Errorf("Missing stream: %d", frame.StreamId) - } - - newHandler(stream) - - return nil -} - -func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error { - debugMessage("(%p) Reply frame received for %d", s, frame.StreamId) - stream, streamOk := s.getStream(frame.StreamId) - if !streamOk { - debugMessage("Reply frame gone away for %d", frame.StreamId) - // Stream has already 
gone away - return nil - } - if stream.replied { - // Stream has already received reply - return nil - } - stream.replied = true - - // TODO Check for error - if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { - s.remoteStreamFinish(stream) - } - - close(stream.startChan) - - return nil -} - -func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error { - stream, streamOk := s.getStream(frame.StreamId) - if !streamOk { - // Stream has already been removed - return nil - } - s.removeStream(stream) - stream.closeRemoteChannels() - - if !stream.replied { - stream.replied = true - stream.startChan <- ErrReset - close(stream.startChan) - } - - stream.finishLock.Lock() - stream.finished = true - stream.finishLock.Unlock() - - return nil -} - -func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error { - stream, streamOk := s.getStream(frame.StreamId) - if !streamOk { - // Stream has already gone away - return nil - } - if !stream.replied { - // No reply received...Protocol error? - return nil - } - - // TODO limit headers while not blocking (use buffered chan or goroutine?) - select { - case <-stream.closeChan: - return nil - case stream.headerChan <- frame.Headers: - } - - if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { - s.remoteStreamFinish(stream) - } - - return nil -} - -func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { - debugMessage("(%p) Data frame received for %d", s, frame.StreamId) - stream, streamOk := s.getStream(frame.StreamId) - if !streamOk { - debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId) - // Stream has already gone away - return nil - } - if !stream.replied { - debugMessage("(%p) Data frame not replied %d", s, frame.StreamId) - // No reply received...Protocol error? 
- return nil - } - - debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId) - if len(frame.Data) > 0 { - stream.dataLock.RLock() - select { - case <-stream.closeChan: - debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId) - case stream.dataChan <- frame.Data: - debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId) - } - stream.dataLock.RUnlock() - } - if (frame.Flags & spdy.DataFlagFin) != 0x00 { - s.remoteStreamFinish(stream) - } - return nil -} - -func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error { - if s.pingId&0x01 != frame.Id&0x01 { - return s.framer.WriteFrame(frame) - } - pingChan, pingOk := s.pingChans[frame.Id] - if pingOk { - close(pingChan) - } - return nil -} - -func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error { - debugMessage("(%p) Go away received", s) - s.receiveIdLock.Lock() - if s.goneAway { - s.receiveIdLock.Unlock() - return nil - } - s.goneAway = true - s.receiveIdLock.Unlock() - - if s.lastStreamChan != nil { - stream, _ := s.getStream(frame.LastGoodStreamId) - go func() { - s.lastStreamChan <- stream - }() - } - - // Do not block frame handler waiting for closure - go s.shutdown(s.goAwayTimeout) - - return nil -} - -func (s *Connection) remoteStreamFinish(stream *Stream) { - stream.closeRemoteChannels() - - stream.finishLock.Lock() - if stream.finished { - // Stream is fully closed, cleanup - s.removeStream(stream) - } - stream.finishLock.Unlock() -} - -// CreateStream creates a new spdy stream using the parameters for -// creating the stream frame. The stream frame will be sent upon -// calling this function, however this function does not wait for -// the reply frame. If waiting for the reply is desired, use -// the stream Wait or WaitTimeout function on the stream returned -// by this function. 
-func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) { - // MUST synchronize stream creation (all the way to writing the frame) - // as stream IDs **MUST** increase monotonically. - s.nextIdLock.Lock() - defer s.nextIdLock.Unlock() - - streamId := s.getNextStreamId() - if streamId == 0 { - return nil, fmt.Errorf("Unable to get new stream id") - } - - stream := &Stream{ - streamId: streamId, - parent: parent, - conn: s, - startChan: make(chan error), - headers: headers, - dataChan: make(chan []byte), - headerChan: make(chan http.Header), - closeChan: make(chan bool), - } - - debugMessage("(%p) (%p) Create stream", s, stream) - - s.addStream(stream) - - return stream, s.sendStream(stream, fin) -} - -func (s *Connection) shutdown(closeTimeout time.Duration) { - // TODO Ensure this isn't called multiple times - s.shutdownLock.Lock() - if s.hasShutdown { - s.shutdownLock.Unlock() - return - } - s.hasShutdown = true - s.shutdownLock.Unlock() - - var timeout <-chan time.Time - if closeTimeout > time.Duration(0) { - timeout = time.After(closeTimeout) - } - streamsClosed := make(chan bool) - - go func() { - s.streamCond.L.Lock() - for len(s.streams) > 0 { - debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams) - s.streamCond.Wait() - } - s.streamCond.L.Unlock() - close(streamsClosed) - }() - - var err error - select { - case <-streamsClosed: - // No active streams, close should be safe - err = s.conn.Close() - case <-timeout: - // Force ungraceful close - err = s.conn.Close() - // Wait for cleanup to clear active streams - <-streamsClosed - } - - if err != nil { - duration := 10 * time.Minute - time.AfterFunc(duration, func() { - select { - case err, ok := <-s.shutdownChan: - if ok { - fmt.Errorf("Unhandled close error after %s: %s", duration, err) - } - default: - } - }) - s.shutdownChan <- err - } - close(s.shutdownChan) - - return -} - -// Closes spdy connection by sending GoAway frame and initiating shutdown 
-func (s *Connection) Close() error { - s.receiveIdLock.Lock() - if s.goneAway { - s.receiveIdLock.Unlock() - return nil - } - s.goneAway = true - s.receiveIdLock.Unlock() - - var lastStreamId spdy.StreamId - if s.receivedStreamId > 2 { - lastStreamId = s.receivedStreamId - 2 - } - - goAwayFrame := &spdy.GoAwayFrame{ - LastGoodStreamId: lastStreamId, - Status: spdy.GoAwayOK, - } - - err := s.framer.WriteFrame(goAwayFrame) - if err != nil { - return err - } - - go s.shutdown(s.closeTimeout) - - return nil -} - -// CloseWait closes the connection and waits for shutdown -// to finish. Note the underlying network Connection -// is not closed until the end of shutdown. -func (s *Connection) CloseWait() error { - closeErr := s.Close() - if closeErr != nil { - return closeErr - } - shutdownErr, ok := <-s.shutdownChan - if ok { - return shutdownErr - } - return nil -} - -// Wait waits for the connection to finish shutdown or for -// the wait timeout duration to expire. This needs to be -// called either after Close has been called or the GOAWAYFRAME -// has been received. If the wait timeout is 0, this function -// will block until shutdown finishes. If wait is never called -// and a shutdown error occurs, that error will be logged as an -// unhandled error. -func (s *Connection) Wait(waitTimeout time.Duration) error { - var timeout <-chan time.Time - if waitTimeout > time.Duration(0) { - timeout = time.After(waitTimeout) - } - - select { - case err, ok := <-s.shutdownChan: - if ok { - return err - } - case <-timeout: - return ErrTimeout - } - return nil -} - -// NotifyClose registers a channel to be called when the remote -// peer inidicates connection closure. The last stream to be -// received by the remote will be sent on the channel. The notify -// timeout will determine the duration between go away received -// and the connection being closed. 
-func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) { - s.goAwayTimeout = timeout - s.lastStreamChan = c -} - -// SetCloseTimeout sets the amount of time close will wait for -// streams to finish before terminating the underlying network -// connection. Setting the timeout to 0 will cause close to -// wait forever, which is the default. -func (s *Connection) SetCloseTimeout(timeout time.Duration) { - s.closeTimeout = timeout -} - -// SetIdleTimeout sets the amount of time the connection may sit idle before -// it is forcefully terminated. -func (s *Connection) SetIdleTimeout(timeout time.Duration) { - s.framer.setIdleTimeout(timeout) -} - -func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error { - var flags spdy.ControlFlags - if fin { - flags = spdy.ControlFlagFin - } - - headerFrame := &spdy.HeadersFrame{ - StreamId: stream.streamId, - Headers: headers, - CFHeader: spdy.ControlFrameHeader{Flags: flags}, - } - - return s.framer.WriteFrame(headerFrame) -} - -func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error { - var flags spdy.ControlFlags - if fin { - flags = spdy.ControlFlagFin - } - - replyFrame := &spdy.SynReplyFrame{ - StreamId: stream.streamId, - Headers: headers, - CFHeader: spdy.ControlFrameHeader{Flags: flags}, - } - - return s.framer.WriteFrame(replyFrame) -} - -func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error { - resetFrame := &spdy.RstStreamFrame{ - StreamId: streamId, - Status: status, - } - - return s.framer.WriteFrame(resetFrame) -} - -func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error { - return s.sendResetFrame(status, stream.streamId) -} - -func (s *Connection) sendStream(stream *Stream, fin bool) error { - var flags spdy.ControlFlags - if fin { - flags = spdy.ControlFlagFin - stream.finished = true - } - - var parentId spdy.StreamId - if stream.parent != nil { - parentId = 
stream.parent.streamId - } - - streamFrame := &spdy.SynStreamFrame{ - StreamId: spdy.StreamId(stream.streamId), - AssociatedToStreamId: spdy.StreamId(parentId), - Headers: stream.headers, - CFHeader: spdy.ControlFrameHeader{Flags: flags}, - } - - return s.framer.WriteFrame(streamFrame) -} - -// getNextStreamId returns the next sequential id -// every call should produce a unique value or an error -func (s *Connection) getNextStreamId() spdy.StreamId { - sid := s.nextStreamId - if sid > 0x7fffffff { - return 0 - } - s.nextStreamId = s.nextStreamId + 2 - return sid -} - -// PeekNextStreamId returns the next sequential id and keeps the next id untouched -func (s *Connection) PeekNextStreamId() spdy.StreamId { - sid := s.nextStreamId - return sid -} - -func (s *Connection) validateStreamId(rid spdy.StreamId) error { - if rid > 0x7fffffff || rid < s.receivedStreamId { - return ErrInvalidStreamId - } - s.receivedStreamId = rid + 2 - return nil -} - -func (s *Connection) addStream(stream *Stream) { - s.streamCond.L.Lock() - s.streams[stream.streamId] = stream - debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId) - s.streamCond.Broadcast() - s.streamCond.L.Unlock() -} - -func (s *Connection) removeStream(stream *Stream) { - s.streamCond.L.Lock() - delete(s.streams, stream.streamId) - debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId) - s.streamCond.Broadcast() - s.streamCond.L.Unlock() -} - -func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) { - s.streamLock.RLock() - stream, ok = s.streams[streamId] - s.streamLock.RUnlock() - return -} - -// FindStream looks up the given stream id and either waits for the -// stream to be found or returns nil if the stream id is no longer -// valid. 
-func (s *Connection) FindStream(streamId uint32) *Stream { - var stream *Stream - var ok bool - s.streamCond.L.Lock() - stream, ok = s.streams[spdy.StreamId(streamId)] - debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok) - for !ok && streamId >= uint32(s.receivedStreamId) { - s.streamCond.Wait() - stream, ok = s.streams[spdy.StreamId(streamId)] - } - s.streamCond.L.Unlock() - return stream -} - -func (s *Connection) CloseChan() <-chan bool { - return s.closeChan -} diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go deleted file mode 100644 index b59fa5f..0000000 --- a/vendor/github.com/docker/spdystream/handlers.go +++ /dev/null @@ -1,38 +0,0 @@ -package spdystream - -import ( - "io" - "net/http" -) - -// MirrorStreamHandler mirrors all streams. -func MirrorStreamHandler(stream *Stream) { - replyErr := stream.SendReply(http.Header{}, false) - if replyErr != nil { - return - } - - go func() { - io.Copy(stream, stream) - stream.Close() - }() - go func() { - for { - header, receiveErr := stream.ReceiveHeader() - if receiveErr != nil { - return - } - sendErr := stream.SendHeader(header, false) - if sendErr != nil { - return - } - } - }() -} - -// NoopStreamHandler does nothing when stream connects, most -// likely used with RejectAuthHandler which will not allow any -// streams to make it to the stream handler. 
-func NoOpStreamHandler(stream *Stream) { - stream.SendReply(http.Header{}, false) -} diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go deleted file mode 100644 index fc8582b..0000000 --- a/vendor/github.com/docker/spdystream/priority.go +++ /dev/null @@ -1,98 +0,0 @@ -package spdystream - -import ( - "container/heap" - "sync" - - "github.com/docker/spdystream/spdy" -) - -type prioritizedFrame struct { - frame spdy.Frame - priority uint8 - insertId uint64 -} - -type frameQueue []*prioritizedFrame - -func (fq frameQueue) Len() int { - return len(fq) -} - -func (fq frameQueue) Less(i, j int) bool { - if fq[i].priority == fq[j].priority { - return fq[i].insertId < fq[j].insertId - } - return fq[i].priority < fq[j].priority -} - -func (fq frameQueue) Swap(i, j int) { - fq[i], fq[j] = fq[j], fq[i] -} - -func (fq *frameQueue) Push(x interface{}) { - *fq = append(*fq, x.(*prioritizedFrame)) -} - -func (fq *frameQueue) Pop() interface{} { - old := *fq - n := len(old) - *fq = old[0 : n-1] - return old[n-1] -} - -type PriorityFrameQueue struct { - queue *frameQueue - c *sync.Cond - size int - nextInsertId uint64 - drain bool -} - -func NewPriorityFrameQueue(size int) *PriorityFrameQueue { - queue := make(frameQueue, 0, size) - heap.Init(&queue) - - return &PriorityFrameQueue{ - queue: &queue, - size: size, - c: sync.NewCond(&sync.Mutex{}), - } -} - -func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) { - q.c.L.Lock() - defer q.c.L.Unlock() - for q.queue.Len() >= q.size { - q.c.Wait() - } - pFrame := &prioritizedFrame{ - frame: frame, - priority: priority, - insertId: q.nextInsertId, - } - q.nextInsertId = q.nextInsertId + 1 - heap.Push(q.queue, pFrame) - q.c.Signal() -} - -func (q *PriorityFrameQueue) Pop() spdy.Frame { - q.c.L.Lock() - defer q.c.L.Unlock() - for q.queue.Len() == 0 { - if q.drain { - return nil - } - q.c.Wait() - } - frame := heap.Pop(q.queue).(*prioritizedFrame).frame - 
q.c.Signal() - return frame -} - -func (q *PriorityFrameQueue) Drain() { - q.c.L.Lock() - defer q.c.L.Unlock() - q.drain = true - q.c.Broadcast() -} diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go deleted file mode 100644 index 5a5ff0e..0000000 --- a/vendor/github.com/docker/spdystream/spdy/dictionary.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package spdy - -// headerDictionary is the dictionary sent to the zlib compressor/decompressor. -var headerDictionary = []byte{ - 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, - 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, - 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, - 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, - 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, - 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, - 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, - 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, - 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, - 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, - 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, - 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, - 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 
0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, - 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, - 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, - 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, - 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, - 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, - 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, - 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, - 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, - 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, - 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, - 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, - 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, - 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, - 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, - 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, - 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73, - 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, - 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, - 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, - 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, - 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, - 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, - 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, - 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, - 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, - 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 
0x00, 0x00, - 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, - 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, - 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, - 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, - 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, - 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, - 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, - 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, - 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, - 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, - 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, - 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, - 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, - 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, - 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, - 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, - 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, - 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, - 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, - 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, - 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, - 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, - 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, - 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, - 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, - 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 
0x67, 0x69, - 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, - 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, - 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, - 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, - 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, - 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, - 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, - 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, - 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, - 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, - 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, - 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, - 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, - 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f, - 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, - 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, - 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, - 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, - 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, - 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, - 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, - 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, - 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, - 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, - 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46, - 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, - 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, - 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, - 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 
0x74, 0x20, - 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, - 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, - 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, - 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, - 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, - 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, - 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, - 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, - 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, - 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, - 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, - 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, - 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, - 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, - 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, - 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, - 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, - 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, - 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, - 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, - 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, - 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, - 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, - 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, - 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, - 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, - 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, -} diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go deleted file mode 100644 index 9359a95..0000000 --- a/vendor/github.com/docker/spdystream/spdy/read.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package spdy - -import ( - "compress/zlib" - "encoding/binary" - "io" - "net/http" - "strings" -) - -func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error { - return f.readSynStreamFrame(h, frame) -} - -func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error { - return f.readSynReplyFrame(h, frame) -} - -func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error { - frame.CFHeader = h - if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { - return err - } - if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { - return err - } - if frame.Status == 0 { - return &Error{InvalidControlFrame, frame.StreamId} - } - if frame.StreamId == 0 { - return &Error{ZeroStreamId, 0} - } - return nil -} - -func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error { - frame.CFHeader = h - var numSettings uint32 - if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil { - return err - } - frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings) - for i := uint32(0); i < numSettings; i++ { - if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil { - return err - } - frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24) - frame.FlagIdValues[i].Id &= 0xffffff - if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil { - return err - } - } - return nil -} - -func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error { - frame.CFHeader = h - if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil { - return err - } - if frame.Id == 0 { - return &Error{ZeroStreamId, 0} - } - if frame.CFHeader.Flags != 0 { - return &Error{InvalidControlFrame, StreamId(frame.Id)} - } - return nil -} - -func (frame *GoAwayFrame) read(h ControlFrameHeader, f 
*Framer) error { - frame.CFHeader = h - if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil { - return err - } - if frame.CFHeader.Flags != 0 { - return &Error{InvalidControlFrame, frame.LastGoodStreamId} - } - if frame.CFHeader.length != 8 { - return &Error{InvalidControlFrame, frame.LastGoodStreamId} - } - if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { - return err - } - return nil -} - -func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error { - return f.readHeadersFrame(h, frame) -} - -func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error { - frame.CFHeader = h - if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { - return err - } - if frame.CFHeader.Flags != 0 { - return &Error{InvalidControlFrame, frame.StreamId} - } - if frame.CFHeader.length != 8 { - return &Error{InvalidControlFrame, frame.StreamId} - } - if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil { - return err - } - return nil -} - -func newControlFrame(frameType ControlFrameType) (controlFrame, error) { - ctor, ok := cframeCtor[frameType] - if !ok { - return nil, &Error{Err: InvalidControlFrame} - } - return ctor(), nil -} - -var cframeCtor = map[ControlFrameType]func() controlFrame{ - TypeSynStream: func() controlFrame { return new(SynStreamFrame) }, - TypeSynReply: func() controlFrame { return new(SynReplyFrame) }, - TypeRstStream: func() controlFrame { return new(RstStreamFrame) }, - TypeSettings: func() controlFrame { return new(SettingsFrame) }, - TypePing: func() controlFrame { return new(PingFrame) }, - TypeGoAway: func() controlFrame { return new(GoAwayFrame) }, - TypeHeaders: func() controlFrame { return new(HeadersFrame) }, - TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) }, -} - -func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error { - if f.headerDecompressor != nil { - f.headerReader.N = payloadSize - 
return nil - } - f.headerReader = io.LimitedReader{R: f.r, N: payloadSize} - decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary)) - if err != nil { - return err - } - f.headerDecompressor = decompressor - return nil -} - -// ReadFrame reads SPDY encoded data and returns a decompressed Frame. -func (f *Framer) ReadFrame() (Frame, error) { - var firstWord uint32 - if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil { - return nil, err - } - if firstWord&0x80000000 != 0 { - frameType := ControlFrameType(firstWord & 0xffff) - version := uint16(firstWord >> 16 & 0x7fff) - return f.parseControlFrame(version, frameType) - } - return f.parseDataFrame(StreamId(firstWord & 0x7fffffff)) -} - -func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) { - var length uint32 - if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { - return nil, err - } - flags := ControlFlags((length & 0xff000000) >> 24) - length &= 0xffffff - header := ControlFrameHeader{version, frameType, flags, length} - cframe, err := newControlFrame(frameType) - if err != nil { - return nil, err - } - if err = cframe.read(header, f); err != nil { - return nil, err - } - return cframe, nil -} - -func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) { - var numHeaders uint32 - if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil { - return nil, err - } - var e error - h := make(http.Header, int(numHeaders)) - for i := 0; i < int(numHeaders); i++ { - var length uint32 - if err := binary.Read(r, binary.BigEndian, &length); err != nil { - return nil, err - } - nameBytes := make([]byte, length) - if _, err := io.ReadFull(r, nameBytes); err != nil { - return nil, err - } - name := string(nameBytes) - if name != strings.ToLower(name) { - e = &Error{UnlowercasedHeaderName, streamId} - name = strings.ToLower(name) - } - if h[name] != nil { - e = &Error{DuplicateHeaders, streamId} - 
} - if err := binary.Read(r, binary.BigEndian, &length); err != nil { - return nil, err - } - value := make([]byte, length) - if _, err := io.ReadFull(r, value); err != nil { - return nil, err - } - valueList := strings.Split(string(value), headerValueSeparator) - for _, v := range valueList { - h.Add(name, v) - } - } - if e != nil { - return h, e - } - return h, nil -} - -func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error { - frame.CFHeader = h - var err error - if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { - return err - } - if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil { - return err - } - if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil { - return err - } - frame.Priority >>= 5 - if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil { - return err - } - reader := f.r - if !f.headerCompressionDisabled { - err := f.uncorkHeaderDecompressor(int64(h.length - 10)) - if err != nil { - return err - } - reader = f.headerDecompressor - } - frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) - if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { - err = &Error{WrongCompressedPayloadSize, 0} - } - if err != nil { - return err - } - for h := range frame.Headers { - if invalidReqHeaders[h] { - return &Error{InvalidHeaderPresent, frame.StreamId} - } - } - if frame.StreamId == 0 { - return &Error{ZeroStreamId, 0} - } - return nil -} - -func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error { - frame.CFHeader = h - var err error - if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { - return err - } - reader := f.r - if !f.headerCompressionDisabled { - err := f.uncorkHeaderDecompressor(int64(h.length - 4)) - if err != nil { - return err - } - reader = f.headerDecompressor - } - frame.Headers, err = 
parseHeaderValueBlock(reader, frame.StreamId) - if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { - err = &Error{WrongCompressedPayloadSize, 0} - } - if err != nil { - return err - } - for h := range frame.Headers { - if invalidRespHeaders[h] { - return &Error{InvalidHeaderPresent, frame.StreamId} - } - } - if frame.StreamId == 0 { - return &Error{ZeroStreamId, 0} - } - return nil -} - -func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error { - frame.CFHeader = h - var err error - if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { - return err - } - reader := f.r - if !f.headerCompressionDisabled { - err := f.uncorkHeaderDecompressor(int64(h.length - 4)) - if err != nil { - return err - } - reader = f.headerDecompressor - } - frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) - if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { - err = &Error{WrongCompressedPayloadSize, 0} - } - if err != nil { - return err - } - var invalidHeaders map[string]bool - if frame.StreamId%2 == 0 { - invalidHeaders = invalidReqHeaders - } else { - invalidHeaders = invalidRespHeaders - } - for h := range frame.Headers { - if invalidHeaders[h] { - return &Error{InvalidHeaderPresent, frame.StreamId} - } - } - if frame.StreamId == 0 { - return &Error{ZeroStreamId, 0} - } - return nil -} - -func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) { - var length uint32 - if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { - return nil, err - } - var frame DataFrame - frame.StreamId = streamId - frame.Flags = DataFlags(length >> 24) - length &= 0xffffff - frame.Data = make([]byte, length) - if _, err := io.ReadFull(f.r, frame.Data); err != nil { - return nil, err - } - if frame.StreamId == 0 { - return nil, &Error{ZeroStreamId, 0} - } - return &frame, nil -} diff --git 
a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go deleted file mode 100644 index 7b6ee9c..0000000 --- a/vendor/github.com/docker/spdystream/spdy/types.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package spdy implements the SPDY protocol (currently SPDY/3), described in -// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3. -package spdy - -import ( - "bytes" - "compress/zlib" - "io" - "net/http" -) - -// Version is the protocol version number that this package implements. -const Version = 3 - -// ControlFrameType stores the type field in a control frame header. -type ControlFrameType uint16 - -const ( - TypeSynStream ControlFrameType = 0x0001 - TypeSynReply = 0x0002 - TypeRstStream = 0x0003 - TypeSettings = 0x0004 - TypePing = 0x0006 - TypeGoAway = 0x0007 - TypeHeaders = 0x0008 - TypeWindowUpdate = 0x0009 -) - -// ControlFlags are the flags that can be set on a control frame. -type ControlFlags uint8 - -const ( - ControlFlagFin ControlFlags = 0x01 - ControlFlagUnidirectional = 0x02 - ControlFlagSettingsClearSettings = 0x01 -) - -// DataFlags are the flags that can be set on a data frame. -type DataFlags uint8 - -const ( - DataFlagFin DataFlags = 0x01 -) - -// MaxDataLength is the maximum number of bytes that can be stored in one frame. -const MaxDataLength = 1<<24 - 1 - -// headerValueSepator separates multiple header values. -const headerValueSeparator = "\x00" - -// Frame is a single SPDY frame in its unpacked in-memory representation. Use -// Framer to read and write it. -type Frame interface { - write(f *Framer) error -} - -// ControlFrameHeader contains all the fields in a control frame header, -// in its unpacked in-memory representation. -type ControlFrameHeader struct { - // Note, high bit is the "Control" bit. 
- version uint16 // spdy version number - frameType ControlFrameType - Flags ControlFlags - length uint32 // length of data field -} - -type controlFrame interface { - Frame - read(h ControlFrameHeader, f *Framer) error -} - -// StreamId represents a 31-bit value identifying the stream. -type StreamId uint32 - -// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM -// frame. -type SynStreamFrame struct { - CFHeader ControlFrameHeader - StreamId StreamId - AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to - Priority uint8 // priority of this frame (3-bit) - Slot uint8 // index in the server's credential vector of the client certificate - Headers http.Header -} - -// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame. -type SynReplyFrame struct { - CFHeader ControlFrameHeader - StreamId StreamId - Headers http.Header -} - -// RstStreamStatus represents the status that led to a RST_STREAM. -type RstStreamStatus uint32 - -const ( - ProtocolError RstStreamStatus = iota + 1 - InvalidStream - RefusedStream - UnsupportedVersion - Cancel - InternalError - FlowControlError - StreamInUse - StreamAlreadyClosed - InvalidCredentials - FrameTooLarge -) - -// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM -// frame. -type RstStreamFrame struct { - CFHeader ControlFrameHeader - StreamId StreamId - Status RstStreamStatus -} - -// SettingsFlag represents a flag in a SETTINGS frame. -type SettingsFlag uint8 - -const ( - FlagSettingsPersistValue SettingsFlag = 0x1 - FlagSettingsPersisted = 0x2 -) - -// SettingsFlag represents the id of an id/value pair in a SETTINGS frame. 
-type SettingsId uint32 - -const ( - SettingsUploadBandwidth SettingsId = iota + 1 - SettingsDownloadBandwidth - SettingsRoundTripTime - SettingsMaxConcurrentStreams - SettingsCurrentCwnd - SettingsDownloadRetransRate - SettingsInitialWindowSize - SettingsClientCretificateVectorSize -) - -// SettingsFlagIdValue is the unpacked, in-memory representation of the -// combined flag/id/value for a setting in a SETTINGS frame. -type SettingsFlagIdValue struct { - Flag SettingsFlag - Id SettingsId - Value uint32 -} - -// SettingsFrame is the unpacked, in-memory representation of a SPDY -// SETTINGS frame. -type SettingsFrame struct { - CFHeader ControlFrameHeader - FlagIdValues []SettingsFlagIdValue -} - -// PingFrame is the unpacked, in-memory representation of a PING frame. -type PingFrame struct { - CFHeader ControlFrameHeader - Id uint32 // unique id for this ping, from server is even, from client is odd. -} - -// GoAwayStatus represents the status in a GoAwayFrame. -type GoAwayStatus uint32 - -const ( - GoAwayOK GoAwayStatus = iota - GoAwayProtocolError - GoAwayInternalError -) - -// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame. -type GoAwayFrame struct { - CFHeader ControlFrameHeader - LastGoodStreamId StreamId // last stream id which was accepted by sender - Status GoAwayStatus -} - -// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame. -type HeadersFrame struct { - CFHeader ControlFrameHeader - StreamId StreamId - Headers http.Header -} - -// WindowUpdateFrame is the unpacked, in-memory representation of a -// WINDOW_UPDATE frame. -type WindowUpdateFrame struct { - CFHeader ControlFrameHeader - StreamId StreamId - DeltaWindowSize uint32 // additional number of bytes to existing window size -} - -// TODO: Implement credential frame and related methods. - -// DataFrame is the unpacked, in-memory representation of a DATA frame. -type DataFrame struct { - // Note, high bit is the "Control" bit. 
Should be 0 for data frames. - StreamId StreamId - Flags DataFlags - Data []byte // payload data of this frame -} - -// A SPDY specific error. -type ErrorCode string - -const ( - UnlowercasedHeaderName ErrorCode = "header was not lowercased" - DuplicateHeaders = "multiple headers with same name" - WrongCompressedPayloadSize = "compressed payload size was incorrect" - UnknownFrameType = "unknown frame type" - InvalidControlFrame = "invalid control frame" - InvalidDataFrame = "invalid data frame" - InvalidHeaderPresent = "frame contained invalid header" - ZeroStreamId = "stream id zero is disallowed" -) - -// Error contains both the type of error and additional values. StreamId is 0 -// if Error is not associated with a stream. -type Error struct { - Err ErrorCode - StreamId StreamId -} - -func (e *Error) Error() string { - return string(e.Err) -} - -var invalidReqHeaders = map[string]bool{ - "Connection": true, - "Host": true, - "Keep-Alive": true, - "Proxy-Connection": true, - "Transfer-Encoding": true, -} - -var invalidRespHeaders = map[string]bool{ - "Connection": true, - "Keep-Alive": true, - "Proxy-Connection": true, - "Transfer-Encoding": true, -} - -// Framer handles serializing/deserializing SPDY frames, including compressing/ -// decompressing payloads. -type Framer struct { - headerCompressionDisabled bool - w io.Writer - headerBuf *bytes.Buffer - headerCompressor *zlib.Writer - r io.Reader - headerReader io.LimitedReader - headerDecompressor io.ReadCloser -} - -// NewFramer allocates a new Framer for a given SPDY connection, represented by -// a io.Writer and io.Reader. Note that Framer will read and write individual fields -// from/to the Reader and Writer, so the caller should pass in an appropriately -// buffered implementation to optimize performance. 
-func NewFramer(w io.Writer, r io.Reader) (*Framer, error) { - compressBuf := new(bytes.Buffer) - compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary)) - if err != nil { - return nil, err - } - framer := &Framer{ - w: w, - headerBuf: compressBuf, - headerCompressor: compressor, - r: r, - } - return framer, nil -} diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go deleted file mode 100644 index b212f66..0000000 --- a/vendor/github.com/docker/spdystream/spdy/write.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package spdy - -import ( - "encoding/binary" - "io" - "net/http" - "strings" -) - -func (frame *SynStreamFrame) write(f *Framer) error { - return f.writeSynStreamFrame(frame) -} - -func (frame *SynReplyFrame) write(f *Framer) error { - return f.writeSynReplyFrame(frame) -} - -func (frame *RstStreamFrame) write(f *Framer) (err error) { - if frame.StreamId == 0 { - return &Error{ZeroStreamId, 0} - } - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeRstStream - frame.CFHeader.Flags = 0 - frame.CFHeader.length = 8 - - // Serialize frame to Writer. - if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { - return - } - if frame.Status == 0 { - return &Error{InvalidControlFrame, frame.StreamId} - } - if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { - return - } - return -} - -func (frame *SettingsFrame) write(f *Framer) (err error) { - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeSettings - frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) - - // Serialize frame to Writer. 
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { - return - } - for _, flagIdValue := range frame.FlagIdValues { - flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id) - if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil { - return - } - } - return -} - -func (frame *PingFrame) write(f *Framer) (err error) { - if frame.Id == 0 { - return &Error{ZeroStreamId, 0} - } - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypePing - frame.CFHeader.Flags = 0 - frame.CFHeader.length = 4 - - // Serialize frame to Writer. - if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil { - return - } - return -} - -func (frame *GoAwayFrame) write(f *Framer) (err error) { - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeGoAway - frame.CFHeader.Flags = 0 - frame.CFHeader.length = 8 - - // Serialize frame to Writer. - if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { - return - } - return nil -} - -func (frame *HeadersFrame) write(f *Framer) error { - return f.writeHeadersFrame(frame) -} - -func (frame *WindowUpdateFrame) write(f *Framer) (err error) { - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeWindowUpdate - frame.CFHeader.Flags = 0 - frame.CFHeader.length = 8 - - // Serialize frame to Writer. 
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil { - return - } - return nil -} - -func (frame *DataFrame) write(f *Framer) error { - return f.writeDataFrame(frame) -} - -// WriteFrame writes a frame. -func (f *Framer) WriteFrame(frame Frame) error { - return frame.write(f) -} - -func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error { - if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil { - return err - } - if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil { - return err - } - flagsAndLength := uint32(h.Flags)<<24 | h.length - if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil { - return err - } - return nil -} - -func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) { - n = 0 - if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { - return - } - n += 2 - for name, values := range h { - if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { - return - } - n += 2 - name = strings.ToLower(name) - if _, err = io.WriteString(w, name); err != nil { - return - } - n += len(name) - v := strings.Join(values, headerValueSeparator) - if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { - return - } - n += 2 - if _, err = io.WriteString(w, v); err != nil { - return - } - n += len(v) - } - return -} - -func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) { - if frame.StreamId == 0 { - return &Error{ZeroStreamId, 0} - } - // Marshal the headers. 
- var writer io.Writer = f.headerBuf - if !f.headerCompressionDisabled { - writer = f.headerCompressor - } - if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { - return - } - if !f.headerCompressionDisabled { - f.headerCompressor.Flush() - } - - // Set ControlFrameHeader. - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeSynStream - frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) - - // Serialize frame to Writer. - if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return err - } - if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { - return err - } - if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil { - return err - } - if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil { - return err - } - if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil { - return err - } - if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { - return err - } - f.headerBuf.Reset() - return nil -} - -func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) { - if frame.StreamId == 0 { - return &Error{ZeroStreamId, 0} - } - // Marshal the headers. - var writer io.Writer = f.headerBuf - if !f.headerCompressionDisabled { - writer = f.headerCompressor - } - if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { - return - } - if !f.headerCompressionDisabled { - f.headerCompressor.Flush() - } - - // Set ControlFrameHeader. - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeSynReply - frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) - - // Serialize frame to Writer. 
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { - return - } - if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { - return - } - f.headerBuf.Reset() - return -} - -func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) { - if frame.StreamId == 0 { - return &Error{ZeroStreamId, 0} - } - // Marshal the headers. - var writer io.Writer = f.headerBuf - if !f.headerCompressionDisabled { - writer = f.headerCompressor - } - if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { - return - } - if !f.headerCompressionDisabled { - f.headerCompressor.Flush() - } - - // Set ControlFrameHeader. - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeHeaders - frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) - - // Serialize frame to Writer. - if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { - return - } - if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { - return - } - f.headerBuf.Reset() - return -} - -func (f *Framer) writeDataFrame(frame *DataFrame) (err error) { - if frame.StreamId == 0 { - return &Error{ZeroStreamId, 0} - } - if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength { - return &Error{InvalidDataFrame, frame.StreamId} - } - - // Serialize frame to Writer. 
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { - return - } - flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data)) - if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil { - return - } - if _, err = f.w.Write(frame.Data); err != nil { - return - } - return nil -} diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go deleted file mode 100644 index f9e9ee2..0000000 --- a/vendor/github.com/docker/spdystream/stream.go +++ /dev/null @@ -1,327 +0,0 @@ -package spdystream - -import ( - "errors" - "fmt" - "io" - "net" - "net/http" - "sync" - "time" - - "github.com/docker/spdystream/spdy" -) - -var ( - ErrUnreadPartialData = errors.New("unread partial data") -) - -type Stream struct { - streamId spdy.StreamId - parent *Stream - conn *Connection - startChan chan error - - dataLock sync.RWMutex - dataChan chan []byte - unread []byte - - priority uint8 - headers http.Header - headerChan chan http.Header - finishLock sync.Mutex - finished bool - replyCond *sync.Cond - replied bool - closeLock sync.Mutex - closeChan chan bool -} - -// WriteData writes data to stream, sending a dataframe per call -func (s *Stream) WriteData(data []byte, fin bool) error { - s.waitWriteReply() - var flags spdy.DataFlags - - if fin { - flags = spdy.DataFlagFin - s.finishLock.Lock() - if s.finished { - s.finishLock.Unlock() - return ErrWriteClosedStream - } - s.finished = true - s.finishLock.Unlock() - } - - dataFrame := &spdy.DataFrame{ - StreamId: s.streamId, - Flags: flags, - Data: data, - } - - debugMessage("(%p) (%d) Writing data frame", s, s.streamId) - return s.conn.framer.WriteFrame(dataFrame) -} - -// Write writes bytes to a stream, calling write data for each call. 
-func (s *Stream) Write(data []byte) (n int, err error) { - err = s.WriteData(data, false) - if err == nil { - n = len(data) - } - return -} - -// Read reads bytes from a stream, a single read will never get more -// than what is sent on a single data frame, but a multiple calls to -// read may get data from the same data frame. -func (s *Stream) Read(p []byte) (n int, err error) { - if s.unread == nil { - select { - case <-s.closeChan: - return 0, io.EOF - case read, ok := <-s.dataChan: - if !ok { - return 0, io.EOF - } - s.unread = read - } - } - n = copy(p, s.unread) - if n < len(s.unread) { - s.unread = s.unread[n:] - } else { - s.unread = nil - } - return -} - -// ReadData reads an entire data frame and returns the byte array -// from the data frame. If there is unread data from the result -// of a Read call, this function will return an ErrUnreadPartialData. -func (s *Stream) ReadData() ([]byte, error) { - debugMessage("(%p) Reading data from %d", s, s.streamId) - if s.unread != nil { - return nil, ErrUnreadPartialData - } - select { - case <-s.closeChan: - return nil, io.EOF - case read, ok := <-s.dataChan: - if !ok { - return nil, io.EOF - } - return read, nil - } -} - -func (s *Stream) waitWriteReply() { - if s.replyCond != nil { - s.replyCond.L.Lock() - for !s.replied { - s.replyCond.Wait() - } - s.replyCond.L.Unlock() - } -} - -// Wait waits for the stream to receive a reply. -func (s *Stream) Wait() error { - return s.WaitTimeout(time.Duration(0)) -} - -// WaitTimeout waits for the stream to receive a reply or for timeout. -// When the timeout is reached, ErrTimeout will be returned. 
-func (s *Stream) WaitTimeout(timeout time.Duration) error { - var timeoutChan <-chan time.Time - if timeout > time.Duration(0) { - timeoutChan = time.After(timeout) - } - - select { - case err := <-s.startChan: - if err != nil { - return err - } - break - case <-timeoutChan: - return ErrTimeout - } - return nil -} - -// Close closes the stream by sending an empty data frame with the -// finish flag set, indicating this side is finished with the stream. -func (s *Stream) Close() error { - select { - case <-s.closeChan: - // Stream is now fully closed - s.conn.removeStream(s) - default: - break - } - return s.WriteData([]byte{}, true) -} - -// Reset sends a reset frame, putting the stream into the fully closed state. -func (s *Stream) Reset() error { - s.conn.removeStream(s) - return s.resetStream() -} - -func (s *Stream) resetStream() error { - // Always call closeRemoteChannels, even if s.finished is already true. - // This makes it so that stream.Close() followed by stream.Reset() allows - // stream.Read() to unblock. - s.closeRemoteChannels() - - s.finishLock.Lock() - if s.finished { - s.finishLock.Unlock() - return nil - } - s.finished = true - s.finishLock.Unlock() - - resetFrame := &spdy.RstStreamFrame{ - StreamId: s.streamId, - Status: spdy.Cancel, - } - return s.conn.framer.WriteFrame(resetFrame) -} - -// CreateSubStream creates a stream using the current as the parent -func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) { - return s.conn.CreateStream(headers, s, fin) -} - -// SetPriority sets the stream priority, does not affect the -// remote priority of this stream after Open has been called. -// Valid values are 0 through 7, 0 being the highest priority -// and 7 the lowest. 
-func (s *Stream) SetPriority(priority uint8) { - s.priority = priority -} - -// SendHeader sends a header frame across the stream -func (s *Stream) SendHeader(headers http.Header, fin bool) error { - return s.conn.sendHeaders(headers, s, fin) -} - -// SendReply sends a reply on a stream, only valid to be called once -// when handling a new stream -func (s *Stream) SendReply(headers http.Header, fin bool) error { - if s.replyCond == nil { - return errors.New("cannot reply on initiated stream") - } - s.replyCond.L.Lock() - defer s.replyCond.L.Unlock() - if s.replied { - return nil - } - - err := s.conn.sendReply(headers, s, fin) - if err != nil { - return err - } - - s.replied = true - s.replyCond.Broadcast() - return nil -} - -// Refuse sends a reset frame with the status refuse, only -// valid to be called once when handling a new stream. This -// may be used to indicate that a stream is not allowed -// when http status codes are not being used. -func (s *Stream) Refuse() error { - if s.replied { - return nil - } - s.replied = true - return s.conn.sendReset(spdy.RefusedStream, s) -} - -// Cancel sends a reset frame with the status canceled. This -// can be used at any time by the creator of the Stream to -// indicate the stream is no longer needed. -func (s *Stream) Cancel() error { - return s.conn.sendReset(spdy.Cancel, s) -} - -// ReceiveHeader receives a header sent on the other side -// of the stream. This function will block until a header -// is received or stream is closed. 
-func (s *Stream) ReceiveHeader() (http.Header, error) { - select { - case <-s.closeChan: - break - case header, ok := <-s.headerChan: - if !ok { - return nil, fmt.Errorf("header chan closed") - } - return header, nil - } - return nil, fmt.Errorf("stream closed") -} - -// Parent returns the parent stream -func (s *Stream) Parent() *Stream { - return s.parent -} - -// Headers returns the headers used to create the stream -func (s *Stream) Headers() http.Header { - return s.headers -} - -// String returns the string version of stream using the -// streamId to uniquely identify the stream -func (s *Stream) String() string { - return fmt.Sprintf("stream:%d", s.streamId) -} - -// Identifier returns a 32 bit identifier for the stream -func (s *Stream) Identifier() uint32 { - return uint32(s.streamId) -} - -// IsFinished returns whether the stream has finished -// sending data -func (s *Stream) IsFinished() bool { - return s.finished -} - -// Implement net.Conn interface - -func (s *Stream) LocalAddr() net.Addr { - return s.conn.conn.LocalAddr() -} - -func (s *Stream) RemoteAddr() net.Addr { - return s.conn.conn.RemoteAddr() -} - -// TODO set per stream values instead of connection-wide - -func (s *Stream) SetDeadline(t time.Time) error { - return s.conn.conn.SetDeadline(t) -} - -func (s *Stream) SetReadDeadline(t time.Time) error { - return s.conn.conn.SetReadDeadline(t) -} - -func (s *Stream) SetWriteDeadline(t time.Time) error { - return s.conn.conn.SetWriteDeadline(t) -} - -func (s *Stream) closeRemoteChannels() { - s.closeLock.Lock() - defer s.closeLock.Unlock() - select { - case <-s.closeChan: - default: - close(s.closeChan) - } -} diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go deleted file mode 100644 index 1b2c199..0000000 --- a/vendor/github.com/docker/spdystream/utils.go +++ /dev/null @@ -1,16 +0,0 @@ -package spdystream - -import ( - "log" - "os" -) - -var ( - DEBUG = os.Getenv("DEBUG") -) - -func 
debugMessage(fmt string, args ...interface{}) { - if DEBUG != "" { - log.Printf(fmt, args...) - } -} diff --git a/vendor/github.com/emicklei/go-restful/.gitignore b/vendor/github.com/emicklei/go-restful/.gitignore deleted file mode 100644 index cece7be..0000000 --- a/vendor/github.com/emicklei/go-restful/.gitignore +++ /dev/null @@ -1,70 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -restful.html - -*.out - -tmp.prof - -go-restful.test - -examples/restful-basic-authentication - -examples/restful-encoding-filter - -examples/restful-filters - -examples/restful-hello-world - -examples/restful-resource-functions - -examples/restful-serve-static - -examples/restful-user-service - -*.DS_Store -examples/restful-user-resource - -examples/restful-multi-containers - -examples/restful-form-handling - -examples/restful-CORS-filter - -examples/restful-options-filter - -examples/restful-curly-router - -examples/restful-cpuprofiler-service - -examples/restful-pre-post-filters - -curly.prof - -examples/restful-NCSA-logging - -examples/restful-html-template - -s.html -restful-path-tail diff --git a/vendor/github.com/emicklei/go-restful/.travis.yml b/vendor/github.com/emicklei/go-restful/.travis.yml deleted file mode 100644 index b22f8f5..0000000 --- a/vendor/github.com/emicklei/go-restful/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.x - -script: go test -v \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md deleted file mode 100644 index e525296..0000000 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ /dev/null @@ -1,273 +0,0 @@ -## Change history of go-restful - - -v2.9.5 -- fix panic in Response.WriteError if err == nil - 
-v2.9.4 - -- fix issue #400 , parsing mime type quality -- Route Builder added option for contentEncodingEnabled (#398) - -v2.9.3 - -- Avoid return of 415 Unsupported Media Type when request body is empty (#396) - -v2.9.2 - -- Reduce allocations in per-request methods to improve performance (#395) - -v2.9.1 - -- Fix issue with default responses and invalid status code 0. (#393) - -v2.9.0 - -- add per Route content encoding setting (overrides container setting) - -v2.8.0 - -- add Request.QueryParameters() -- add json-iterator (via build tag) -- disable vgo module (until log is moved) - -v2.7.1 - -- add vgo module - -v2.6.1 - -- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) - -v2.6.0 - -- Make JSR 311 routing and path param processing consistent -- Adding description to RouteBuilder.Reads() -- Update example for Swagger12 and OpenAPI - -2017-09-13 - -- added route condition functions using `.If(func)` in route building. - -2017-02-16 - -- solved issue #304, make operation names unique - -2017-01-30 - - [IMPORTANT] For swagger users, change your import statement to: - swagger "github.com/emicklei/go-restful-swagger12" - -- moved swagger 1.2 code to go-restful-swagger12 -- created TAG 2.0.0 - -2017-01-27 - -- remove defer request body close -- expose Dispatch for testing filters and Routefunctions -- swagger response model cannot be array -- created TAG 1.0.0 - -2016-12-22 - -- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool) - -2016-11-26 - -- Default change! now use CurlyRouter (was RouterJSR311) -- Default change! no more caching of request content -- Default change! 
do not recover from panics - -2016-09-22 - -- fix the DefaultRequestContentType feature - -2016-02-14 - -- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response -- add constructors for custom entity accessors for xml and json - -2015-09-27 - -- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency - -2015-09-25 - -- fixed problem with changing Header after WriteHeader (issue 235) - -2015-09-14 - -- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) -- added support for custom EntityReaderWriters. - -2015-08-06 - -- add support for reading entities from compressed request content -- use sync.Pool for compressors of http response and request body -- add Description to Parameter for documentation in Swagger UI - -2015-03-20 - -- add configurable logging - -2015-03-18 - -- if not specified, the Operation is derived from the Route function - -2015-03-17 - -- expose Parameter creation functions -- make trace logger an interface -- fix OPTIONSFilter -- customize rendering of ServiceError -- JSR311 router now handles wildcards -- add Notes to Route - -2014-11-27 - -- (api add) PrettyPrint per response. (as proposed in #167) - -2014-11-12 - -- (api add) ApiVersion(.) 
for documentation in Swagger UI - -2014-11-10 - -- (api change) struct fields tagged with "description" show up in Swagger UI - -2014-10-31 - -- (api change) ReturnsError -> Returns -- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder -- fix swagger nested structs -- sort Swagger response messages by code - -2014-10-23 - -- (api add) ReturnsError allows you to document Http codes in swagger -- fixed problem with greedy CurlyRouter -- (api add) Access-Control-Max-Age in CORS -- add tracing functionality (injectable) for debugging purposes -- support JSON parse 64bit int -- fix empty parameters for swagger -- WebServicesUrl is now optional for swagger -- fixed duplicate AccessControlAllowOrigin in CORS -- (api change) expose ServeMux in container -- (api add) added AllowedDomains in CORS -- (api add) ParameterNamed for detailed documentation - -2014-04-16 - -- (api add) expose constructor of Request for testing. - -2014-06-27 - -- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). -- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). - -2014-07-03 - -- (api add) CORS can be configured with a list of allowed domains - -2014-03-12 - -- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) - -2014-02-26 - -- (api add) Request now provides information about the matched Route, see method SelectedRoutePath - -2014-02-17 - -- (api change) renamed parameter constants (go-lint checks) - -2014-01-10 - -- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier - -2014-01-07 - -- (api change) Write* methods in Response now return the error or nil. -- added example of serving HTML from a Go template. 
-- fixed comparing Allowed headers in CORS (is now case-insensitive) - -2013-11-13 - -- (api add) Response knows how many bytes are written to the response body. - -2013-10-29 - -- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information. - -2013-10-04 - -- (api add) Response knows what HTTP status has been written -- (api add) Request can have attributes (map of string->interface, also called request-scoped variables - -2013-09-12 - -- (api change) Router interface simplified -- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths - -2013-08-05 - - add OPTIONS support - - add CORS support - -2013-08-27 - -- fixed some reported issues (see github) -- (api change) deprecated use of WriteError; use WriteErrorString instead - -2014-04-15 - -- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString - -2013-08-08 - -- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. -- (api add) the swagger package has be extended to have a UI per container. -- if panic is detected then a small stack trace is printed (thanks to runner-mei) -- (api add) WriteErrorString to Response - -Important API changes: - -- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead. -- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. - - -2013-07-06 - -- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. 
- -2013-06-19 - -- (improve) DoNotRecover option, moved request body closer, improved ReadEntity - -2013-06-03 - -- (api change) removed Dispatcher interface, hide PathExpression -- changed receiver names of type functions to be more idiomatic Go - -2013-06-02 - -- (optimize) Cache the RegExp compilation of Paths. - -2013-05-22 - -- (api add) Added support for request/response filter functions - -2013-05-18 - - -- (api add) Added feature to change the default Http Request Dispatch function (travis cline) -- (api change) Moved Swagger Webservice to swagger package (see example restful-user) - -[2012-11-14 .. 2013-05-18> - -- See https://github.com/emicklei/go-restful/commits - -2012-11-14 - -- Initial commit - - diff --git a/vendor/github.com/emicklei/go-restful/LICENSE b/vendor/github.com/emicklei/go-restful/LICENSE deleted file mode 100644 index ece7ec6..0000000 --- a/vendor/github.com/emicklei/go-restful/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012,2013 Ernest Micklei - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/Makefile b/vendor/github.com/emicklei/go-restful/Makefile deleted file mode 100644 index b40081c..0000000 --- a/vendor/github.com/emicklei/go-restful/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: test - -test: - go test -v . - -ex: - cd examples && ls *.go | xargs go build -o /tmp/ignore \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md deleted file mode 100644 index f52c25a..0000000 --- a/vendor/github.com/emicklei/go-restful/README.md +++ /dev/null @@ -1,88 +0,0 @@ -go-restful -========== -package for building REST-style Web Services using Google Go - -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) -[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful) - -- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples) - -REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: - -- GET = Retrieve a representation of a resource -- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm. 
-- PUT = Create if you are sending the full content of the specified resource (URI). -- PUT = Update if you are updating the full content of the specified resource. -- DELETE = Delete if you are requesting the server to delete the resource -- PATCH = Update partial content of a resource -- OPTIONS = Get information about the communication options for the request URI - -### Example - -```Go -ws := new(restful.WebService) -ws. - Path("/users"). - Consumes(restful.MIME_XML, restful.MIME_JSON). - Produces(restful.MIME_JSON, restful.MIME_XML) - -ws.Route(ws.GET("/{user-id}").To(u.findUser). - Doc("get a user"). - Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")). - Writes(User{})) -... - -func (u UserResource) findUser(request *restful.Request, response *restful.Response) { - id := request.PathParameter("user-id") - ... -} -``` - -[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go) - -### Features - -- Routes for request → function mapping with path parameter (e.g. {id}) support -- Configurable router: - - (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. 
/meetings/{id} or /static/{subpath:*} - - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions -- Request API for reading structs from JSON/XML and accesing parameters (path,query,header) -- Response API for writing structs to JSON/XML and setting headers -- Customizable encoding using EntityReaderWriter registration -- Filters for intercepting the request → response flow on Service or Route level -- Request-scoped variables using attributes -- Containers for WebServices on different HTTP endpoints -- Content encoding (gzip,deflate) of request and response payloads -- Automatic responses on OPTIONS (using a filter) -- Automatic CORS request handling (using a filter) -- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12)) -- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) -- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) -- Configurable (trace) logging -- Customizable gzip/deflate readers and writers using CompressorProvider registration - -## How to customize -There are several hooks to customize the behavior of the go-restful package. - -- Router algorithm -- Panic recovery -- JSON decoder -- Trace logging -- Compression -- Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .` - -TODO: write examples of these. 
- -## Resources - -- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) -- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) -- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) -- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia) -- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora) - -Type ```git shortlog -s``` for a full list of contributors. - -© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/vendor/github.com/emicklei/go-restful/Srcfile b/vendor/github.com/emicklei/go-restful/Srcfile deleted file mode 100644 index 16fd186..0000000 --- a/vendor/github.com/emicklei/go-restful/Srcfile +++ /dev/null @@ -1 +0,0 @@ -{"SkipDirs": ["examples"]} diff --git a/vendor/github.com/emicklei/go-restful/bench_test.sh b/vendor/github.com/emicklei/go-restful/bench_test.sh deleted file mode 100644 index 47ffbe4..0000000 --- a/vendor/github.com/emicklei/go-restful/bench_test.sh +++ /dev/null @@ -1,10 +0,0 @@ -#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out - -go test -c -./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany -./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly - -#go tool pprof go-restful.test tmp.prof -go tool pprof go-restful.test curly.prof - - diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/compress.go deleted file mode 100644 index 220b377..0000000 --- a/vendor/github.com/emicklei/go-restful/compress.go +++ /dev/null @@ -1,123 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -import ( - "bufio" - "compress/gzip" - "compress/zlib" - "errors" - "io" - "net" - "net/http" - "strings" -) - -// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting. -var EnableContentEncoding = false - -// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib) -type CompressingResponseWriter struct { - writer http.ResponseWriter - compressor io.WriteCloser - encoding string -} - -// Header is part of http.ResponseWriter interface -func (c *CompressingResponseWriter) Header() http.Header { - return c.writer.Header() -} - -// WriteHeader is part of http.ResponseWriter interface -func (c *CompressingResponseWriter) WriteHeader(status int) { - c.writer.WriteHeader(status) -} - -// Write is part of http.ResponseWriter interface -// It is passed through the compressor -func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) { - if c.isCompressorClosed() { - return -1, errors.New("Compressing error: tried to write data using closed compressor") - } - return c.compressor.Write(bytes) -} - -// CloseNotify is part of http.CloseNotifier interface -func (c *CompressingResponseWriter) CloseNotify() <-chan bool { - return c.writer.(http.CloseNotifier).CloseNotify() -} - -// Close the underlying compressor -func (c *CompressingResponseWriter) Close() error { - if c.isCompressorClosed() { - return errors.New("Compressing error: tried to close already closed compressor") - } - - c.compressor.Close() - if ENCODING_GZIP == c.encoding { - currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer)) - } - if ENCODING_DEFLATE == c.encoding { - currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer)) - } - // gc hint needed? 
- c.compressor = nil - return nil -} - -func (c *CompressingResponseWriter) isCompressorClosed() bool { - return nil == c.compressor -} - -// Hijack implements the Hijacker interface -// This is especially useful when combining Container.EnabledContentEncoding -// in combination with websockets (for instance gorilla/websocket) -func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hijacker, ok := c.writer.(http.Hijacker) - if !ok { - return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface") - } - return hijacker.Hijack() -} - -// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. -func wantsCompressedResponse(httpRequest *http.Request) (bool, string) { - header := httpRequest.Header.Get(HEADER_AcceptEncoding) - gi := strings.Index(header, ENCODING_GZIP) - zi := strings.Index(header, ENCODING_DEFLATE) - // use in order of appearance - if gi == -1 { - return zi != -1, ENCODING_DEFLATE - } else if zi == -1 { - return gi != -1, ENCODING_GZIP - } else { - if gi < zi { - return true, ENCODING_GZIP - } - return true, ENCODING_DEFLATE - } -} - -// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate} -func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) { - httpWriter.Header().Set(HEADER_ContentEncoding, encoding) - c := new(CompressingResponseWriter) - c.writer = httpWriter - var err error - if ENCODING_GZIP == encoding { - w := currentCompressorProvider.AcquireGzipWriter() - w.Reset(httpWriter) - c.compressor = w - c.encoding = ENCODING_GZIP - } else if ENCODING_DEFLATE == encoding { - w := currentCompressorProvider.AcquireZlibWriter() - w.Reset(httpWriter) - c.compressor = w - c.encoding = ENCODING_DEFLATE - } else { - return nil, errors.New("Unknown encoding:" + encoding) - } - return c, err -} diff --git 
a/vendor/github.com/emicklei/go-restful/compressor_cache.go b/vendor/github.com/emicklei/go-restful/compressor_cache.go deleted file mode 100644 index ee42601..0000000 --- a/vendor/github.com/emicklei/go-restful/compressor_cache.go +++ /dev/null @@ -1,103 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/gzip" - "compress/zlib" -) - -// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount -// of writers and readers (resources). -// If a new resource is acquired and all are in use, it will return a new unmanaged resource. -type BoundedCachedCompressors struct { - gzipWriters chan *gzip.Writer - gzipReaders chan *gzip.Reader - zlibWriters chan *zlib.Writer - writersCapacity int - readersCapacity int -} - -// NewBoundedCachedCompressors returns a new, with filled cache, BoundedCachedCompressors. -func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors { - b := &BoundedCachedCompressors{ - gzipWriters: make(chan *gzip.Writer, writersCapacity), - gzipReaders: make(chan *gzip.Reader, readersCapacity), - zlibWriters: make(chan *zlib.Writer, writersCapacity), - writersCapacity: writersCapacity, - readersCapacity: readersCapacity, - } - for ix := 0; ix < writersCapacity; ix++ { - b.gzipWriters <- newGzipWriter() - b.zlibWriters <- newZlibWriter() - } - for ix := 0; ix < readersCapacity; ix++ { - b.gzipReaders <- newGzipReader() - } - return b -} - -// AcquireGzipWriter returns an resettable *gzip.Writer. Needs to be released. 
-func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer { - var writer *gzip.Writer - select { - case writer, _ = <-b.gzipWriters: - default: - // return a new unmanaged one - writer = newGzipWriter() - } - return writer -} - -// ReleaseGzipWriter accepts a writer (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) { - // forget the unmanaged ones - if len(b.gzipWriters) < b.writersCapacity { - b.gzipWriters <- w - } -} - -// AcquireGzipReader returns a *gzip.Reader. Needs to be released. -func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader { - var reader *gzip.Reader - select { - case reader, _ = <-b.gzipReaders: - default: - // return a new unmanaged one - reader = newGzipReader() - } - return reader -} - -// ReleaseGzipReader accepts a reader (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) { - // forget the unmanaged ones - if len(b.gzipReaders) < b.readersCapacity { - b.gzipReaders <- r - } -} - -// AcquireZlibWriter returns an resettable *zlib.Writer. Needs to be released. -func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer { - var writer *zlib.Writer - select { - case writer, _ = <-b.zlibWriters: - default: - // return a new unmanaged one - writer = newZlibWriter() - } - return writer -} - -// ReleaseZlibWriter accepts a writer (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. 
-func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) { - // forget the unmanaged ones - if len(b.zlibWriters) < b.writersCapacity { - b.zlibWriters <- w - } -} diff --git a/vendor/github.com/emicklei/go-restful/compressor_pools.go b/vendor/github.com/emicklei/go-restful/compressor_pools.go deleted file mode 100644 index d866ce6..0000000 --- a/vendor/github.com/emicklei/go-restful/compressor_pools.go +++ /dev/null @@ -1,91 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "compress/gzip" - "compress/zlib" - "sync" -) - -// SyncPoolCompessors is a CompressorProvider that use the standard sync.Pool. -type SyncPoolCompessors struct { - GzipWriterPool *sync.Pool - GzipReaderPool *sync.Pool - ZlibWriterPool *sync.Pool -} - -// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors. -func NewSyncPoolCompessors() *SyncPoolCompessors { - return &SyncPoolCompessors{ - GzipWriterPool: &sync.Pool{ - New: func() interface{} { return newGzipWriter() }, - }, - GzipReaderPool: &sync.Pool{ - New: func() interface{} { return newGzipReader() }, - }, - ZlibWriterPool: &sync.Pool{ - New: func() interface{} { return newZlibWriter() }, - }, - } -} - -func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer { - return s.GzipWriterPool.Get().(*gzip.Writer) -} - -func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) { - s.GzipWriterPool.Put(w) -} - -func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader { - return s.GzipReaderPool.Get().(*gzip.Reader) -} - -func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) { - s.GzipReaderPool.Put(r) -} - -func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer { - return s.ZlibWriterPool.Get().(*zlib.Writer) -} - -func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) { - s.ZlibWriterPool.Put(w) -} - -func newGzipWriter() 
*gzip.Writer { - // create with an empty bytes writer; it will be replaced before using the gzipWriter - writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) - if err != nil { - panic(err.Error()) - } - return writer -} - -func newGzipReader() *gzip.Reader { - // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader - // we can safely use currentCompressProvider because it is set on package initialization. - w := currentCompressorProvider.AcquireGzipWriter() - defer currentCompressorProvider.ReleaseGzipWriter(w) - b := new(bytes.Buffer) - w.Reset(b) - w.Flush() - w.Close() - reader, err := gzip.NewReader(bytes.NewReader(b.Bytes())) - if err != nil { - panic(err.Error()) - } - return reader -} - -func newZlibWriter() *zlib.Writer { - writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) - if err != nil { - panic(err.Error()) - } - return writer -} diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/compressors.go deleted file mode 100644 index 9db4a8c..0000000 --- a/vendor/github.com/emicklei/go-restful/compressors.go +++ /dev/null @@ -1,54 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/gzip" - "compress/zlib" -) - -// CompressorProvider describes a component that can provider compressors for the std methods. -type CompressorProvider interface { - // Returns a *gzip.Writer which needs to be released later. - // Before using it, call Reset(). - AcquireGzipWriter() *gzip.Writer - - // Releases an acquired *gzip.Writer. - ReleaseGzipWriter(w *gzip.Writer) - - // Returns a *gzip.Reader which needs to be released later. - AcquireGzipReader() *gzip.Reader - - // Releases an acquired *gzip.Reader. 
- ReleaseGzipReader(w *gzip.Reader) - - // Returns a *zlib.Writer which needs to be released later. - // Before using it, call Reset(). - AcquireZlibWriter() *zlib.Writer - - // Releases an acquired *zlib.Writer. - ReleaseZlibWriter(w *zlib.Writer) -} - -// DefaultCompressorProvider is the actual provider of compressors (zlib or gzip). -var currentCompressorProvider CompressorProvider - -func init() { - currentCompressorProvider = NewSyncPoolCompessors() -} - -// CurrentCompressorProvider returns the current CompressorProvider. -// It is initialized using a SyncPoolCompessors. -func CurrentCompressorProvider() CompressorProvider { - return currentCompressorProvider -} - -// SetCompressorProvider sets the actual provider of compressors (zlib or gzip). -func SetCompressorProvider(p CompressorProvider) { - if p == nil { - panic("cannot set compressor provider to nil") - } - currentCompressorProvider = p -} diff --git a/vendor/github.com/emicklei/go-restful/constants.go b/vendor/github.com/emicklei/go-restful/constants.go deleted file mode 100644 index 203439c..0000000 --- a/vendor/github.com/emicklei/go-restful/constants.go +++ /dev/null @@ -1,30 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -const ( - MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() - MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() - MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default - - HEADER_Allow = "Allow" - HEADER_Accept = "Accept" - HEADER_Origin = "Origin" - HEADER_ContentType = "Content-Type" - HEADER_LastModified = "Last-Modified" - HEADER_AcceptEncoding = "Accept-Encoding" - HEADER_ContentEncoding = "Content-Encoding" - HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers" - HEADER_AccessControlRequestMethod = "Access-Control-Request-Method" - HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers" - HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods" - HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin" - HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials" - HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers" - HEADER_AccessControlMaxAge = "Access-Control-Max-Age" - - ENCODING_GZIP = "gzip" - ENCODING_DEFLATE = "deflate" -) diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go deleted file mode 100644 index 061a8d7..0000000 --- a/vendor/github.com/emicklei/go-restful/container.go +++ /dev/null @@ -1,377 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "errors" - "fmt" - "net/http" - "os" - "runtime" - "strings" - "sync" - - "github.com/emicklei/go-restful/log" -) - -// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests. 
-// The requests are further dispatched to routes of WebServices using a RouteSelector -type Container struct { - webServicesLock sync.RWMutex - webServices []*WebService - ServeMux *http.ServeMux - isRegisteredOnRoot bool - containerFilters []FilterFunction - doNotRecover bool // default is true - recoverHandleFunc RecoverHandleFunction - serviceErrorHandleFunc ServiceErrorHandleFunction - router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative) - contentEncodingEnabled bool // default is false -} - -// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter) -func NewContainer() *Container { - return &Container{ - webServices: []*WebService{}, - ServeMux: http.NewServeMux(), - isRegisteredOnRoot: false, - containerFilters: []FilterFunction{}, - doNotRecover: true, - recoverHandleFunc: logStackOnRecover, - serviceErrorHandleFunc: writeServiceError, - router: CurlyRouter{}, - contentEncodingEnabled: false} -} - -// RecoverHandleFunction declares functions that can be used to handle a panic situation. -// The first argument is what recover() returns. The second must be used to communicate an error response. -type RecoverHandleFunction func(interface{}, http.ResponseWriter) - -// RecoverHandler changes the default function (logStackOnRecover) to be called -// when a panic is detected. DoNotRecover must be have its default value (=false). -func (c *Container) RecoverHandler(handler RecoverHandleFunction) { - c.recoverHandleFunc = handler -} - -// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation. -// The first argument is the service error, the second is the request that resulted in the error and -// the third must be used to communicate an error response. -type ServiceErrorHandleFunction func(ServiceError, *Request, *Response) - -// ServiceErrorHandler changes the default function (writeServiceError) to be called -// when a ServiceError is detected. 
-func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) { - c.serviceErrorHandleFunc = handler -} - -// DoNotRecover controls whether panics will be caught to return HTTP 500. -// If set to true, Route functions are responsible for handling any error situation. -// Default value is true. -func (c *Container) DoNotRecover(doNot bool) { - c.doNotRecover = doNot -} - -// Router changes the default Router (currently CurlyRouter) -func (c *Container) Router(aRouter RouteSelector) { - c.router = aRouter -} - -// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. -func (c *Container) EnableContentEncoding(enabled bool) { - c.contentEncodingEnabled = enabled -} - -// Add a WebService to the Container. It will detect duplicate root paths and exit in that case. -func (c *Container) Add(service *WebService) *Container { - c.webServicesLock.Lock() - defer c.webServicesLock.Unlock() - - // if rootPath was not set then lazy initialize it - if len(service.rootPath) == 0 { - service.Path("/") - } - - // cannot have duplicate root paths - for _, each := range c.webServices { - if each.RootPath() == service.RootPath() { - log.Printf("WebService with duplicate root path detected:['%v']", each) - os.Exit(1) - } - } - - // If not registered on root then add specific mapping - if !c.isRegisteredOnRoot { - c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux) - } - c.webServices = append(c.webServices, service) - return c -} - -// addHandler may set a new HandleFunc for the serveMux -// this function must run inside the critical region protected by the webServicesLock. 
-// returns true if the function was registered on root ("/") -func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool { - pattern := fixedPrefixPath(service.RootPath()) - // check if root path registration is needed - if "/" == pattern || "" == pattern { - serveMux.HandleFunc("/", c.dispatch) - return true - } - // detect if registration already exists - alreadyMapped := false - for _, each := range c.webServices { - if each.RootPath() == service.RootPath() { - alreadyMapped = true - break - } - } - if !alreadyMapped { - serveMux.HandleFunc(pattern, c.dispatch) - if !strings.HasSuffix(pattern, "/") { - serveMux.HandleFunc(pattern+"/", c.dispatch) - } - } - return false -} - -func (c *Container) Remove(ws *WebService) error { - if c.ServeMux == http.DefaultServeMux { - errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) - log.Print(errMsg) - return errors.New(errMsg) - } - c.webServicesLock.Lock() - defer c.webServicesLock.Unlock() - // build a new ServeMux and re-register all WebServices - newServeMux := http.NewServeMux() - newServices := []*WebService{} - newIsRegisteredOnRoot := false - for _, each := range c.webServices { - if each.rootPath != ws.rootPath { - // If not registered on root then add specific mapping - if !newIsRegisteredOnRoot { - newIsRegisteredOnRoot = c.addHandler(each, newServeMux) - } - newServices = append(newServices, each) - } - } - c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot - return nil -} - -// logStackOnRecover is the default RecoverHandleFunction and is called -// when DoNotRecover is false and the recoverHandleFunc is not set for the container. -// Default implementation logs the stacktrace and writes the stacktrace on the response. -// This may be a security issue as it exposes sourcecode information. 
-func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) { - var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason)) - for i := 2; ; i += 1 { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) - } - log.Print(buffer.String()) - httpWriter.WriteHeader(http.StatusInternalServerError) - httpWriter.Write(buffer.Bytes()) -} - -// writeServiceError is the default ServiceErrorHandleFunction and is called -// when a ServiceError is returned during route selection. Default implementation -// calls resp.WriteErrorString(err.Code, err.Message) -func writeServiceError(err ServiceError, req *Request, resp *Response) { - resp.WriteErrorString(err.Code, err.Message) -} - -// Dispatch the incoming Http Request to a matching WebService. -func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { - if httpWriter == nil { - panic("httpWriter cannot be nil") - } - if httpRequest == nil { - panic("httpRequest cannot be nil") - } - c.dispatch(httpWriter, httpRequest) -} - -// Dispatch the incoming Http Request to a matching WebService. 
-func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { - writer := httpWriter - - // CompressingResponseWriter should be closed after all operations are done - defer func() { - if compressWriter, ok := writer.(*CompressingResponseWriter); ok { - compressWriter.Close() - } - }() - - // Instal panic recovery unless told otherwise - if !c.doNotRecover { // catch all for 500 response - defer func() { - if r := recover(); r != nil { - c.recoverHandleFunc(r, writer) - return - } - }() - } - - // Find best match Route ; err is non nil if no match was found - var webService *WebService - var route *Route - var err error - func() { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - webService, route, err = c.router.SelectRoute( - c.webServices, - httpRequest) - }() - - // Detect if compression is needed - // assume without compression, test for override - contentEncodingEnabled := c.contentEncodingEnabled - if route != nil && route.contentEncodingEnabled != nil { - contentEncodingEnabled = *route.contentEncodingEnabled - } - if contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest) - if doCompress { - var err error - writer, err = NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - log.Print("unable to install compressor: ", err) - httpWriter.WriteHeader(http.StatusInternalServerError) - return - } - } - } - - if err != nil { - // a non-200 response has already been written - // run container filters anyway ; they should not touch the response... 
- chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - switch err.(type) { - case ServiceError: - ser := err.(ServiceError) - c.serviceErrorHandleFunc(ser, req, resp) - } - // TODO - }} - chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) - return - } - pathProcessor, routerProcessesPath := c.router.(PathProcessor) - if !routerProcessesPath { - pathProcessor = defaultPathProcessor{} - } - pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path) - wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams) - // pass through filters (if any) - if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 { - // compose filter chain - allFilters := []FilterFunction{} - allFilters = append(allFilters, c.containerFilters...) - allFilters = append(allFilters, webService.filters...) - allFilters = append(allFilters, route.Filters...) - chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) { - // handle request by route after passing all filters - route.Function(wrappedRequest, wrappedResponse) - }} - chain.ProcessFilter(wrappedRequest, wrappedResponse) - } else { - // no filters, handle request by route - route.Function(wrappedRequest, wrappedResponse) - } -} - -// fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {} -func fixedPrefixPath(pathspec string) string { - varBegin := strings.Index(pathspec, "{") - if -1 == varBegin { - return pathspec - } - return pathspec[:varBegin] -} - -// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server -func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) { - c.ServeMux.ServeHTTP(httpwriter, httpRequest) -} - -// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics. 
-func (c *Container) Handle(pattern string, handler http.Handler) { - c.ServeMux.Handle(pattern, handler) -} - -// HandleWithFilter registers the handler for the given pattern. -// Container's filter chain is applied for handler. -// If a handler already exists for pattern, HandleWithFilter panics. -func (c *Container) HandleWithFilter(pattern string, handler http.Handler) { - f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) { - if len(c.containerFilters) == 0 { - handler.ServeHTTP(httpResponse, httpRequest) - return - } - - chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - handler.ServeHTTP(httpResponse, httpRequest) - }} - chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse)) - } - - c.Handle(pattern, http.HandlerFunc(f)) -} - -// Filter appends a container FilterFunction. These are called before dispatching -// a http.Request to a WebService from the container -func (c *Container) Filter(filter FilterFunction) { - c.containerFilters = append(c.containerFilters, filter) -} - -// RegisteredWebServices returns the collections of added WebServices -func (c *Container) RegisteredWebServices() []*WebService { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - result := make([]*WebService, len(c.webServices)) - for ix := range c.webServices { - result[ix] = c.webServices[ix] - } - return result -} - -// computeAllowedMethods returns a list of HTTP methods that are valid for a Request -func (c *Container) computeAllowedMethods(req *Request) []string { - // Go through all RegisteredWebServices() and all its Routes to collect the options - methods := []string{} - requestPath := req.Request.URL.Path - for _, ws := range c.RegisteredWebServices() { - matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath) - if matches != nil { - finalMatch := matches[len(matches)-1] - for _, rt := range ws.Routes() { - matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch) - 
if matches != nil { - lastMatch := matches[len(matches)-1] - if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. - methods = append(methods, rt.Method) - } - } - } - } - } - // methods = append(methods, "OPTIONS") not sure about this - return methods -} - -// newBasicRequestResponse creates a pair of Request,Response from its http versions. -// It is basic because no parameter or (produces) content-type information is given. -func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { - resp := NewResponse(httpWriter) - resp.requestAccept = httpRequest.Header.Get(HEADER_Accept) - return NewRequest(httpRequest), resp -} diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/cors_filter.go deleted file mode 100644 index 1efeef0..0000000 --- a/vendor/github.com/emicklei/go-restful/cors_filter.go +++ /dev/null @@ -1,202 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "regexp" - "strconv" - "strings" -) - -// CrossOriginResourceSharing is used to create a Container Filter that implements CORS. -// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page -// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from. -// -// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing -// http://enable-cors.org/server.html -// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request -type CrossOriginResourceSharing struct { - ExposeHeaders []string // list of Header names - AllowedHeaders []string // list of Header names - AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed. 
- AllowedMethods []string - MaxAge int // number of seconds before requiring new Options request - CookiesAllowed bool - Container *Container - - allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check. -} - -// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html -// and http://www.html5rocks.com/static/images/cors_server_flowchart.png -func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) { - origin := req.Request.Header.Get(HEADER_Origin) - if len(origin) == 0 { - if trace { - traceLogger.Print("no Http header Origin set") - } - chain.ProcessFilter(req, resp) - return - } - if !c.isOriginAllowed(origin) { // check whether this origin is allowed - if trace { - traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns) - } - chain.ProcessFilter(req, resp) - return - } - if req.Request.Method != "OPTIONS" { - c.doActualRequest(req, resp) - chain.ProcessFilter(req, resp) - return - } - if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" { - c.doPreflightRequest(req, resp) - } else { - c.doActualRequest(req, resp) - chain.ProcessFilter(req, resp) - return - } -} - -func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) { - c.setOptionsHeaders(req, resp) - // continue processing the response -} - -func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) { - if len(c.AllowedMethods) == 0 { - if c.Container == nil { - c.AllowedMethods = DefaultContainer.computeAllowedMethods(req) - } else { - c.AllowedMethods = c.Container.computeAllowedMethods(req) - } - } - - acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod) - if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) { - if trace { - traceLogger.Printf("Http header %s:%s is not in %v", - 
HEADER_AccessControlRequestMethod, - acrm, - c.AllowedMethods) - } - return - } - acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) - if len(acrhs) > 0 { - for _, each := range strings.Split(acrhs, ",") { - if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) { - if trace { - traceLogger.Printf("Http header %s:%s is not in %v", - HEADER_AccessControlRequestHeaders, - acrhs, - c.AllowedHeaders) - } - return - } - } - } - resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ",")) - resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs) - c.setOptionsHeaders(req, resp) - - // return http 200 response, no body -} - -func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) { - c.checkAndSetExposeHeaders(resp) - c.setAllowOriginHeader(req, resp) - c.checkAndSetAllowCredentials(resp) - if c.MaxAge > 0 { - resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge)) - } -} - -func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool { - if len(origin) == 0 { - return false - } - if len(c.AllowedDomains) == 0 { - return true - } - - allowed := false - for _, domain := range c.AllowedDomains { - if domain == origin { - allowed = true - break - } - } - - if !allowed { - if len(c.allowedOriginPatterns) == 0 { - // compile allowed domains to allowed origin patterns - allowedOriginRegexps, err := compileRegexps(c.AllowedDomains) - if err != nil { - return false - } - c.allowedOriginPatterns = allowedOriginRegexps - } - - for _, pattern := range c.allowedOriginPatterns { - if allowed = pattern.MatchString(origin); allowed { - break - } - } - } - - return allowed -} - -func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) { - origin := req.Request.Header.Get(HEADER_Origin) - if c.isOriginAllowed(origin) { - resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) - } -} - -func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp 
*Response) { - if len(c.ExposeHeaders) > 0 { - resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ",")) - } -} - -func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) { - if c.CookiesAllowed { - resp.AddHeader(HEADER_AccessControlAllowCredentials, "true") - } -} - -func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool { - for _, each := range allowedMethods { - if each == method { - return true - } - } - return false -} - -func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool { - for _, each := range c.AllowedHeaders { - if strings.ToLower(each) == strings.ToLower(header) { - return true - } - } - return false -} - -// Take a list of strings and compile them into a list of regular expressions. -func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { - regexps := []*regexp.Regexp{} - for _, regexpStr := range regexpStrings { - r, err := regexp.Compile(regexpStr) - if err != nil { - return regexps, err - } - regexps = append(regexps, r) - } - return regexps, nil -} diff --git a/vendor/github.com/emicklei/go-restful/coverage.sh b/vendor/github.com/emicklei/go-restful/coverage.sh deleted file mode 100644 index e27dbf1..0000000 --- a/vendor/github.com/emicklei/go-restful/coverage.sh +++ /dev/null @@ -1,2 +0,0 @@ -go test -coverprofile=coverage.out -go tool cover -html=coverage.out \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go deleted file mode 100644 index 14d5b76..0000000 --- a/vendor/github.com/emicklei/go-restful/curly.go +++ /dev/null @@ -1,164 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -import ( - "net/http" - "regexp" - "sort" - "strings" -) - -// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets. -type CurlyRouter struct{} - -// SelectRoute is part of the Router interface and returns the best match -// for the WebService and its Route for the given Request. -func (c CurlyRouter) SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) { - - requestTokens := tokenizePath(httpRequest.URL.Path) - - detectedService := c.detectWebService(requestTokens, webServices) - if detectedService == nil { - if trace { - traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path) - } - return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - candidateRoutes := c.selectRoutes(detectedService, requestTokens) - if len(candidateRoutes) == 0 { - if trace { - traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path) - } - return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest) - if selectedRoute == nil { - return detectedService, nil, err - } - return detectedService, selectedRoute, nil -} - -// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. -func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { - candidates := make(sortableCurlyRoutes, 0, 8) - for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens) - if matches { - candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? 
- } - } - sort.Sort(candidates) - return candidates -} - -// matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are. -func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) { - if len(routeTokens) < len(requestTokens) { - // proceed in matching only if last routeToken is wildcard - count := len(routeTokens) - if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") { - return false, 0, 0 - } - // proceed - } - for i, routeToken := range routeTokens { - if i == len(requestTokens) { - // reached end of request path - return false, 0, 0 - } - requestToken := requestTokens[i] - if strings.HasPrefix(routeToken, "{") { - paramCount++ - if colon := strings.Index(routeToken, ":"); colon != -1 { - // match by regex - matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken) - if !matchesToken { - return false, 0, 0 - } - if matchesRemainder { - break - } - } - } else { // no { prefix - if requestToken != routeToken { - return false, 0, 0 - } - staticCount++ - } - } - return true, paramCount, staticCount -} - -// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens -// format routeToken is {someVar:someExpression}, e.g. 
{zipcode:[\d][\d][\d][\d][A-Z][A-Z]} -func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) { - regPart := routeToken[colon+1 : len(routeToken)-1] - if regPart == "*" { - if trace { - traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken) - } - return true, true - } - matched, err := regexp.MatchString(regPart, requestToken) - return (matched && err == nil), false -} - -var jsr311Router = RouterJSR311{} - -// detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type -// headers of the Request. See also RouterJSR311 in jsr311.go -func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) { - // tracing is done inside detectRoute - return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest) -} - -// detectWebService returns the best matching webService given the list of path tokens. -// see also computeWebserviceScore -func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { - var best *WebService - score := -1 - for _, each := range webServices { - matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) - if matches && (eachScore > score) { - best = each - score = eachScore - } - } - return best -} - -// computeWebserviceScore returns whether tokens match and -// the weighted score of the longest matching consecutive tokens from the beginning. 
-func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { - if len(tokens) > len(requestTokens) { - return false, 0 - } - score := 0 - for i := 0; i < len(tokens); i++ { - each := requestTokens[i] - other := tokens[i] - if len(each) == 0 && len(other) == 0 { - score++ - continue - } - if len(other) > 0 && strings.HasPrefix(other, "{") { - // no empty match - if len(each) == 0 { - return false, score - } - score += 1 - } else { - // not a parameter - if each != other { - return false, score - } - score += (len(tokens) - i) * 10 //fuzzy - } - } - return true, score -} diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/curly_route.go deleted file mode 100644 index 403dd3b..0000000 --- a/vendor/github.com/emicklei/go-restful/curly_route.go +++ /dev/null @@ -1,54 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// curlyRoute exits for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements. -type curlyRoute struct { - route Route - paramCount int - staticCount int -} - -// sortableCurlyRoutes orders by most parameters and path elements first. 
-type sortableCurlyRoutes []curlyRoute - -func (s *sortableCurlyRoutes) add(route curlyRoute) { - *s = append(*s, route) -} - -func (s sortableCurlyRoutes) routes() (routes []Route) { - routes = make([]Route, 0, len(s)) - for _, each := range s { - routes = append(routes, each.route) // TODO change return type - } - return routes -} - -func (s sortableCurlyRoutes) Len() int { - return len(s) -} -func (s sortableCurlyRoutes) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s sortableCurlyRoutes) Less(i, j int) bool { - a := s[j] - b := s[i] - - // primary key - if a.staticCount < b.staticCount { - return true - } - if a.staticCount > b.staticCount { - return false - } - // secundary key - if a.paramCount < b.paramCount { - return true - } - if a.paramCount > b.paramCount { - return false - } - return a.route.Path < b.route.Path -} diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/doc.go deleted file mode 100644 index f7c16b0..0000000 --- a/vendor/github.com/emicklei/go-restful/doc.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Package restful , a lean package for creating REST-style WebServices without magic. - -WebServices and Routes - -A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls. -Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes. -WebServices must be added to a container (see below) in order to handler Http requests from a server. - -A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept). -This package has the logic to find the best matching Route and if found, call its Function. - - ws := new(restful.WebService) - ws. - Path("/users"). - Consumes(restful.MIME_JSON, restful.MIME_XML). - Produces(restful.MIME_JSON, restful.MIME_XML) - - ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource - - ... 
- - // GET http://localhost:8080/users/1 - func (u UserResource) findUser(request *restful.Request, response *restful.Response) { - id := request.PathParameter("user-id") - ... - } - -The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response. - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation. - -Regular expression matching Routes - -A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path. -For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters. -Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax) -This feature requires the use of a CurlyRouter. - -Containers - -A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests. -Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container. -The Default container of go-restful uses the http.DefaultServeMux. -You can create your own Container and create a new http.Server for that particular container. - - container := restful.NewContainer() - server := &http.Server{Addr: ":8081", Handler: container} - -Filters - -A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses. -You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc. -In the restful package there are three hooks into the request,response flow where filters can be added. 
-Each filter must define a FilterFunction: - - func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain) - -Use the following statement to pass the request,response pair to the next filter or RouteFunction - - chain.ProcessFilter(req, resp) - -Container Filters - -These are processed before any registered WebService. - - // install a (global) filter for the default container (processed before any webservice) - restful.Filter(globalLogging) - -WebService Filters - -These are processed before any Route of a WebService. - - // install a webservice filter (processed before any route) - ws.Filter(webserviceLogging).Filter(measureTime) - - -Route Filters - -These are processed before calling the function associated with the Route. - - // install 2 chained route filters (processed before calling findUser) - ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser)) - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations. - -Response Encoding - -Two encodings are supported: gzip and deflate. To enable this for all responses: - - restful.DefaultContainer.EnableContentEncoding(true) - -If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding. -Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route. - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go - -OPTIONS support - -By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request. - - Filter(OPTIONSFilter()) - -CORS - -By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests. 
- - cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer} - Filter(cors.Filter) - -Error Handling - -Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why. -For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation. - - 400: Bad Request - -If path or query parameters are not valid (content or type) then use http.StatusBadRequest. - - 404: Not Found - -Despite a valid URI, the resource requested may not be available - - 500: Internal Server Error - -If the application logic could not process the request (or write the response) then use http.StatusInternalServerError. - - 405: Method Not Allowed - -The request has a valid URL but the method (GET,PUT,POST,...) is not allowed. - - 406: Not Acceptable - -The request does not have or has an unknown Accept Header set for this operation. - - 415: Unsupported Media Type - -The request does not have or has an unknown Content-Type Header set for this operation. - -ServiceError - -In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response. - -Performance options - -This package has several options that affect the performance of your service. It is important to understand them and how you can change it. - - restful.DefaultContainer.DoNotRecover(false) - -DoNotRecover controls whether panics will be caught to return HTTP 500. -If set to false, the container will recover from panics. -Default value is true - - restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20)) - -If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool. -Because writers are expensive structures, performance is even more improved when using a preloaded cache. 
You can also inject your own implementation. - -Trouble shooting - -This package has the means to produce detail logging of the complete Http request matching process and filter invocation. -Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as: - - restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile)) - -Logging - -The restful.SetLogger() method allows you to override the logger used by the package. By default restful -uses the standard library `log` package and logs to stdout. Different logging packages are supported as -long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your -preferred package is simple. - -Resources - -[project]: https://github.com/emicklei/go-restful - -[examples]: https://github.com/emicklei/go-restful/blob/master/examples - -[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ - -[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape - -(c) 2012-2015, http://ernestmicklei.com. MIT License -*/ -package restful diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go deleted file mode 100644 index 66dfc82..0000000 --- a/vendor/github.com/emicklei/go-restful/entity_accessors.go +++ /dev/null @@ -1,162 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "encoding/xml" - "strings" - "sync" -) - -// EntityReaderWriter can read and write values using an encoding such as JSON,XML. -type EntityReaderWriter interface { - // Read a serialized version of the value from the request. - // The Request may have a decompressing reader. Depends on Content-Encoding. 
- Read(req *Request, v interface{}) error - - // Write a serialized version of the value on the response. - // The Response may have a compressing writer. Depends on Accept-Encoding. - // status should be a valid Http Status code - Write(resp *Response, status int, v interface{}) error -} - -// entityAccessRegistry is a singleton -var entityAccessRegistry = &entityReaderWriters{ - protection: new(sync.RWMutex), - accessors: map[string]EntityReaderWriter{}, -} - -// entityReaderWriters associates MIME to an EntityReaderWriter -type entityReaderWriters struct { - protection *sync.RWMutex - accessors map[string]EntityReaderWriter -} - -func init() { - RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON)) - RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML)) -} - -// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type. -func RegisterEntityAccessor(mime string, erw EntityReaderWriter) { - entityAccessRegistry.protection.Lock() - defer entityAccessRegistry.protection.Unlock() - entityAccessRegistry.accessors[mime] = erw -} - -// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content. -// This package is already initialized with such an accessor using the MIME_JSON contentType. -func NewEntityAccessorJSON(contentType string) EntityReaderWriter { - return entityJSONAccess{ContentType: contentType} -} - -// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content. -// This package is already initialized with such an accessor using the MIME_XML contentType. -func NewEntityAccessorXML(contentType string) EntityReaderWriter { - return entityXMLAccess{ContentType: contentType} -} - -// accessorAt returns the registered ReaderWriter for this MIME type. 
-func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) { - r.protection.RLock() - defer r.protection.RUnlock() - er, ok := r.accessors[mime] - if !ok { - // retry with reverse lookup - // more expensive but we are in an exceptional situation anyway - for k, v := range r.accessors { - if strings.Contains(mime, k) { - return v, true - } - } - } - return er, ok -} - -// entityXMLAccess is a EntityReaderWriter for XML encoding -type entityXMLAccess struct { - // This is used for setting the Content-Type header when writing - ContentType string -} - -// Read unmarshalls the value from XML -func (e entityXMLAccess) Read(req *Request, v interface{}) error { - return xml.NewDecoder(req.Request.Body).Decode(v) -} - -// Write marshalls the value to JSON and set the Content-Type Header. -func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error { - return writeXML(resp, status, e.ContentType, v) -} - -// writeXML marshalls the value to JSON and set the Content-Type Header. 
-func writeXML(resp *Response, status int, contentType string, v interface{}) error { - if v == nil { - resp.WriteHeader(status) - // do not write a nil representation - return nil - } - if resp.prettyPrint { - // pretty output must be created and written explicitly - output, err := xml.MarshalIndent(v, " ", " ") - if err != nil { - return err - } - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - _, err = resp.Write([]byte(xml.Header)) - if err != nil { - return err - } - _, err = resp.Write(output) - return err - } - // not-so-pretty - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - return xml.NewEncoder(resp).Encode(v) -} - -// entityJSONAccess is a EntityReaderWriter for JSON encoding -type entityJSONAccess struct { - // This is used for setting the Content-Type header when writing - ContentType string -} - -// Read unmarshalls the value from JSON -func (e entityJSONAccess) Read(req *Request, v interface{}) error { - decoder := NewDecoder(req.Request.Body) - decoder.UseNumber() - return decoder.Decode(v) -} - -// Write marshalls the value to JSON and set the Content-Type Header. -func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error { - return writeJSON(resp, status, e.ContentType, v) -} - -// write marshalls the value to JSON and set the Content-Type Header. 
-func writeJSON(resp *Response, status int, contentType string, v interface{}) error { - if v == nil { - resp.WriteHeader(status) - // do not write a nil representation - return nil - } - if resp.prettyPrint { - // pretty output must be created and written explicitly - output, err := MarshalIndent(v, "", " ") - if err != nil { - return err - } - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - _, err = resp.Write(output) - return err - } - // not-so-pretty - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - return NewEncoder(resp).Encode(v) -} diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/filter.go deleted file mode 100644 index c23bfb5..0000000 --- a/vendor/github.com/emicklei/go-restful/filter.go +++ /dev/null @@ -1,35 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. -type FilterChain struct { - Filters []FilterFunction // ordered list of FilterFunction - Index int // index into filters that is currently in progress - Target RouteFunction // function to call after passing all filters -} - -// ProcessFilter passes the request,response pair through the next of Filters. -// Each filter can decide to proceed to the next Filter or handle the Response itself. 
-func (f *FilterChain) ProcessFilter(request *Request, response *Response) { - if f.Index < len(f.Filters) { - f.Index++ - f.Filters[f.Index-1](request, response, f) - } else { - f.Target(request, response) - } -} - -// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction -type FilterFunction func(*Request, *Response, *FilterChain) - -// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching -// See examples/restful-no-cache-filter.go for usage -func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) { - resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1. - resp.Header().Set("Pragma", "no-cache") // HTTP 1.0. - resp.Header().Set("Expires", "0") // Proxies. - chain.ProcessFilter(req, resp) -} diff --git a/vendor/github.com/emicklei/go-restful/json.go b/vendor/github.com/emicklei/go-restful/json.go deleted file mode 100644 index 8711651..0000000 --- a/vendor/github.com/emicklei/go-restful/json.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !jsoniter - -package restful - -import "encoding/json" - -var ( - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/jsoniter.go b/vendor/github.com/emicklei/go-restful/jsoniter.go deleted file mode 100644 index 11b8f8a..0000000 --- a/vendor/github.com/emicklei/go-restful/jsoniter.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build jsoniter - -package restful - -import "github.com/json-iterator/go" - -var ( - json = jsoniter.ConfigCompatibleWithStandardLibrary - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go deleted file mode 100644 index 3ede189..0000000 --- a/vendor/github.com/emicklei/go-restful/jsr311.go +++ 
/dev/null @@ -1,297 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "errors" - "fmt" - "net/http" - "sort" -) - -// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions) -// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html. -// RouterJSR311 implements the Router interface. -// Concept of locators is not implemented. -type RouterJSR311 struct{} - -// SelectRoute is part of the Router interface and returns the best match -// for the WebService and its Route for the given Request. -func (r RouterJSR311) SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) { - - // Identify the root resource class (WebService) - dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices) - if err != nil { - return nil, nil, NewError(http.StatusNotFound, "") - } - // Obtain the set of candidate methods (Routes) - routes := r.selectRoutes(dispatcher, finalMatch) - if len(routes) == 0 { - return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - - // Identify the method (Route) that will handle the request - route, ok := r.detectRoute(routes, httpRequest) - return dispatcher, route, ok -} - -// ExtractParameters is used to obtain the path parameters from the route using the same matching -// engine as the JSR 311 router. 
-func (r RouterJSR311) ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string { - webServiceExpr := webService.pathExpr - webServiceMatches := webServiceExpr.Matcher.FindStringSubmatch(urlPath) - pathParameters := r.extractParams(webServiceExpr, webServiceMatches) - routeExpr := route.pathExpr - routeMatches := routeExpr.Matcher.FindStringSubmatch(webServiceMatches[len(webServiceMatches)-1]) - routeParams := r.extractParams(routeExpr, routeMatches) - for key, value := range routeParams { - pathParameters[key] = value - } - return pathParameters -} - -func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) map[string]string { - params := map[string]string{} - for i := 1; i < len(matches); i++ { - if len(pathExpr.VarNames) >= i { - params[pathExpr.VarNames[i-1]] = matches[i] - } - } - return params -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 -func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { - candidates := make([]*Route, 0, 8) - for i, each := range routes { - ok := true - for _, fn := range each.If { - if !fn(httpRequest) { - ok = false - break - } - } - if ok { - candidates = append(candidates, &routes[i]) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes)) - } - return nil, NewError(http.StatusNotFound, "404: Not Found") - } - - // http method - previous := candidates - candidates = candidates[:0] - for _, each := range previous { - if httpRequest.Method == each.Method { - candidates = append(candidates, each) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method) - } - return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed") - } - - // content-type - contentType := httpRequest.Header.Get(HEADER_ContentType) 
- previous = candidates - candidates = candidates[:0] - for _, each := range previous { - if each.matchesContentType(contentType) { - candidates = append(candidates, each) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) - } - if httpRequest.ContentLength > 0 { - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } - } - - // accept - previous = candidates - candidates = candidates[:0] - accept := httpRequest.Header.Get(HEADER_Accept) - if len(accept) == 0 { - accept = "*/*" - } - for _, each := range previous { - if each.matchesAccept(accept) { - candidates = append(candidates, each) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept) - } - return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable") - } - // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil - return candidates[0], nil -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 -// n/m > n/* > */* -func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route { - // TODO - return &routes[0] -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2) -func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route { - filtered := &sortableRouteCandidates{} - for _, each := range dispatcher.Routes() { - pathExpr := each.pathExpr - matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder) - if matches != nil { - lastMatch := matches[len(matches)-1] - if len(lastMatch) == 0 || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. 
- filtered.candidates = append(filtered.candidates, - routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount}) - } - } - } - if len(filtered.candidates) == 0 { - if trace { - traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder) - } - return []Route{} - } - sort.Sort(sort.Reverse(filtered)) - - // select other routes from candidates whoes expression matches rmatch - matchingRoutes := []Route{filtered.candidates[0].route} - for c := 1; c < len(filtered.candidates); c++ { - each := filtered.candidates[c] - if each.route.pathExpr.Matcher.MatchString(pathRemainder) { - matchingRoutes = append(matchingRoutes, each.route) - } - } - return matchingRoutes -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1) -func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) { - filtered := &sortableDispatcherCandidates{} - for _, each := range dispatchers { - matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath) - if matches != nil { - filtered.candidates = append(filtered.candidates, - dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount}) - } - } - if len(filtered.candidates) == 0 { - if trace { - traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath) - } - return nil, "", errors.New("not found") - } - sort.Sort(sort.Reverse(filtered)) - return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil -} - -// Types and functions to support the sorting of Routes - -type routeCandidate struct { - route Route - matchesCount int // the number of capturing groups - literalCount int // the number of literal characters (means those not resulting from template variable substitution) - nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. 
not ‘([^ /]+?)’) -} - -func (r routeCandidate) expressionToMatch() string { - return r.route.pathExpr.Source -} - -func (r routeCandidate) String() string { - return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount) -} - -type sortableRouteCandidates struct { - candidates []routeCandidate -} - -func (rcs *sortableRouteCandidates) Len() int { - return len(rcs.candidates) -} -func (rcs *sortableRouteCandidates) Swap(i, j int) { - rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i] -} -func (rcs *sortableRouteCandidates) Less(i, j int) bool { - ci := rcs.candidates[i] - cj := rcs.candidates[j] - // primary key - if ci.literalCount < cj.literalCount { - return true - } - if ci.literalCount > cj.literalCount { - return false - } - // secundary key - if ci.matchesCount < cj.matchesCount { - return true - } - if ci.matchesCount > cj.matchesCount { - return false - } - // tertiary key - if ci.nonDefaultCount < cj.nonDefaultCount { - return true - } - if ci.nonDefaultCount > cj.nonDefaultCount { - return false - } - // quaternary key ("source" is interpreted as Path) - return ci.route.Path < cj.route.Path -} - -// Types and functions to support the sorting of Dispatchers - -type dispatcherCandidate struct { - dispatcher *WebService - finalMatch string - matchesCount int // the number of capturing groups - literalCount int // the number of literal characters (means those not resulting from template variable substitution) - nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. 
not ‘([^ /]+?)’) -} -type sortableDispatcherCandidates struct { - candidates []dispatcherCandidate -} - -func (dc *sortableDispatcherCandidates) Len() int { - return len(dc.candidates) -} -func (dc *sortableDispatcherCandidates) Swap(i, j int) { - dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i] -} -func (dc *sortableDispatcherCandidates) Less(i, j int) bool { - ci := dc.candidates[i] - cj := dc.candidates[j] - // primary key - if ci.matchesCount < cj.matchesCount { - return true - } - if ci.matchesCount > cj.matchesCount { - return false - } - // secundary key - if ci.literalCount < cj.literalCount { - return true - } - if ci.literalCount > cj.literalCount { - return false - } - // tertiary key - return ci.nonDefaultCount < cj.nonDefaultCount -} diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/log/log.go deleted file mode 100644 index 6cd44c7..0000000 --- a/vendor/github.com/emicklei/go-restful/log/log.go +++ /dev/null @@ -1,34 +0,0 @@ -package log - -import ( - stdlog "log" - "os" -) - -// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger -type StdLogger interface { - Print(v ...interface{}) - Printf(format string, v ...interface{}) -} - -var Logger StdLogger - -func init() { - // default Logger - SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile)) -} - -// SetLogger sets the logger for this package -func SetLogger(customLogger StdLogger) { - Logger = customLogger -} - -// Print delegates to the Logger -func Print(v ...interface{}) { - Logger.Print(v...) -} - -// Printf delegates to the Logger -func Printf(format string, v ...interface{}) { - Logger.Printf(format, v...) 
-} diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/logger.go deleted file mode 100644 index 6595df0..0000000 --- a/vendor/github.com/emicklei/go-restful/logger.go +++ /dev/null @@ -1,32 +0,0 @@ -package restful - -// Copyright 2014 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. -import ( - "github.com/emicklei/go-restful/log" -) - -var trace bool = false -var traceLogger log.StdLogger - -func init() { - traceLogger = log.Logger // use the package logger by default -} - -// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set. -// You may call EnableTracing() directly to enable trace logging to the package-wide logger. -func TraceLogger(logger log.StdLogger) { - traceLogger = logger - EnableTracing(logger != nil) -} - -// SetLogger exposes the setter for the global logger on the top-level package -func SetLogger(customLogger log.StdLogger) { - log.SetLogger(customLogger) -} - -// EnableTracing can be used to Trace logging on and off. -func EnableTracing(enabled bool) { - trace = enabled -} diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/mime.go deleted file mode 100644 index 3301447..0000000 --- a/vendor/github.com/emicklei/go-restful/mime.go +++ /dev/null @@ -1,50 +0,0 @@ -package restful - -import ( - "strconv" - "strings" -) - -type mime struct { - media string - quality float64 -} - -// insertMime adds a mime to a list and keeps it sorted by quality. -func insertMime(l []mime, e mime) []mime { - for i, each := range l { - // if current mime has lower quality then insert before - if e.quality > each.quality { - left := append([]mime{}, l[0:i]...) - return append(append(left, e), l[i:]...) 
- } - } - return append(l, e) -} - -const qFactorWeightingKey = "q" - -// sortedMimes returns a list of mime sorted (desc) by its specified quality. -// e.g. text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3 -func sortedMimes(accept string) (sorted []mime) { - for _, each := range strings.Split(accept, ",") { - typeAndQuality := strings.Split(strings.Trim(each, " "), ";") - if len(typeAndQuality) == 1 { - sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) - } else { - // take factor - qAndWeight := strings.Split(typeAndQuality[1], "=") - if len(qAndWeight) == 2 && strings.Trim(qAndWeight[0], " ") == qFactorWeightingKey { - f, err := strconv.ParseFloat(qAndWeight[1], 64) - if err != nil { - traceLogger.Printf("unable to parse quality in %s, %v", each, err) - } else { - sorted = insertMime(sorted, mime{typeAndQuality[0], f}) - } - } else { - sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) - } - } - } - return -} diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/options_filter.go deleted file mode 100644 index 5c1b342..0000000 --- a/vendor/github.com/emicklei/go-restful/options_filter.go +++ /dev/null @@ -1,34 +0,0 @@ -package restful - -import "strings" - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method -// and provides the response with a set of allowed methods for the request URL Path. -// As for any filter, you can also install it for a particular WebService within a Container. -// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). 
-func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) { - if "OPTIONS" != req.Request.Method { - chain.ProcessFilter(req, resp) - return - } - - archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) - methods := strings.Join(c.computeAllowedMethods(req), ",") - origin := req.Request.Header.Get(HEADER_Origin) - - resp.AddHeader(HEADER_Allow, methods) - resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) - resp.AddHeader(HEADER_AccessControlAllowHeaders, archs) - resp.AddHeader(HEADER_AccessControlAllowMethods, methods) -} - -// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method -// and provides the response with a set of allowed methods for the request URL Path. -// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). -func OPTIONSFilter() FilterFunction { - return DefaultContainer.OPTIONSFilter -} diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go deleted file mode 100644 index e879330..0000000 --- a/vendor/github.com/emicklei/go-restful/parameter.go +++ /dev/null @@ -1,143 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -const ( - // PathParameterKind = indicator of Request parameter type "path" - PathParameterKind = iota - - // QueryParameterKind = indicator of Request parameter type "query" - QueryParameterKind - - // BodyParameterKind = indicator of Request parameter type "body" - BodyParameterKind - - // HeaderParameterKind = indicator of Request parameter type "header" - HeaderParameterKind - - // FormParameterKind = indicator of Request parameter type "form" - FormParameterKind - - // CollectionFormatCSV comma separated values `foo,bar` - CollectionFormatCSV = CollectionFormat("csv") - - // CollectionFormatSSV space separated values `foo bar` - CollectionFormatSSV = CollectionFormat("ssv") - - // CollectionFormatTSV tab separated values `foo\tbar` - CollectionFormatTSV = CollectionFormat("tsv") - - // CollectionFormatPipes pipe separated values `foo|bar` - CollectionFormatPipes = CollectionFormat("pipes") - - // CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single - // instance `foo=bar&foo=baz`. This is valid only for QueryParameters and FormParameters - CollectionFormatMulti = CollectionFormat("multi") -) - -type CollectionFormat string - -func (cf CollectionFormat) String() string { - return string(cf) -} - -// Parameter is for documententing the parameter used in a Http Request -// ParameterData kinds are Path,Query and Body -type Parameter struct { - data *ParameterData -} - -// ParameterData represents the state of a Parameter. -// It is made public to make it accessible to e.g. the Swagger package. 
-type ParameterData struct { - Name, Description, DataType, DataFormat string - Kind int - Required bool - AllowableValues map[string]string - AllowMultiple bool - DefaultValue string - CollectionFormat string -} - -// Data returns the state of the Parameter -func (p *Parameter) Data() ParameterData { - return *p.data -} - -// Kind returns the parameter type indicator (see const for valid values) -func (p *Parameter) Kind() int { - return p.data.Kind -} - -func (p *Parameter) bePath() *Parameter { - p.data.Kind = PathParameterKind - return p -} -func (p *Parameter) beQuery() *Parameter { - p.data.Kind = QueryParameterKind - return p -} -func (p *Parameter) beBody() *Parameter { - p.data.Kind = BodyParameterKind - return p -} - -func (p *Parameter) beHeader() *Parameter { - p.data.Kind = HeaderParameterKind - return p -} - -func (p *Parameter) beForm() *Parameter { - p.data.Kind = FormParameterKind - return p -} - -// Required sets the required field and returns the receiver -func (p *Parameter) Required(required bool) *Parameter { - p.data.Required = required - return p -} - -// AllowMultiple sets the allowMultiple field and returns the receiver -func (p *Parameter) AllowMultiple(multiple bool) *Parameter { - p.data.AllowMultiple = multiple - return p -} - -// AllowableValues sets the allowableValues field and returns the receiver -func (p *Parameter) AllowableValues(values map[string]string) *Parameter { - p.data.AllowableValues = values - return p -} - -// DataType sets the dataType field and returns the receiver -func (p *Parameter) DataType(typeName string) *Parameter { - p.data.DataType = typeName - return p -} - -// DataFormat sets the dataFormat field for Swagger UI -func (p *Parameter) DataFormat(formatName string) *Parameter { - p.data.DataFormat = formatName - return p -} - -// DefaultValue sets the default value field and returns the receiver -func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter { - p.data.DefaultValue = 
stringRepresentation - return p -} - -// Description sets the description value field and returns the receiver -func (p *Parameter) Description(doc string) *Parameter { - p.data.Description = doc - return p -} - -// CollectionFormat sets the collection format for an array type -func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter { - p.data.CollectionFormat = format.String() - return p -} diff --git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/path_expression.go deleted file mode 100644 index 95a9a25..0000000 --- a/vendor/github.com/emicklei/go-restful/path_expression.go +++ /dev/null @@ -1,74 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "fmt" - "regexp" - "strings" -) - -// PathExpression holds a compiled path expression (RegExp) needed to match against -// Http request paths and to extract path parameter values. -type pathExpression struct { - LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) - VarNames []string // the names of parameters (enclosed by {}) in the path - VarCount int // the number of named parameters (enclosed by {}) in the path - Matcher *regexp.Regexp - Source string // Path as defined by the RouteBuilder - tokens []string -} - -// NewPathExpression creates a PathExpression from the input URL path. -// Returns an error if the path is invalid. 
-func newPathExpression(path string) (*pathExpression, error) { - expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path) - compiled, err := regexp.Compile(expression) - if err != nil { - return nil, err - } - return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3 -func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) { - var buffer bytes.Buffer - buffer.WriteString("^") - //tokens = strings.Split(template, "/") - tokens = tokenizePath(template) - for _, each := range tokens { - if each == "" { - continue - } - buffer.WriteString("/") - if strings.HasPrefix(each, "{") { - // check for regular expression in variable - colon := strings.Index(each, ":") - var varName string - if colon != -1 { - // extract expression - varName = strings.TrimSpace(each[1:colon]) - paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1]) - if paramExpr == "*" { // special case - buffer.WriteString("(.*)") - } else { - buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache - } - } else { - // plain var - varName = strings.TrimSpace(each[1 : len(each)-1]) - buffer.WriteString("([^/]+?)") - } - varNames = append(varNames, varName) - varCount += 1 - } else { - literalCount += len(each) - encoded := each // TODO URI encode - buffer.WriteString(regexp.QuoteMeta(encoded)) - } - } - return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens -} diff --git a/vendor/github.com/emicklei/go-restful/path_processor.go b/vendor/github.com/emicklei/go-restful/path_processor.go deleted file mode 100644 index 357c723..0000000 --- a/vendor/github.com/emicklei/go-restful/path_processor.go +++ /dev/null @@ -1,63 +0,0 @@ -package restful - -import ( - "bytes" - "strings" -) - -// Copyright 2018 
Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path. -// If a Router does not implement this interface then the default behaviour will be used. -type PathProcessor interface { - // ExtractParameters gets the path parameters defined in the route and webService from the urlPath - ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string -} - -type defaultPathProcessor struct{} - -// Extract the parameters from the request url path -func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string { - urlParts := tokenizePath(urlPath) - pathParameters := map[string]string{} - for i, key := range r.pathParts { - var value string - if i >= len(urlParts) { - value = "" - } else { - value = urlParts[i] - } - if strings.HasPrefix(key, "{") { // path-parameter - if colon := strings.Index(key, ":"); colon != -1 { - // extract by regex - regPart := key[colon+1 : len(key)-1] - keyPart := key[1:colon] - if regPart == "*" { - pathParameters[keyPart] = untokenizePath(i, urlParts) - break - } else { - pathParameters[keyPart] = value - } - } else { - // without enclosing {} - pathParameters[key[1:len(key)-1]] = value - } - } - } - return pathParameters -} - -// Untokenize back into an URL path using the slash separator -func untokenizePath(offset int, parts []string) string { - var buffer bytes.Buffer - for p := offset; p < len(parts); p++ { - buffer.WriteString(parts[p]) - // do not end - if p < len(parts)-1 { - buffer.WriteString("/") - } - } - return buffer.String() -} diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go deleted file mode 100644 index a20730f..0000000 --- a/vendor/github.com/emicklei/go-restful/request.go +++ /dev/null @@ -1,118 +0,0 @@ -package 
restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/zlib" - "net/http" -) - -var defaultRequestContentType string - -// Request is a wrapper for a http Request that provides convenience methods -type Request struct { - Request *http.Request - pathParameters map[string]string - attributes map[string]interface{} // for storing request-scoped values - selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees -} - -func NewRequest(httpRequest *http.Request) *Request { - return &Request{ - Request: httpRequest, - pathParameters: map[string]string{}, - attributes: map[string]interface{}{}, - } // empty parameters, attributes -} - -// If ContentType is missing or */* is given then fall back to this type, otherwise -// a "Unable to unmarshal content of type:" response is returned. -// Valid values are restful.MIME_JSON and restful.MIME_XML -// Example: -// restful.DefaultRequestContentType(restful.MIME_JSON) -func DefaultRequestContentType(mime string) { - defaultRequestContentType = mime -} - -// PathParameter accesses the Path parameter value by its name -func (r *Request) PathParameter(name string) string { - return r.pathParameters[name] -} - -// PathParameters accesses the Path parameter values -func (r *Request) PathParameters() map[string]string { - return r.pathParameters -} - -// QueryParameter returns the (first) Query parameter value by its name -func (r *Request) QueryParameter(name string) string { - return r.Request.FormValue(name) -} - -// QueryParameters returns the all the query parameters values by name -func (r *Request) QueryParameters(name string) []string { - return r.Request.URL.Query()[name] -} - -// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error. 
-func (r *Request) BodyParameter(name string) (string, error) { - err := r.Request.ParseForm() - if err != nil { - return "", err - } - return r.Request.PostFormValue(name), nil -} - -// HeaderParameter returns the HTTP Header value of a Header name or empty if missing -func (r *Request) HeaderParameter(name string) string { - return r.Request.Header.Get(name) -} - -// ReadEntity checks the Accept header and reads the content into the entityPointer. -func (r *Request) ReadEntity(entityPointer interface{}) (err error) { - contentType := r.Request.Header.Get(HEADER_ContentType) - contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding) - - // check if the request body needs decompression - if ENCODING_GZIP == contentEncoding { - gzipReader := currentCompressorProvider.AcquireGzipReader() - defer currentCompressorProvider.ReleaseGzipReader(gzipReader) - gzipReader.Reset(r.Request.Body) - r.Request.Body = gzipReader - } else if ENCODING_DEFLATE == contentEncoding { - zlibReader, err := zlib.NewReader(r.Request.Body) - if err != nil { - return err - } - r.Request.Body = zlibReader - } - - // lookup the EntityReader, use defaultRequestContentType if needed and provided - entityReader, ok := entityAccessRegistry.accessorAt(contentType) - if !ok { - if len(defaultRequestContentType) != 0 { - entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType) - } - if !ok { - return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) - } - } - return entityReader.Read(r, entityPointer) -} - -// SetAttribute adds or replaces the attribute with the given value. -func (r *Request) SetAttribute(name string, value interface{}) { - r.attributes[name] = value -} - -// Attribute returns the value associated to the given name. Returns nil if absent. -func (r Request) Attribute(name string) interface{} { - return r.attributes[name] -} - -// SelectedRoutePath root path + route path that matched the request, e.g. 
/meetings/{id}/attendees -func (r Request) SelectedRoutePath() string { - return r.selectedRoutePath -} diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go deleted file mode 100644 index fbb48f2..0000000 --- a/vendor/github.com/emicklei/go-restful/response.go +++ /dev/null @@ -1,255 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bufio" - "errors" - "net" - "net/http" -) - -// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime) -var DefaultResponseMimeType string - -//PrettyPrintResponses controls the indentation feature of XML and JSON serialization -var PrettyPrintResponses = true - -// Response is a wrapper on the actual http ResponseWriter -// It provides several convenience methods to prepare and write response content. -type Response struct { - http.ResponseWriter - requestAccept string // mime-type what the Http Request says it wants to receive - routeProduces []string // mime-types what the Route says it can produce - statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200) - contentLength int // number of bytes written for the response body - prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. - err error // err property is kept when WriteError is called - hijacker http.Hijacker // if underlying ResponseWriter supports it -} - -// NewResponse creates a new response based on a http ResponseWriter. 
-func NewResponse(httpWriter http.ResponseWriter) *Response { - hijacker, _ := httpWriter.(http.Hijacker) - return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker} -} - -// DefaultResponseContentType set a default. -// If Accept header matching fails, fall back to this type. -// Valid values are restful.MIME_JSON and restful.MIME_XML -// Example: -// restful.DefaultResponseContentType(restful.MIME_JSON) -func DefaultResponseContentType(mime string) { - DefaultResponseMimeType = mime -} - -// InternalServerError writes the StatusInternalServerError header. -// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason) -func (r Response) InternalServerError() Response { - r.WriteHeader(http.StatusInternalServerError) - return r -} - -// Hijack implements the http.Hijacker interface. This expands -// the Response to fulfill http.Hijacker if the underlying -// http.ResponseWriter supports it. -func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if r.hijacker == nil { - return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter") - } - return r.hijacker.Hijack() -} - -// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output. -func (r *Response) PrettyPrint(bePretty bool) { - r.prettyPrint = bePretty -} - -// AddHeader is a shortcut for .Header().Add(header,value) -func (r Response) AddHeader(header string, value string) Response { - r.Header().Add(header, value) - return r -} - -// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing. 
-func (r *Response) SetRequestAccepts(mime string) { - r.requestAccept = mime -} - -// EntityWriter returns the registered EntityWriter that the entity (requested resource) -// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say. -// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable. -func (r *Response) EntityWriter() (EntityReaderWriter, bool) { - sorted := sortedMimes(r.requestAccept) - for _, eachAccept := range sorted { - for _, eachProduce := range r.routeProduces { - if eachProduce == eachAccept.media { - if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok { - return w, true - } - } - } - if eachAccept.media == "*/*" { - for _, each := range r.routeProduces { - if w, ok := entityAccessRegistry.accessorAt(each); ok { - return w, true - } - } - } - } - // if requestAccept is empty - writer, ok := entityAccessRegistry.accessorAt(r.requestAccept) - if !ok { - // if not registered then fallback to the defaults (if set) - if DefaultResponseMimeType == MIME_JSON { - return entityAccessRegistry.accessorAt(MIME_JSON) - } - if DefaultResponseMimeType == MIME_XML { - return entityAccessRegistry.accessorAt(MIME_XML) - } - // Fallback to whatever the route says it can produce. - // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - for _, each := range r.routeProduces { - if w, ok := entityAccessRegistry.accessorAt(each); ok { - return w, true - } - } - if trace { - traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept) - } - } - return writer, ok -} - -// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200) -func (r *Response) WriteEntity(value interface{}) error { - return r.WriteHeaderAndEntity(http.StatusOK, value) -} - -// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters. 
-// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces. -// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header. -// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead. -// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written. -// Current implementation ignores any q-parameters in the Accept Header. -// Returns an error if the value could not be written on the response. -func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error { - writer, ok := r.EntityWriter() - if !ok { - r.WriteHeader(http.StatusNotAcceptable) - return nil - } - return writer.Write(r, status, value) -} - -// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value) -// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteAsXml(value interface{}) error { - return writeXML(r, http.StatusOK, MIME_XML, value) -} - -// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value) -// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteHeaderAndXml(status int, value interface{}) error { - return writeXML(r, status, MIME_XML, value) -} - -// WriteAsJson is a convenience method for writing a value in json. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. 
-func (r *Response) WriteAsJson(value interface{}) error { - return writeJSON(r, http.StatusOK, MIME_JSON, value) -} - -// WriteJson is a convenience method for writing a value in Json with a given Content-Type. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteJson(value interface{}, contentType string) error { - return writeJSON(r, http.StatusOK, contentType, value) -} - -// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error { - return writeJSON(r, status, contentType, value) -} - -// WriteError write the http status and the error string on the response. err can be nil. -func (r *Response) WriteError(httpStatus int, err error) error { - r.err = err - if err == nil { - r.WriteErrorString(httpStatus, "") - } else { - r.WriteErrorString(httpStatus, err.Error()) - } - return err -} - -// WriteServiceError is a convenience method for a responding with a status and a ServiceError -func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error { - r.err = err - return r.WriteHeaderAndEntity(httpStatus, err) -} - -// WriteErrorString is a convenience method for an error status with the actual error -func (r *Response) WriteErrorString(httpStatus int, errorReason string) error { - if r.err == nil { - // if not called from WriteError - r.err = errors.New(errorReason) - } - r.WriteHeader(httpStatus) - if _, err := r.Write([]byte(errorReason)); err != nil { - return err - } - return nil -} - -// Flush implements http.Flusher interface, which sends any buffered data to the client. 
-func (r *Response) Flush() { - if f, ok := r.ResponseWriter.(http.Flusher); ok { - f.Flush() - } else if trace { - traceLogger.Printf("ResponseWriter %v doesn't support Flush", r) - } -} - -// WriteHeader is overridden to remember the Status Code that has been written. -// Changes to the Header of the response have no effect after this. -func (r *Response) WriteHeader(httpStatus int) { - r.statusCode = httpStatus - r.ResponseWriter.WriteHeader(httpStatus) -} - -// StatusCode returns the code that has been written using WriteHeader. -func (r Response) StatusCode() int { - if 0 == r.statusCode { - // no status code has been written yet; assume OK - return http.StatusOK - } - return r.statusCode -} - -// Write writes the data to the connection as part of an HTTP reply. -// Write is part of http.ResponseWriter interface. -func (r *Response) Write(bytes []byte) (int, error) { - written, err := r.ResponseWriter.Write(bytes) - r.contentLength += written - return written, err -} - -// ContentLength returns the number of bytes written for the response content. -// Note that this value is only correct if all data is written through the Response using its Write* methods. -// Data written directly using the underlying http.ResponseWriter is not accounted for. -func (r Response) ContentLength() int { - return r.contentLength -} - -// CloseNotify is part of http.CloseNotifier interface -func (r Response) CloseNotify() <-chan bool { - return r.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -// Error returns the err created by WriteError -func (r Response) Error() error { - return r.err -} diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go deleted file mode 100644 index 6d15dbf..0000000 --- a/vendor/github.com/emicklei/go-restful/route.go +++ /dev/null @@ -1,170 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. 
-// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" - "strings" -) - -// RouteFunction declares the signature of a function that can be bound to a Route. -type RouteFunction func(*Request, *Response) - -// RouteSelectionConditionFunction declares the signature of a function that -// can be used to add extra conditional logic when selecting whether the route -// matches the HTTP request. -type RouteSelectionConditionFunction func(httpRequest *http.Request) bool - -// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. -type Route struct { - Method string - Produces []string - Consumes []string - Path string // webservice root path + described path - Function RouteFunction - Filters []FilterFunction - If []RouteSelectionConditionFunction - - // cached values for dispatching - relativePath string - pathParts []string - pathExpr *pathExpression // cached compilation of relativePath as RegExp - - // documentation - Doc string - Notes string - Operation string - ParameterDocs []*Parameter - ResponseErrors map[int]ResponseError - DefaultResponse *ResponseError - ReadSample, WriteSample interface{} // structs that model an example request or response payload - - // Extra information used to store custom information about the route. 
- Metadata map[string]interface{} - - // marks a route as deprecated - Deprecated bool - - //Overrides the container.contentEncodingEnabled - contentEncodingEnabled *bool -} - -// Initialize for Route -func (r *Route) postBuild() { - r.pathParts = tokenizePath(r.Path) -} - -// Create Request and Response from their http versions -func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) { - wrappedRequest := NewRequest(httpRequest) - wrappedRequest.pathParameters = pathParams - wrappedRequest.selectedRoutePath = r.Path - wrappedResponse := NewResponse(httpWriter) - wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) - wrappedResponse.routeProduces = r.Produces - return wrappedRequest, wrappedResponse -} - -// dispatchWithFilters call the function after passing through its own filters -func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) { - if len(r.Filters) > 0 { - chain := FilterChain{Filters: r.Filters, Target: r.Function} - chain.ProcessFilter(wrappedRequest, wrappedResponse) - } else { - // unfiltered - r.Function(wrappedRequest, wrappedResponse) - } -} - -func stringTrimSpaceCutset(r rune) bool { - return r == ' ' -} - -// Return whether the mimeType matches to what this Route can produce. 
-func (r Route) matchesAccept(mimeTypesWithQuality string) bool { - remaining := mimeTypesWithQuality - for { - var mimeType string - if end := strings.Index(remaining, ","); end == -1 { - mimeType, remaining = remaining, "" - } else { - mimeType, remaining = remaining[:end], remaining[end+1:] - } - if quality := strings.Index(mimeType, ";"); quality != -1 { - mimeType = mimeType[:quality] - } - mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) - if mimeType == "*/*" { - return true - } - for _, producibleType := range r.Produces { - if producibleType == "*/*" || producibleType == mimeType { - return true - } - } - if len(remaining) == 0 { - return false - } - } -} - -// Return whether this Route can consume content with a type specified by mimeTypes (can be empty). -func (r Route) matchesContentType(mimeTypes string) bool { - - if len(r.Consumes) == 0 { - // did not specify what it can consume ; any media type (“*/*”) is assumed - return true - } - - if len(mimeTypes) == 0 { - // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type - m := r.Method - if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { - return true - } - // proceed with default - mimeTypes = MIME_OCTET - } - - remaining := mimeTypes - for { - var mimeType string - if end := strings.Index(remaining, ","); end == -1 { - mimeType, remaining = remaining, "" - } else { - mimeType, remaining = remaining[:end], remaining[end+1:] - } - if quality := strings.Index(mimeType, ";"); quality != -1 { - mimeType = mimeType[:quality] - } - mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) - for _, consumeableType := range r.Consumes { - if consumeableType == "*/*" || consumeableType == mimeType { - return true - } - } - if len(remaining) == 0 { - return false - } - } -} - -// Tokenize an URL path using the slash separator ; the result does not have empty tokens -func tokenizePath(path string) []string { - if "/" == path { - 
return nil - } - return strings.Split(strings.Trim(path, "/"), "/") -} - -// for debugging -func (r Route) String() string { - return r.Method + " " + r.Path -} - -// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value. -func (r Route) EnableContentEncoding(enabled bool) { - r.contentEncodingEnabled = &enabled -} diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go deleted file mode 100644 index 0fccf61..0000000 --- a/vendor/github.com/emicklei/go-restful/route_builder.go +++ /dev/null @@ -1,326 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "fmt" - "os" - "reflect" - "runtime" - "strings" - "sync/atomic" - - "github.com/emicklei/go-restful/log" -) - -// RouteBuilder is a helper to construct Routes. -type RouteBuilder struct { - rootPath string - currentPath string - produces []string - consumes []string - httpMethod string // required - function RouteFunction // required - filters []FilterFunction - conditions []RouteSelectionConditionFunction - - typeNameHandleFunc TypeNameHandleFunction // required - - // documentation - doc string - notes string - operation string - readSample, writeSample interface{} - parameters []*Parameter - errorMap map[int]ResponseError - defaultResponse *ResponseError - metadata map[string]interface{} - deprecated bool - contentEncodingEnabled *bool -} - -// Do evaluates each argument with the RouteBuilder itself. -// This allows you to follow DRY principles without breaking the fluent programming style. 
-// Example: -// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) -// -// func Returns500(b *RouteBuilder) { -// b.Returns(500, "Internal Server Error", restful.ServiceError{}) -// } -func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { - for _, each := range oneArgBlocks { - each(b) - } - return b -} - -// To bind the route to a function. -// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required. -func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder { - b.function = function - return b -} - -// Method specifies what HTTP method to match. Required. -func (b *RouteBuilder) Method(method string) *RouteBuilder { - b.httpMethod = method - return b -} - -// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header. -func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder { - b.produces = mimeTypes - return b -} - -// Consumes specifies what MIME types can be consumes ; the Accept Http header must matched any of these -func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder { - b.consumes = mimeTypes - return b -} - -// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/". -func (b *RouteBuilder) Path(subPath string) *RouteBuilder { - b.currentPath = subPath - return b -} - -// Doc tells what this route is all about. Optional. -func (b *RouteBuilder) Doc(documentation string) *RouteBuilder { - b.doc = documentation - return b -} - -// Notes is a verbose explanation of the operation behavior. Optional. -func (b *RouteBuilder) Notes(notes string) *RouteBuilder { - b.notes = notes - return b -} - -// Reads tells what resource type will be read from the request payload. Optional. -// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type. 
-func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder { - fn := b.typeNameHandleFunc - if fn == nil { - fn = reflectTypeName - } - typeAsName := fn(sample) - description := "" - if len(optionalDescription) > 0 { - description = optionalDescription[0] - } - b.readSample = sample - bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}} - bodyParameter.beBody() - bodyParameter.Required(true) - bodyParameter.DataType(typeAsName) - b.Param(bodyParameter) - return b -} - -// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not. -// Use this to modify or extend information for the Parameter (through its Data()). -func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { - for _, each := range b.parameters { - if each.Data().Name == name { - return each - } - } - return p -} - -// Writes tells what resource type will be written as the response payload. Optional. -func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder { - b.writeSample = sample - return b -} - -// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates). -func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder { - if b.parameters == nil { - b.parameters = []*Parameter{} - } - b.parameters = append(b.parameters, parameter) - return b -} - -// Operation allows you to document what the actual method/function call is of the Route. -// Unless called, the operation name is derived from the RouteFunction set using To(..). -func (b *RouteBuilder) Operation(name string) *RouteBuilder { - b.operation = name - return b -} - -// ReturnsError is deprecated, use Returns instead. 
-func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder { - log.Print("ReturnsError is deprecated, use Returns instead.") - return b.Returns(code, message, model) -} - -// Returns allows you to document what responses (errors or regular) can be expected. -// The model parameter is optional ; either pass a struct instance or use nil if not applicable. -func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder { - err := ResponseError{ - Code: code, - Message: message, - Model: model, - IsDefault: false, // this field is deprecated, use default response instead. - } - // lazy init because there is no NewRouteBuilder (yet) - if b.errorMap == nil { - b.errorMap = map[int]ResponseError{} - } - b.errorMap[code] = err - return b -} - -// DefaultReturns is a special Returns call that sets the default of the response. -func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder { - b.defaultResponse = &ResponseError{ - Message: message, - Model: model, - } - return b -} - -// Metadata adds or updates a key=value pair to the metadata map. -func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { - if b.metadata == nil { - b.metadata = map[string]interface{}{} - } - b.metadata[key] = value - return b -} - -// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use -func (b *RouteBuilder) Deprecate() *RouteBuilder { - b.deprecated = true - return b -} - -// ResponseError represents a response; not necessarily an error. -type ResponseError struct { - Code int - Message string - Model interface{} - IsDefault bool -} - -func (b *RouteBuilder) servicePath(path string) *RouteBuilder { - b.rootPath = path - return b -} - -// Filter appends a FilterFunction to the end of filters for this Route to build. 
-func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder { - b.filters = append(b.filters, filter) - return b -} - -// If sets a condition function that controls matching the Route based on custom logic. -// The condition function is provided the HTTP request and should return true if the route -// should be considered. -// -// Efficiency note: the condition function is called before checking the method, produces, and -// consumes criteria, so that the correct HTTP status code can be returned. -// -// Lifecycle note: no filter functions have been called prior to calling the condition function, -// so the condition function should not depend on any context that might be set up by container -// or route filters. -func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder { - b.conditions = append(b.conditions, condition) - return b -} - -// ContentEncodingEnabled allows you to override the Containers value for auto-compressing this route response. -func (b *RouteBuilder) ContentEncodingEnabled(enabled bool) *RouteBuilder { - b.contentEncodingEnabled = &enabled - return b -} - -// If no specific Route path then set to rootPath -// If no specific Produces then set to rootProduces -// If no specific Consumes then set to rootConsumes -func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) { - if len(b.produces) == 0 { - b.produces = rootProduces - } - if len(b.consumes) == 0 { - b.consumes = rootConsumes - } -} - -// typeNameHandler sets the function that will convert types to strings in the parameter -// and model definitions. 
-func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder { - b.typeNameHandleFunc = handler - return b -} - -// Build creates a new Route using the specification details collected by the RouteBuilder -func (b *RouteBuilder) Build() Route { - pathExpr, err := newPathExpression(b.currentPath) - if err != nil { - log.Printf("Invalid path:%s because:%v", b.currentPath, err) - os.Exit(1) - } - if b.function == nil { - log.Printf("No function specified for route:" + b.currentPath) - os.Exit(1) - } - operationName := b.operation - if len(operationName) == 0 && b.function != nil { - // extract from definition - operationName = nameOfFunction(b.function) - } - route := Route{ - Method: b.httpMethod, - Path: concatPath(b.rootPath, b.currentPath), - Produces: b.produces, - Consumes: b.consumes, - Function: b.function, - Filters: b.filters, - If: b.conditions, - relativePath: b.currentPath, - pathExpr: pathExpr, - Doc: b.doc, - Notes: b.notes, - Operation: operationName, - ParameterDocs: b.parameters, - ResponseErrors: b.errorMap, - DefaultResponse: b.defaultResponse, - ReadSample: b.readSample, - WriteSample: b.writeSample, - Metadata: b.metadata, - Deprecated: b.deprecated, - contentEncodingEnabled: b.contentEncodingEnabled, - } - route.postBuild() - return route -} - -func concatPath(path1, path2 string) string { - return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") -} - -var anonymousFuncCount int32 - -// nameOfFunction returns the short name of the function f for documentation. -// It uses a runtime feature for debugging ; its value may change for later Go versions. 
-func nameOfFunction(f interface{}) string { - fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer()) - tokenized := strings.Split(fun.Name(), ".") - last := tokenized[len(tokenized)-1] - last = strings.TrimSuffix(last, ")·fm") // < Go 1.5 - last = strings.TrimSuffix(last, ")-fm") // Go 1.5 - last = strings.TrimSuffix(last, "·fm") // < Go 1.5 - last = strings.TrimSuffix(last, "-fm") // Go 1.5 - if last == "func1" { // this could mean conflicts in API docs - val := atomic.AddInt32(&anonymousFuncCount, 1) - last = "func" + fmt.Sprintf("%d", val) - atomic.StoreInt32(&anonymousFuncCount, val) - } - return last -} diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/router.go deleted file mode 100644 index 19078af..0000000 --- a/vendor/github.com/emicklei/go-restful/router.go +++ /dev/null @@ -1,20 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import "net/http" - -// A RouteSelector finds the best matching Route given the input HTTP Request -// RouteSelectors can optionally also implement the PathProcessor interface to also calculate the -// path parameters after the route has been selected. -type RouteSelector interface { - - // SelectRoute finds a Route given the input HTTP Request and a list of WebServices. - // It returns a selected Route and its containing WebService or an error indicating - // a problem. - SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) -} diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/service_error.go deleted file mode 100644 index 62d1108..0000000 --- a/vendor/github.com/emicklei/go-restful/service_error.go +++ /dev/null @@ -1,23 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. 
-// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import "fmt" - -// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request. -type ServiceError struct { - Code int - Message string -} - -// NewError returns a ServiceError using the code and reason -func NewError(code int, message string) ServiceError { - return ServiceError{Code: code, Message: message} -} - -// Error returns a text representation of the service error -func (s ServiceError) Error() string { - return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message) -} diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go deleted file mode 100644 index 77ba9a8..0000000 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ /dev/null @@ -1,290 +0,0 @@ -package restful - -import ( - "errors" - "os" - "reflect" - "sync" - - "github.com/emicklei/go-restful/log" -) - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// WebService holds a collection of Route values that bind a Http Method + URL Path to a function. -type WebService struct { - rootPath string - pathExpr *pathExpression // cached compilation of rootPath as RegExp - routes []Route - produces []string - consumes []string - pathParameters []*Parameter - filters []FilterFunction - documentation string - apiVersion string - - typeNameHandleFunc TypeNameHandleFunction - - dynamicRoutes bool - - // protects 'routes' if dynamic routes are enabled - routesLock sync.RWMutex -} - -func (w *WebService) SetDynamicRoutes(enable bool) { - w.dynamicRoutes = enable -} - -// TypeNameHandleFunction declares functions that can handle translating the name of a sample object -// into the restful documentation for the service. 
-type TypeNameHandleFunction func(sample interface{}) string - -// TypeNameHandler sets the function that will convert types to strings in the parameter -// and model definitions. If not set, the web service will invoke -// reflect.TypeOf(object).String(). -func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService { - w.typeNameHandleFunc = handler - return w -} - -// reflectTypeName is the default TypeNameHandleFunction and for a given object -// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via -// the reflection API. -func reflectTypeName(sample interface{}) string { - return reflect.TypeOf(sample).String() -} - -// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it. -func (w *WebService) compilePathExpression() { - compiled, err := newPathExpression(w.rootPath) - if err != nil { - log.Printf("invalid path:%s because:%v", w.rootPath, err) - os.Exit(1) - } - w.pathExpr = compiled -} - -// ApiVersion sets the API version for documentation purposes. -func (w *WebService) ApiVersion(apiVersion string) *WebService { - w.apiVersion = apiVersion - return w -} - -// Version returns the API version for documentation purposes. -func (w *WebService) Version() string { return w.apiVersion } - -// Path specifies the root URL template path of the WebService. -// All Routes will be relative to this path. -func (w *WebService) Path(root string) *WebService { - w.rootPath = root - if len(w.rootPath) == 0 { - w.rootPath = "/" - } - w.compilePathExpression() - return w -} - -// Param adds a PathParameter to document parameters used in the root path. -func (w *WebService) Param(parameter *Parameter) *WebService { - if w.pathParameters == nil { - w.pathParameters = []*Parameter{} - } - w.pathParameters = append(w.pathParameters, parameter) - return w -} - -// PathParameter creates a new Parameter of kind Path for documentation purposes. 
-// It is initialized as required with string as its DataType. -func (w *WebService) PathParameter(name, description string) *Parameter { - return PathParameter(name, description) -} - -// PathParameter creates a new Parameter of kind Path for documentation purposes. -// It is initialized as required with string as its DataType. -func PathParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}} - p.bePath() - return p -} - -// QueryParameter creates a new Parameter of kind Query for documentation purposes. -// It is initialized as not required with string as its DataType. -func (w *WebService) QueryParameter(name, description string) *Parameter { - return QueryParameter(name, description) -} - -// QueryParameter creates a new Parameter of kind Query for documentation purposes. -// It is initialized as not required with string as its DataType. -func QueryParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}} - p.beQuery() - return p -} - -// BodyParameter creates a new Parameter of kind Body for documentation purposes. -// It is initialized as required without a DataType. -func (w *WebService) BodyParameter(name, description string) *Parameter { - return BodyParameter(name, description) -} - -// BodyParameter creates a new Parameter of kind Body for documentation purposes. -// It is initialized as required without a DataType. -func BodyParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}} - p.beBody() - return p -} - -// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. -// It is initialized as not required with string as its DataType. 
-func (w *WebService) HeaderParameter(name, description string) *Parameter { - return HeaderParameter(name, description) -} - -// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. -// It is initialized as not required with string as its DataType. -func HeaderParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beHeader() - return p -} - -// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. -// It is initialized as required with string as its DataType. -func (w *WebService) FormParameter(name, description string) *Parameter { - return FormParameter(name, description) -} - -// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. -// It is initialized as required with string as its DataType. -func FormParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beForm() - return p -} - -// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes. 
-func (w *WebService) Route(builder *RouteBuilder) *WebService { - w.routesLock.Lock() - defer w.routesLock.Unlock() - builder.copyDefaults(w.produces, w.consumes) - w.routes = append(w.routes, builder.Build()) - return w -} - -// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method' -func (w *WebService) RemoveRoute(path, method string) error { - if !w.dynamicRoutes { - return errors.New("dynamic routes are not enabled.") - } - w.routesLock.Lock() - defer w.routesLock.Unlock() - newRoutes := make([]Route, (len(w.routes) - 1)) - current := 0 - for ix := range w.routes { - if w.routes[ix].Method == method && w.routes[ix].Path == path { - continue - } - newRoutes[current] = w.routes[ix] - current = current + 1 - } - w.routes = newRoutes - return nil -} - -// Method creates a new RouteBuilder and initialize its http method -func (w *WebService) Method(httpMethod string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod) -} - -// Produces specifies that this WebService can produce one or more MIME types. -// Http requests must have one of these values set for the Accept header. -func (w *WebService) Produces(contentTypes ...string) *WebService { - w.produces = contentTypes - return w -} - -// Consumes specifies that this WebService can consume one or more MIME types. -// Http requests must have one of these values set for the Content-Type header. 
-func (w *WebService) Consumes(accepts ...string) *WebService { - w.consumes = accepts - return w -} - -// Routes returns the Routes associated with this WebService -func (w *WebService) Routes() []Route { - if !w.dynamicRoutes { - return w.routes - } - // Make a copy of the array to prevent concurrency problems - w.routesLock.RLock() - defer w.routesLock.RUnlock() - result := make([]Route, len(w.routes)) - for ix := range w.routes { - result[ix] = w.routes[ix] - } - return result -} - -// RootPath returns the RootPath associated with this WebService. Default "/" -func (w *WebService) RootPath() string { - return w.rootPath -} - -// PathParameters return the path parameter names for (shared among its Routes) -func (w *WebService) PathParameters() []*Parameter { - return w.pathParameters -} - -// Filter adds a filter function to the chain of filters applicable to all its Routes -func (w *WebService) Filter(filter FilterFunction) *WebService { - w.filters = append(w.filters, filter) - return w -} - -// Doc is used to set the documentation of this service. -func (w *WebService) Doc(plainText string) *WebService { - w.documentation = plainText - return w -} - -// Documentation returns it. 
-func (w *WebService) Documentation() string { - return w.documentation -} - -/* - Convenience methods -*/ - -// HEAD is a shortcut for .Method("HEAD").Path(subPath) -func (w *WebService) HEAD(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath) -} - -// GET is a shortcut for .Method("GET").Path(subPath) -func (w *WebService) GET(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath) -} - -// POST is a shortcut for .Method("POST").Path(subPath) -func (w *WebService) POST(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath) -} - -// PUT is a shortcut for .Method("PUT").Path(subPath) -func (w *WebService) PUT(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath) -} - -// PATCH is a shortcut for .Method("PATCH").Path(subPath) -func (w *WebService) PATCH(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath) -} - -// DELETE is a shortcut for .Method("DELETE").Path(subPath) -func (w *WebService) DELETE(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath) -} diff --git a/vendor/github.com/emicklei/go-restful/web_service_container.go b/vendor/github.com/emicklei/go-restful/web_service_container.go deleted file mode 100644 index c9d31b0..0000000 --- a/vendor/github.com/emicklei/go-restful/web_service_container.go +++ /dev/null @@ -1,39 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. 
-// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" -) - -// DefaultContainer is a restful.Container that uses http.DefaultServeMux -var DefaultContainer *Container - -func init() { - DefaultContainer = NewContainer() - DefaultContainer.ServeMux = http.DefaultServeMux -} - -// If set the true then panics will not be caught to return HTTP 500. -// In that case, Route functions are responsible for handling any error situation. -// Default value is false = recover from panics. This has performance implications. -// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true) -var DoNotRecover = false - -// Add registers a new WebService add it to the DefaultContainer. -func Add(service *WebService) { - DefaultContainer.Add(service) -} - -// Filter appends a container FilterFunction from the DefaultContainer. -// These are called before dispatching a http.Request to a WebService. -func Filter(filter FilterFunction) { - DefaultContainer.Filter(filter) -} - -// RegisteredWebServices returns the collections of WebServices from the DefaultContainer -func RegisteredWebServices() []*WebService { - return DefaultContainer.RegisteredWebServices() -} diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad8958..0000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001..0000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git 
a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore deleted file mode 100644 index 4cd0cba..0000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global - -.vagrant -*.sublime-project diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml deleted file mode 100644 index a9c3016..0000000 --- a/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -sudo: false -language: go - -go: - - "stable" - - "1.11.x" - - "1.10.x" - - "1.9.x" - -matrix: - include: - - go: "stable" - env: GOLINT=true - allow_failures: - - go: tip - fast_finish: true - - -before_install: - - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi - -script: - - go test --race ./... - -after_script: - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi - - go vet ./... - -os: - - linux - - osx - - windows - -notifications: - email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 5ab5d41..0000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,52 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. 
- -Aaron L -Adrien Bustany -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Evan Phoenix -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Nathan Youngman -Nickolai Zeldovich -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md deleted file mode 100644 index be4d7ea..0000000 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ /dev/null @@ -1,317 +0,0 @@ -# Changelog - -## v1.4.7 / 2018-01-09 - -* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) -* Tests: Fix missing verb on format string (thanks @rchiossi) -* Linux: Fix deadlock in Remove (thanks @aarondl) -* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) -* Docs: Moved FAQ into the README (thanks @vahe) -* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) -* Docs: replace references to OS X with macOS - -## v1.4.2 / 2016-10-10 - -* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) - -## v1.4.1 / 2016-10-04 - -* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) - -## v1.4.0 / 2016-10-01 - -* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) - -## v1.3.1 / 2016-06-28 - -* Windows: fix for double backslash when watching the root of a drive 
[#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) - -## v1.3.0 / 2016-04-19 - -* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) - -## v1.2.10 / 2016-03-02 - -* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) - -## v1.2.9 / 2016-01-13 - -kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) - -## v1.2.8 / 2015-12-17 - -* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) -* inotify: fix race in test -* enable race detection for continuous integration (Linux, Mac, Windows) - -## v1.2.5 / 2015-10-17 - -* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) -* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) -* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) -* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) - -## v1.2.1 / 2015-10-14 - -* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) - -## v1.2.0 / 2015-02-08 - -* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) - -## v1.1.1 / 2015-02-05 - -* inotify: Retry read on 
EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## v1.1.0 / 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## v1.0.4 / 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## v1.0.3 / 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) - -## v1.0.2 / 2014-08-17 - -* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## v1.0.0 / 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. 
- -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. - -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. - * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## v0.9.3 / 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## v0.9.2 / 2014-08-17 - -* [Backport] Fix missing create events on macOS. 
[#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## v0.9.1 / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## v0.9.0 / 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. - -## v0.8.12 / 2013-11-13 - -* [API] Remove FD_SET and friends from Linux adapter - -## v0.8.11 / 2013-11-02 - -* [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) - -## v0.8.10 / 2013-10-19 - -* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) -* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) -* [Doc] specify OS-specific limits in README (thanks @debrando) - -## v0.8.9 / 2013-09-08 - -* [Doc] Contributing (thanks @nathany) -* [Doc] update package path in example code [#63][] (thanks @paulhammond) -* [Doc] GoCI badge in README (Linux only) [#60][] -* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) - -## v0.8.8 / 2013-06-17 - -* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) - -## v0.8.7 / 2013-06-03 - -* [API] Make syscall flags internal -* [Fix] inotify: ignore event changes -* [Fix] race in symlink test [#45][] (reported by @srid) -* [Fix] tests on Windows -* lower case error messages - -## v0.8.6 / 2013-05-23 - -* kqueue: Use EVT_ONLY flag on Darwin -* [Doc] Update README with full example - -## v0.8.5 / 2013-05-09 - -* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) - -## v0.8.4 / 2013-04-07 - -* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) - -## v0.8.3 / 2013-03-13 - -* [Fix] inoitfy/kqueue memory leak 
[#36][] (reported by @nbkolchin) -* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) - -## v0.8.2 / 2013-02-07 - -* [Doc] add Authors -* [Fix] fix data races for map access [#29][] (thanks @fsouza) - -## v0.8.1 / 2013-01-09 - -* [Fix] Windows path separators -* [Doc] BSD License - -## v0.8.0 / 2012-11-09 - -* kqueue: directory watching improvements (thanks @vmirage) -* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) -* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) - -## v0.7.4 / 2012-10-09 - -* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) -* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) -* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) -* [Fix] kqueue: modify after recreation of file - -## v0.7.3 / 2012-09-27 - -* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) -* [Fix] kqueue: no longer get duplicate CREATE events - -## v0.7.2 / 2012-09-01 - -* kqueue: events for created directories - -## v0.7.1 / 2012-07-14 - -* [Fix] for renaming files - -## v0.7.0 / 2012-07-02 - -* [Feature] FSNotify flags -* [Fix] inotify: Added file name back to event path - -## v0.6.0 / 2012-06-06 - -* kqueue: watch files after directory created (thanks @tmc) - -## v0.5.1 / 2012-05-22 - -* [Fix] inotify: remove all watches before Close() - -## v0.5.0 / 2012-05-03 - -* [API] kqueue: return errors during watch instead of sending over channel -* kqueue: match symlink behavior on Linux -* inotify: add `DELETE_SELF` (requested by @taralx) -* [Fix] kqueue: handle EINTR (reported by @robfig) -* [Doc] Godoc example [#1][] (thanks @davecheney) - -## v0.4.0 / 2012-03-30 - -* Go 1 released: build with go tool -* [Feature] Windows support using winfsnotify -* Windows does not have attribute change notifications -* Roll attribute notifications into IsModify - -## v0.3.0 / 2012-02-19 - -* 
kqueue: add files when watch directory - -## v0.2.0 / 2011-12-30 - -* update to latest Go weekly code - -## v0.1.0 / 2011-10-19 - -* kqueue: add watch on file creation to match inotify -* kqueue: create file event -* inotify: ignore `IN_IGNORED` events -* event String() -* linux: common FileEvent functions -* initial commit - -[#79]: https://github.com/howeyc/fsnotify/pull/79 -[#77]: https://github.com/howeyc/fsnotify/pull/77 -[#72]: https://github.com/howeyc/fsnotify/issues/72 -[#71]: https://github.com/howeyc/fsnotify/issues/71 -[#70]: https://github.com/howeyc/fsnotify/issues/70 -[#63]: https://github.com/howeyc/fsnotify/issues/63 -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#60]: https://github.com/howeyc/fsnotify/issues/60 -[#59]: https://github.com/howeyc/fsnotify/issues/59 -[#49]: https://github.com/howeyc/fsnotify/issues/49 -[#45]: https://github.com/howeyc/fsnotify/issues/45 -[#40]: https://github.com/howeyc/fsnotify/issues/40 -[#36]: https://github.com/howeyc/fsnotify/issues/36 -[#33]: https://github.com/howeyc/fsnotify/issues/33 -[#29]: https://github.com/howeyc/fsnotify/issues/29 -[#25]: https://github.com/howeyc/fsnotify/issues/25 -[#24]: https://github.com/howeyc/fsnotify/issues/24 -[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md deleted file mode 100644 index 828a60b..0000000 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributing - -## Issues - -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. 
- -## Pull Requests - -### Contributor License Agreement - -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). - -Please indicate that you have signed the CLA in your pull request. - -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. 
- -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. - -* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) -* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. -* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) -* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. -* When you're done, you will want to halt or destroy the Vagrant boxes. - -Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. - -Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). 
- -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE deleted file mode 100644 index e180c8f..0000000 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md deleted file mode 100644 index b2629e5..0000000 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# File system notifications for Go - -[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) - -fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: - -```console -go get -u golang.org/x/sys/... -``` - -Cross platform: Windows, Linux, BSD and macOS. - -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | -| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | - -\* Android and iOS are 
untested. - -Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. - -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. - -Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. - -## Usage - -```go -package main - -import ( - "log" - - "github.com/fsnotify/fsnotify" -) - -func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done -} -``` - -## Contributing - -Please refer to [CONTRIBUTING][] before opening an issue or pull request. - -## Example - -See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). - -## FAQ - -**When a file is moved to another directory is it still being watched?** - -No (it shouldn't be, unless you are watching where it was moved to). 
- -**When I watch a directory, are all subdirectories watched as well?** - -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). - -**Do I have to watch the Error and Event channels in a separate goroutine?** - -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) - -**Why am I receiving multiple events for the same file on OS X?** - -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). - -**How many files can be watched at once?** - -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. - -**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** - -fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. - -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 - -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md - -## Related Projects - -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) - diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index ced39cb..0000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go deleted file mode 100644 index 89cab04..0000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9 - -// Package fsnotify provides a platform-independent interface for file system notifications. -package fsnotify - -import ( - "bytes" - "errors" - "fmt" -) - -// Event represents a single file system notification. -type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. -} - -// Op describes a set of file operations. -type Op uint32 - -// These are the generalized file operations that can trigger a notification. 
-const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer - - if op&Create == Create { - buffer.WriteString("|CREATE") - } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") - } - if op&Write == Write { - buffer.WriteString("|WRITE") - } - if op&Rename == Rename { - buffer.WriteString("|RENAME") - } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") - } - if buffer.Len() == 0 { - return "" - } - return buffer.String()[1:] // Strip leading pipe -} - -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." -func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) -} - -// Common errors that can be reported by a watcher -var ( - ErrEventOverflow = errors.New("fsnotify queue overflow") -) diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index d9fd1b8..0000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. 
-type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. 
- // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. 
- err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. 
-func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index b33f2b4..0000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. 
- epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. -func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 86e76a3..0000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2010 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. 
- w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. 
- if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. 
- if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. 
-func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. 
-func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go deleted file mode 100644 index 2306c46..0000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go deleted file mode 100644 index 870c4d6..0000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 09436f3..0000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = 
iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O 
thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. 
-func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. 
-func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) - name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if 
raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! - if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/vendor/github.com/go-logr/logr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md deleted file mode 100644 index aca17f3..0000000 --- a/vendor/github.com/go-logr/logr/README.md +++ /dev/null @@ -1,181 +0,0 @@ -# A more minimal logging API for Go - -Before you consider this package, please read [this blog post by the -inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what -he has to say, and it largely aligns with my own experiences. Too many -choices of levels means inconsistent logs. - -This package offers a purely abstract interface, based on these ideas but with -a few twists. Code can depend on just this interface and have the actual -logging implementation be injected from callers. Ideally only `main()` knows -what logging implementation is being used. 
- -# Differences from Dave's ideas - -The main differences are: - -1) Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. I disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. I -restrict the API to just 2 types of logs: info and error. - -Info logs are things you want to tell the user which are not errors. Error -logs are, well, errors. If your code receives an `error` from a subordinate -function call and is logging that `error` *and not returning it*, use error -logs. - -2) Verbosity-levels on info logs. This gives developers a chance to indicate -arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug". Superficially this -may feel very similar, but the primary difference is the lack of semantics. -Because verbosity is a numerical value, it's safe to assume that an app running -with higher verbosity means more (and less important) logs will be generated. - -This is a BETA grade API. - -There are implementations for the following logging libraries: - -- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) -- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr) -- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) -- **log** (the Go standard library logger): - [stdr](https://github.com/go-logr/stdr) -- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - -# FAQ - -## Conceptual - -## Why structured logging? 
- -- **Structured logs are more easily queriable**: Since you've got - key-value pairs, it's much easier to query your structured logs for - particular values by filtering on the contents of a particular key -- - think searching request logs for error codes, Kubernetes reconcilers for - the name and namespace of the reconciled object, etc - -- **Structured logging makes it easier to have cross-referencable logs**: - Similarly to searchability, if you maintain conventions around your - keys, it becomes easy to gather all log lines related to a particular - concept. - -- **Structured logs allow better dimensions of filtering**: if you have - structure to your logs, you've got more precise control over how much - information is logged -- you might choose in a particular configuration - to log certain keys but not others, only log lines where a certain key - matches a certain value, etc, instead of just having v-levels and names - to key off of. - -- **Structured logs better represent structured data**: sometimes, the - data that you want to log is inherently structured (think tuple-link - objects). Structured logs allow you to preserve that structure when - outputting. - -## Why V-levels? - -**V-levels give operators an easy way to control the chattiness of log -operations**. V-levels provide a way for a given package to distinguish -the relative importance or verbosity of a given log message. Then, if -a particular logger or package is logging too many messages, the user -of the package can simply change the v-levels for that library. - -## Why not more named levels, like Warning? - -Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences -from Dave's ideas](#differences-from-daves-ideas). - -## Why not allow format strings, too? 
- -**Format strings negate many of the benefits of structured logs**: - -- They're not easily searchable without resorting to fuzzy searching, - regular expressions, etc - -- They don't store structured data well, since contents are flattened into - a string - -- They're not cross-referencable - -- They don't compress easily, since the message is not constant - -(unless you turn positional parameters into key-value pairs with numerical -keys, at which point you've gotten key-value logging with meaningless -keys) - -## Practical - -## Why key-value pairs, and not a map? - -Key-value pairs are *much* easier to optimize, especially around -allocations. Zap (a structured logger that inspired logr's interface) has -[performance measurements](https://github.com/uber-go/zap#performance) -that show this quite nicely. - -While the interface ends up being a little less obvious, you get -potentially better performance, plus avoid making users type -`map[string]string{}` every time they want to log. - -## What if my V-levels differ between libraries? - -That's fine. Control your V-levels on a per-logger basis, and use the -`WithName` function to pass different loggers to different libraries. - -Generally, you should take care to ensure that you have relatively -consistent V-levels within a given logger, however, as this makes deciding -on what verbosity of logs to request easier. - -## But I *really* want to use a format string! - -That's not actually a question. Assuming your question is "how do -I convert my mental model of logging with format strings to logging with -constant messages": - -1. figure out what the error actually is, as you'd write in a TL;DR style, - and use that as a message - -2. 
For every place you'd write a format specifier, look to the word before - it, and add that as a key value pair - -For instance, consider the following examples (all taken from spots in the -Kubernetes codebase): - -- `klog.V(4).Infof("Client is returning errors: code %v, error %v", - responseCode, err)` becomes `logger.Error(err, "client returned an - error", "code", responseCode)` - -- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", - seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after - response when requesting url", "attempt", retries, "after - seconds", seconds, "url", url)` - -If you *really* must use a format string, place it as a key value, and -call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to -reflect over type %T")` becomes `logger.Info("unable to reflect over -type", "type", fmt.Sprintf("%T"))`. In general though, the cases where -this is necessary should be few and far between. - -## How do I choose my V-levels? - -This is basically the only hard constraint: increase V-levels to denote -more verbose or more debug-y logs. - -Otherwise, you can start out with `0` as "you always want to see this", -`1` as "common logging that you might *possibly* want to turn off", and -`10` as "I would like to performance-test your log collection stack". - -Then gradually choose levels in between as you need them, working your way -down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs). - -## How do I choose my keys - -- make your keys human-readable -- constant keys are generally a good idea -- be consistent across your codebase -- keys should naturally match parts of the message string - -While key names are mostly unrestricted (and spaces are acceptable), -it's generally a good idea to stick to printable ascii characters, or at -least match the general character set of your log lines. 
- -[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go deleted file mode 100644 index 520c4fe..0000000 --- a/vendor/github.com/go-logr/logr/logr.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package logr defines abstract interfaces for logging. Packages can depend on -// these interfaces and callers can implement logging in whatever way is -// appropriate. -// -// This design derives from Dave Cheney's blog: -// http://dave.cheney.net/2015/11/05/lets-talk-about-logging -// -// This is a BETA grade API. Until there is a significant 2nd implementation, -// I don't really know how it will change. -// -// The logging specifically makes it non-trivial to use format strings, to encourage -// attaching structured information instead of unstructured format strings. -// -// Usage -// -// Logging is done using a Logger. Loggers can have name prefixes and named -// values attached, so that all log messages logged with that Logger have some -// base context associated. -// -// The term "key" is used to refer to the name associated with a particular -// value, to disambiguate it from the general Logger name. -// -// For instance, suppose we're trying to reconcile the state of an object, and -// we want to log that we've made some decision. 
-// -// With the traditional log package, we might write: -// log.Printf( -// "decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) -// -// With logr's structured logging, we'd write: -// // elsewhere in the file, set up the logger to log with the prefix of "reconcilers", -// // and the named value target-type=Foo, for extra context. -// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") -// -// // later on... -// log.Info("setting field foo on object", "value", targetValue, "object", object) -// -// Depending on our logging implementation, we could then make logging decisions -// based on field values (like only logging such events for objects in a certain -// namespace), or copy the structured information into a structured log store. -// -// For logging errors, Logger has a method called Error. Suppose we wanted to -// log an error while reconciling. With the traditional log package, we might -// write: -// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) -// -// With logr, we'd instead write: -// // assuming the above setup for log -// log.Error(err, "unable to reconcile object", "object", object) -// -// This functions similarly to: -// log.Info("unable to reconcile object", "error", err, "object", object) -// -// However, it ensures that a standard key for the error value ("error") is used -// across all error logging. Furthermore, certain implementations may choose to -// attach additional information (such as stack traces) on calls to Error, so -// it's preferred to use Error to log errors. -// -// Parts of a log line -// -// Each log message from a Logger has four types of context: -// logger name, log verbosity, log message, and the named values. -// -// The Logger name constists of a series of name "segments" added by successive -// calls to WithName. These name segments will be joined in some way by the -// underlying implementation. 
It is strongly reccomended that name segements -// contain simple identifiers (letters, digits, and hyphen), and do not contain -// characters that could muddle the log output or confuse the joining operation -// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc). -// -// Log verbosity represents how little a log matters. Level zero, the default, -// matters most. Increasing levels matter less and less. Try to avoid lots of -// different verbosity levels, and instead provide useful keys, logger names, -// and log messages for users to filter on. It's illegal to pass a log level -// below zero. -// -// The log message consists of a constant message attached to the the log line. -// This should generally be a simple description of what's occuring, and should -// never be a format string. -// -// Variable information can then be attached using named values (key/value -// pairs). Keys are arbitrary strings, while values may be any Go value. -// -// Key Naming Conventions -// -// Keys are not strictly required to conform to any specification or regex, but -// it is recommended that they: -// * be human-readable and meaningful (not auto-generated or simple ordinals) -// * be constant (not dependent on input data) -// * contain only printable characters -// * not contain whitespace or punctuation -// -// These guidelines help ensure that log data is processed properly regardless -// of the log implementation. For example, log implementations will try to -// output JSON data or will store data for later database (e.g. SQL) queries. -// -// While users are generally free to use key names of their choice, it's -// generally best to avoid using the following keys, as they're frequently used -// by implementations: -// -// - `"caller"`: the calling information (file/line) of a particular log line. -// - `"error"`: the underlying error value in the `Error` method. -// - `"level"`: the log level. -// - `"logger"`: the name of the associated logger. 
-// - `"msg"`: the log message. -// - `"stacktrace"`: the stack trace associated with a particular log line or -// error (often from the `Error` message). -// - `"ts"`: the timestamp for a log line. -// -// Implementations are encouraged to make use of these keys to represent the -// above concepts, when neccessary (for example, in a pure-JSON output form, it -// would be necessary to represent at least message and timestamp as ordinary -// named values). -package logr - -// TODO: consider adding back in format strings if they're really needed -// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects) -// TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats - -// Logger represents the ability to log messages, both errors and not. -type Logger interface { - // Enabled tests whether this Logger is enabled. For example, commandline - // flags might be used to set the logging verbosity and disable some info - // logs. - Enabled() bool - - // Info logs a non-error message with the given key/value pairs as context. - // - // The msg argument should be used to add some constant description to - // the log line. The key/value pairs can then be used to add additional - // variable information. The key/value pairs should alternate string - // keys and arbitrary values. - Info(msg string, keysAndValues ...interface{}) - - // Error logs an error, with the given message and key/value pairs as context. - // It functions similarly to calling Info with the "error" named value, but may - // have unique behavior, and should be preferred for logging errors (see the - // package documentations for more information). - // - // The msg field should be used to add context to any underlying error, - // while the err field should be used to attach the actual error that - // triggered this log line, if present. 
- Error(err error, msg string, keysAndValues ...interface{}) - - // V returns an Logger value for a specific verbosity level, relative to - // this Logger. In other words, V values are additive. V higher verbosity - // level means a log message is less important. It's illegal to pass a log - // level less than zero. - V(level int) Logger - - // WithValues adds some key-value pairs of context to a logger. - // See Info for documentation on how key/value pairs work. - WithValues(keysAndValues ...interface{}) Logger - - // WithName adds a new element to the logger's name. - // Successive calls with WithName continue to append - // suffixes to the logger's name. It's strongly reccomended - // that name segments contain only letters, digits, and hyphens - // (see the package documentation for more information). - WithName(name string) Logger -} diff --git a/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md deleted file mode 100644 index c88f9b2..0000000 --- a/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md +++ /dev/null @@ -1,50 +0,0 @@ -# How to Contribute - -## Getting Started - -- Fork the repository on GitHub -- Read the [README](README.markdown) for build and test instructions -- Play with the project, submit bugs, submit patches! - -## Contribution Flow - -This is a rough outline of what a contributor's workflow looks like: - -- Create a topic branch from where you want to base your work (usually master). -- Make commits of logical units. -- Make sure your commit messages are in the proper format (see below). -- Push your changes to a topic branch in your fork of the repository. -- Make sure the tests pass, and add any new tests as appropriate. -- Submit a pull request to the original repository. - -Thanks for your contributions! - -### Format of the Commit Message - -We follow a rough convention for commit messages that is designed to answer two -questions: what changed and why. 
The subject line should feature the what and -the body of the commit should describe the why. - -``` -scripts: add the test-cluster command - -this uses tmux to setup a test cluster that you can easily kill and -start for debugging. - -Fixes #38 -``` - -The format can be described more formally as follows: - -``` -: - - - -