Import Upstream version 1.6.24~ds1
This commit is contained in:
parent
5869e15aaf
commit
769048e199
|
@ -1,73 +0,0 @@
|
|||
---
|
||||
name: Bug report
|
||||
about: Create a bug report to help improve containerd
|
||||
title: ''
|
||||
labels: kind/bug
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
<!--
|
||||
If you are reporting a new issue, make sure that we do not have any duplicates
|
||||
already open. You can ensure this by searching the issue list for this
|
||||
repository. If there is a duplicate, please close your issue and add a comment
|
||||
to the existing issue instead.
|
||||
-->
|
||||
|
||||
**Description**
|
||||
|
||||
<!--
|
||||
Briefly describe the problem you are having in a few paragraphs.
|
||||
-->
|
||||
|
||||
**Steps to reproduce the issue:**
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
|
||||
**Describe the results you received:**
|
||||
|
||||
|
||||
**Describe the results you expected:**
|
||||
|
||||
|
||||
**What version of containerd are you using:**
|
||||
|
||||
```
|
||||
$ containerd --version
|
||||
|
||||
```
|
||||
|
||||
**Any other relevant information (runC version, CRI configuration, OS/Kernel version, etc.):**
|
||||
|
||||
<!--
|
||||
Tips:
|
||||
|
||||
* If containerd gets stuck on something and enables debug socket, `ctr pprof goroutines`
|
||||
dumps the golang stack of containerd, which is helpful! If containerd runs
|
||||
without debug socket, `kill -SIGUSR1 $(pidof containerd)` also dumps the stack
|
||||
as well.
|
||||
|
||||
* If there is something about running containerd, like consuming more CPU resources,
|
||||
`ctr pprof` subcommands will help you to get some useful profiles. Enable debug
|
||||
socket makes life easier.
|
||||
-->
|
||||
|
||||
<details><summary><code>runc --version</code></summary><br><pre>
|
||||
$ runc --version
|
||||
|
||||
</pre></details>
|
||||
|
||||
<!--
|
||||
Show related configuration if it is related to CRI plugin.
|
||||
-->
|
||||
|
||||
<details><summary><code>crictl info</code></summary><br><pre>
|
||||
$ crictl info
|
||||
|
||||
</pre></details>
|
||||
|
||||
|
||||
<details><summary><code>uname -a</code></summary><br><pre>
|
||||
$ uname -a
|
||||
|
||||
</pre></details>
|
|
@ -0,0 +1,70 @@
|
|||
name: Bug report
|
||||
description: Create a bug report to help improve containerd
|
||||
labels: kind/bug
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
If you are reporting a new issue, make sure that we do not have any duplicates
|
||||
already open. You can ensure this by searching the issue list for this
|
||||
repository. If there is a duplicate, please close your issue and add a comment
|
||||
to the existing issue instead.
|
||||
|
||||
Please have a look on the following tips before opening the issue:
|
||||
|
||||
<details>
|
||||
* If containerd gets stuck on something and enables debug socket, `ctr pprof goroutines`
|
||||
dumps the golang stack of containerd, which is helpful! If containerd runs
|
||||
without debug socket, `kill -SIGUSR1 $(pidof containerd)` also dumps the stack
|
||||
as well.
|
||||
|
||||
* If there is something about running containerd, like consuming more CPU resources,
|
||||
`ctr pprof` subcommands will help you to get some useful profiles. Enable debug
|
||||
socket makes life easier.
|
||||
|
||||
* `ctr` can't be used for testing CRI configs, as it does not use CRI API.
|
||||
</details>
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Description
|
||||
description: |
|
||||
Briefly describe the problem you are having in a few paragraphs.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Steps to reproduce the issue
|
||||
value: |
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe the results you received and expected
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: input
|
||||
attributes:
|
||||
label: What version of containerd are you using?
|
||||
placeholder: $ containerd --version
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Any other relevant information
|
||||
description: |
|
||||
runc version, CRI configuration, OS/Kernel version, etc.
|
||||
Use the following commands:
|
||||
$ runc --version
|
||||
$ crictl info (if you use Kubernetes)
|
||||
$ uname -a
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Show configuration if it is related to CRI plugin.
|
||||
placeholder: $ cat /etc/containerd/config.toml
|
|
@ -1,16 +0,0 @@
|
|||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for containerd
|
||||
title: ''
|
||||
labels: kind/feature
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
**What is the problem you're trying to solve**
|
||||
A clear and concise description of what the problem is.
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you'd like to happen.
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the feature request here.
|
|
@ -0,0 +1,25 @@
|
|||
name: Feature request
|
||||
description: Suggest an idea for containerd
|
||||
labels: kind/feature
|
||||
body:
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: What is the problem you're trying to solve
|
||||
description: |
|
||||
A clear and concise description of what the problem is.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe the solution you'd like
|
||||
description: |
|
||||
A clear and concise description of what you'd like to happen.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional context
|
||||
description: |
|
||||
Add any other context about the feature request here.
|
|
@ -0,0 +1,167 @@
|
|||
name: "Build volume test images"
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
push_to_project:
|
||||
description: "Project to build images for"
|
||||
required: true
|
||||
default: "ghcr.io/containerd"
|
||||
azure_windows_image_id:
|
||||
description: Windows image URN to deploy
|
||||
required: true
|
||||
default: MicrosoftWindowsServer:WindowsServer:2022-datacenter:20348.350.2111030009
|
||||
azure_vm_size:
|
||||
description: Windows image builder VM size
|
||||
required: true
|
||||
default: Standard_D2s_v3
|
||||
azure_location:
|
||||
description: The Azure region to deploy to
|
||||
required: true
|
||||
default: westeurope
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUB_ID }}
|
||||
DEFAULT_ADMIN_USERNAME: azureuser
|
||||
SSH_OPTS: "-o ServerAliveInterval=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
|
||||
AZURE_RESOURCE_GROUP: ctrd-test-image-build-${{ github.run_id }}
|
||||
|
||||
jobs:
|
||||
images:
|
||||
permissions:
|
||||
packages: write
|
||||
name: "Build volume test images"
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.20.8"
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
- name: Set env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Install docker
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt install -y ca-certificates curl gnupg lsb-release
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg > /tmp/docker.gpg
|
||||
sudo gpg --yes --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg /tmp/docker.gpg
|
||||
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
sudo apt update
|
||||
sudo apt install -y docker-ce docker-ce-cli containerd.io jq
|
||||
sudo adduser $USER docker
|
||||
|
||||
- name: Generate ssh key pair
|
||||
run: |
|
||||
mkdir -p $HOME/.ssh/
|
||||
ssh-keygen -t rsa -b 4096 -C "ci@containerd.com" -f $HOME/.ssh/id_rsa -q -N ""
|
||||
echo "SSH_PUB_KEY=$(cat ~/.ssh/id_rsa.pub)" >> $GITHUB_ENV
|
||||
|
||||
- name: Azure Login
|
||||
uses: azure/login@v1
|
||||
with:
|
||||
creds: ${{ secrets.AZURE_CREDS }}
|
||||
|
||||
- name: Create Azure Resource Group
|
||||
uses: azure/CLI@v1
|
||||
with:
|
||||
inlinescript: |
|
||||
az group create -n ${{ env.AZURE_RESOURCE_GROUP }} -l ${{ github.event.inputs.azure_location }} --tags creationTimestamp=$(date +%Y-%m-%dT%T%z)
|
||||
|
||||
- name: Create Windows Helper VM
|
||||
uses: azure/CLI@v1
|
||||
with:
|
||||
inlinescript: |
|
||||
PASSWORD="$(/usr/bin/tr -dc "a-zA-Z0-9@#$%^&*()_+?><~\`;" < /dev/urandom | /usr/bin/head -c 24; echo '')"
|
||||
az vm create -n WinDockerHelper \
|
||||
--admin-username ${{ env.DEFAULT_ADMIN_USERNAME }} \
|
||||
--public-ip-sku Basic \
|
||||
--admin-password "::add-mask::$PASSWORD" \
|
||||
--image ${{ github.event.inputs.azure_windows_image_id }} \
|
||||
-g ${{ env.AZURE_RESOURCE_GROUP }} \
|
||||
--size ${{ github.event.inputs.azure_vm_size }}
|
||||
az vm open-port --resource-group ${{ env.AZURE_RESOURCE_GROUP }} --name WinDockerHelper --port 22 --priority 101
|
||||
az vm open-port --resource-group ${{ env.AZURE_RESOURCE_GROUP }} --name WinDockerHelper --port 2376 --priority 102
|
||||
|
||||
- name: Prepare Windows image helper
|
||||
uses: azure/CLI@v1
|
||||
with:
|
||||
inlinescript: |
|
||||
# Installs Windows features, opens SSH and Docker port
|
||||
az vm run-command invoke \
|
||||
--command-id RunPowerShellScript \
|
||||
-n WinDockerHelper \
|
||||
-g ${{ env.AZURE_RESOURCE_GROUP }} \
|
||||
--scripts @$GITHUB_WORKSPACE/src/github.com/containerd/containerd/script/setup/prepare_windows_docker_helper.ps1
|
||||
# The prepare_windows_docker_helper.ps1 script reboots the server after enabling the Windows features
|
||||
# Give it a chance to reboot. Running another run-command via azure CLI should work even without this
|
||||
# sleep, but we want to avoid the possibility that it may run before the server reboots.
|
||||
sleep 30
|
||||
# Enable SSH and import public key
|
||||
az vm run-command invoke \
|
||||
--command-id RunPowerShellScript \
|
||||
-n WinDockerHelper \
|
||||
-g ${{ env.AZURE_RESOURCE_GROUP }} \
|
||||
--scripts @$GITHUB_WORKSPACE/src/github.com/containerd/containerd/script/setup/enable_ssh_windows.ps1 \
|
||||
--parameters 'SSHPublicKey=${{ env.SSH_PUB_KEY }}'
|
||||
|
||||
- name: Get Windows Helper IPs
|
||||
uses: azure/CLI@v1
|
||||
with:
|
||||
inlinescript: |
|
||||
VM_DETAILS=$(az vm show -d -g ${{ env.AZURE_RESOURCE_GROUP }} -n WinDockerHelper -o json)
|
||||
echo "PUBLIC_IP=$(echo $VM_DETAILS | jq -r .publicIps)" >> $GITHUB_ENV
|
||||
echo "PRIVATE_IP=$(echo $VM_DETAILS | jq -r .privateIps)" >> $GITHUB_ENV
|
||||
|
||||
- name: Enable Docker TLS
|
||||
shell: bash
|
||||
run: |
|
||||
scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} $GITHUB_WORKSPACE/src/github.com/containerd/containerd/script/setup/enable_docker_tls_on_windows.ps1 azureuser@${{ env.PUBLIC_IP }}:/enable_docker_tls_on_windows.ps1
|
||||
ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.PUBLIC_IP }} "powershell.exe -command { C:/enable_docker_tls_on_windows.ps1 -IPAddresses ${{ env.PUBLIC_IP }},${{ env.PRIVATE_IP }} }"
|
||||
|
||||
- name: Fetch client certificate and key
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p $HOME/.docker
|
||||
scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.PUBLIC_IP }}:/Users/azureuser/.docker/ca.pem $HOME/.docker/ca.pem
|
||||
scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.PUBLIC_IP }}:/Users/azureuser/.docker/cert.pem $HOME/.docker/cert.pem
|
||||
scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.PUBLIC_IP }}:/Users/azureuser/.docker/key.pem $HOME/.docker/key.pem
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and push images
|
||||
shell: bash
|
||||
run: |
|
||||
make -C $GITHUB_WORKSPACE/src/github.com/containerd/containerd/integration/images/volume-copy-up setup-buildx
|
||||
|
||||
make -C $GITHUB_WORKSPACE/src/github.com/containerd/containerd/integration/images/volume-copy-up build-registry PROJ=${{ github.event.inputs.push_to_project }} REMOTE_DOCKER_URL=${{ env.PUBLIC_IP }}:2376
|
||||
make -C $GITHUB_WORKSPACE/src/github.com/containerd/containerd/integration/images/volume-copy-up push-manifest PROJ=${{ github.event.inputs.push_to_project }} REMOTE_DOCKER_URL=${{ env.PUBLIC_IP }}:2376
|
||||
|
||||
make -C $GITHUB_WORKSPACE/src/github.com/containerd/containerd/integration/images/volume-ownership build-registry PROJ=${{ github.event.inputs.push_to_project }} REMOTE_DOCKER_URL=${{ env.PUBLIC_IP }}:2376
|
||||
make -C $GITHUB_WORKSPACE/src/github.com/containerd/containerd/integration/images/volume-ownership push-manifest PROJ=${{ github.event.inputs.push_to_project }} REMOTE_DOCKER_URL=${{ env.PUBLIC_IP }}:2376
|
||||
|
||||
- name: Cleanup resources
|
||||
if: always()
|
||||
uses: azure/CLI@v1
|
||||
with:
|
||||
inlinescript: |
|
||||
az group delete -g ${{ env.AZURE_RESOURCE_GROUP }} --yes
|
|
@ -2,77 +2,91 @@ name: CI
|
|||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
- 'release/**'
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
- 'release/**'
|
||||
|
||||
env:
|
||||
# Go version we currently use to build containerd across all CI.
|
||||
# Note: don't forget to update `Binaries` step, as it contains the matrix of all supported Go versions.
|
||||
GO_VERSION: "1.20.8"
|
||||
|
||||
permissions: # added using https://github.com/step-security/secure-workflows
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
#
|
||||
# golangci-lint
|
||||
#
|
||||
linters:
|
||||
permissions:
|
||||
contents: read # for actions/checkout to fetch code
|
||||
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
|
||||
name: Linters
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 10
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.16.12]
|
||||
os: [ubuntu-18.04, macos-10.15, windows-2019]
|
||||
os: [ubuntu-20.04, macos-12, windows-2019]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
- name: Set env
|
||||
shell: bash
|
||||
- name: Install dependencies
|
||||
if: matrix.os == 'ubuntu-20.04'
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y libbtrfs-dev
|
||||
|
||||
- uses: golangci/golangci-lint-action@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
version: v1.36.0
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
args: --timeout=5m
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- uses: golangci/golangci-lint-action@v3
|
||||
with:
|
||||
version: v1.51.1
|
||||
skip-cache: true
|
||||
args: --timeout=8m
|
||||
|
||||
#
|
||||
# Project checks
|
||||
#
|
||||
project:
|
||||
name: Project Checks
|
||||
runs-on: ubuntu-18.04
|
||||
if: github.repository == 'containerd/containerd'
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 5
|
||||
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.16.12'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- shell: bash
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
fetch-depth: 100
|
||||
|
||||
- uses: containerd/project-checks@v1
|
||||
- uses: containerd/project-checks@v1.1.0
|
||||
with:
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
repo-access-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: verify go modules and vendor directory
|
||||
run: |
|
||||
sudo apt-get install -y jq
|
||||
make verify-vendor
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
#
|
||||
# Protobuf checks
|
||||
#
|
||||
protos:
|
||||
name: Protobuf
|
||||
runs-on: ubuntu-18.04
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 5
|
||||
|
||||
defaults:
|
||||
|
@ -80,11 +94,11 @@ jobs:
|
|||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.16.12'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
|
@ -92,7 +106,6 @@ jobs:
|
|||
shell: bash
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "GO111MODULE=off" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Install protobuf
|
||||
|
@ -109,34 +122,22 @@ jobs:
|
|||
|
||||
man:
|
||||
name: Manpages
|
||||
runs-on: ubuntu-18.04
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 5
|
||||
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.16.12'
|
||||
|
||||
- name: Set env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
- run: GO111MODULE=on go get github.com/cpuguy83/go-md2man/v2@v2.0.0
|
||||
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- uses: actions/checkout@v3
|
||||
- run: go install github.com/cpuguy83/go-md2man/v2@v2.0.1
|
||||
- run: make man
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
# Make sure binaries compile with other platforms
|
||||
crossbuild:
|
||||
name: Crossbuild Binaries
|
||||
needs: [project, linters, protos, man]
|
||||
runs-on: ubuntu-18.04
|
||||
needs: [linters, protos, man]
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
@ -150,6 +151,10 @@ jobs:
|
|||
- goos: linux
|
||||
goarch: arm
|
||||
goarm: "5"
|
||||
- goos: linux
|
||||
goarch: ppc64le
|
||||
- goos: linux
|
||||
goarch: riscv64
|
||||
- goos: freebsd
|
||||
goarch: amd64
|
||||
- goos: freebsd
|
||||
|
@ -159,21 +164,14 @@ jobs:
|
|||
goarm: "7"
|
||||
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.16.12'
|
||||
- name: Set env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- uses: actions/checkout@v3
|
||||
- run: |
|
||||
set -e -x
|
||||
|
||||
packages=""
|
||||
packages="libbtrfs-dev"
|
||||
platform="${{matrix.goos}}/${{matrix.goarch}}"
|
||||
if [ -n "${{matrix.goarm}}" ]; then
|
||||
platform+="/v${{matrix.goarm}}"
|
||||
|
@ -195,6 +193,16 @@ jobs:
|
|||
echo "CGO_ENABLED=1" >> $GITHUB_ENV
|
||||
echo "CC=aarch64-linux-gnu-gcc" >> $GITHUB_ENV
|
||||
;;
|
||||
linux/ppc64le)
|
||||
packages+=" crossbuild-essential-ppc64el"
|
||||
echo "CGO_ENABLED=1" >> $GITHUB_ENV
|
||||
echo "CC=powerpc64le-linux-gnu-gcc" >> $GITHUB_ENV
|
||||
;;
|
||||
linux/riscv64)
|
||||
packages+=" crossbuild-essential-riscv64"
|
||||
echo "CGO_ENABLED=1" >> $GITHUB_ENV
|
||||
echo "CC=riscv64-linux-gnu-gcc" >> $GITHUB_ENV
|
||||
;;
|
||||
windows/arm/v7)
|
||||
echo "CGO_ENABLED=0" >> $GITHUB_ENV
|
||||
;;
|
||||
|
@ -203,9 +211,8 @@ jobs:
|
|||
if [ -n "${packages}" ]; then
|
||||
sudo apt-get update && sudo apt-get install -y ${packages}
|
||||
fi
|
||||
name: install deps
|
||||
name: Install deps
|
||||
- name: Build
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
env:
|
||||
GOOS: ${{matrix.goos}}
|
||||
GOARCH: ${{matrix.goarch}}
|
||||
|
@ -221,19 +228,20 @@ jobs:
|
|||
name: Binaries
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 10
|
||||
needs: [project, linters, protos, man]
|
||||
needs: [linters, protos, man]
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-18.04, macos-10.15, windows-2019]
|
||||
go-version: ['1.16.12']
|
||||
include:
|
||||
# Go 1.13.x is still used by Docker/Moby
|
||||
- go-version: '1.13.x'
|
||||
os: ubuntu-18.04
|
||||
|
||||
os: [ubuntu-20.04, macos-12, windows-2019, windows-2022]
|
||||
go-version: ["1.20.8", "1.19.12"]
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
- name: Install dependencies
|
||||
if: matrix.os == 'ubuntu-20.04'
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y libbtrfs-dev
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
|
||||
|
@ -243,7 +251,7 @@ jobs:
|
|||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
|
@ -258,27 +266,32 @@ jobs:
|
|||
#
|
||||
integration-windows:
|
||||
name: Windows Integration
|
||||
runs-on: windows-2019
|
||||
timeout-minutes: 30
|
||||
needs: [project, linters, protos, man]
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 35
|
||||
needs: [linters, protos, man]
|
||||
env:
|
||||
GOTEST: gotestsum --
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [windows-2019, windows-2022]
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.16.12'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: Microsoft/hcsshim
|
||||
path: src/github.com/Microsoft/hcsshim
|
||||
|
@ -321,10 +334,11 @@ jobs:
|
|||
- name: Integration 2
|
||||
env:
|
||||
TESTFLAGS_PARALLEL: 1
|
||||
EXTRA_TESTFLAGS: "-short"
|
||||
CGO_ENABLED: 1
|
||||
GOTESTSUM_JUNITFILE: ${{github.workspace}}/test-integration-parallel-junit.xml
|
||||
run: mingw32-make.exe integration
|
||||
- uses: actions/upload-artifact@v2
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always()
|
||||
with:
|
||||
name: TestResults Windows
|
||||
|
@ -333,9 +347,9 @@ jobs:
|
|||
|
||||
integration-linux:
|
||||
name: Linux Integration
|
||||
runs-on: ubuntu-18.04
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 40
|
||||
needs: [project, linters, protos, man]
|
||||
needs: [linters, protos, man]
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
@ -351,116 +365,88 @@ jobs:
|
|||
env:
|
||||
GOTEST: gotestsum --
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.16.12'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
- name: Set env
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install containerd dependencies
|
||||
env:
|
||||
RUNC_FLAVOR: ${{ matrix.runc }}
|
||||
run: |
|
||||
sudo apt-get install -y gperf
|
||||
sudo -E PATH=$PATH script/setup/install-seccomp
|
||||
sudo -E PATH=$PATH script/setup/install-runc
|
||||
sudo -E PATH=$PATH script/setup/install-cni
|
||||
sudo -E PATH=$PATH script/setup/install-critools
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
sudo apt-get install -y gperf libbtrfs-dev
|
||||
script/setup/install-seccomp
|
||||
script/setup/install-runc
|
||||
script/setup/install-cni $(grep containernetworking/plugins go.mod | awk '{print $2}')
|
||||
script/setup/install-critools
|
||||
script/setup/install-failpoint-binaries
|
||||
|
||||
- name: Install criu
|
||||
run: |
|
||||
sudo apt-get install -y \
|
||||
libprotobuf-dev \
|
||||
libprotobuf-c-dev \
|
||||
protobuf-c-compiler \
|
||||
protobuf-compiler \
|
||||
python-protobuf \
|
||||
libnl-3-dev \
|
||||
libnet-dev \
|
||||
libcap-dev \
|
||||
python-future
|
||||
wget https://github.com/checkpoint-restore/criu/archive/v3.13.tar.gz -O criu.tar.gz
|
||||
tar -zxf criu.tar.gz
|
||||
cd criu-3.13
|
||||
sudo make install-criu
|
||||
sudo add-apt-repository ppa:criu/ppa
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y criu
|
||||
|
||||
- name: Install containerd
|
||||
env:
|
||||
CGO_ENABLED: 1
|
||||
run: |
|
||||
make binaries
|
||||
make binaries GO_BUILD_FLAGS="-mod=vendor"
|
||||
sudo -E PATH=$PATH make install
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
- run: sudo -E PATH=$PATH script/setup/install-gotestsum
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
- run: script/setup/install-gotestsum
|
||||
- name: Tests
|
||||
env:
|
||||
GOPROXY: direct
|
||||
GOTESTSUM_JUNITFILE: ${{github.workspace}}/test-unit-root-junit.xml
|
||||
run: |
|
||||
make test
|
||||
sudo -E PATH=$PATH make root-test
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
- name: Integration 1
|
||||
env:
|
||||
GOPROXY: direct
|
||||
TEST_RUNTIME: ${{ matrix.runtime }}
|
||||
RUNC_FLAVOR: ${{ matrix.runc }}
|
||||
GOTESTSUM_JUNITFILE: ${{github.workspace}}/test-integration-serial-junit.xml
|
||||
run: |
|
||||
sudo -E PATH=$PATH make integration EXTRA_TESTFLAGS=-no-criu TESTFLAGS_RACE=-race
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
extraflags=""
|
||||
[ "${RUNC_FLAVOR}" == "crun" ] && {
|
||||
extraflags="EXTRA_TESTFLAGS=-no-criu";
|
||||
}
|
||||
sudo -E PATH=$PATH make integration ${extraflags} TESTFLAGS_RACE=-race
|
||||
|
||||
# Run the integration suite a second time. See discussion in github.com/containerd/containerd/pull/1759
|
||||
- name: Integration 2
|
||||
env:
|
||||
GOPROXY: direct
|
||||
TEST_RUNTIME: ${{ matrix.runtime }}
|
||||
RUNC_FLAVOR: ${{ matrix.runc }}
|
||||
GOTESTSUM_JUNITFILE: ${{github.workspace}}/test-integration-parallel-junit.xml
|
||||
run: |
|
||||
sudo -E PATH=$PATH TESTFLAGS_PARALLEL=1 make integration EXTRA_TESTFLAGS=-no-criu
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
# CRIU wouldn't work with overlay snapshotter yet.
|
||||
# See https://github.com/containerd/containerd/pull/4708#issuecomment-724322294.
|
||||
- name: CRIU Integration
|
||||
env:
|
||||
GOPROXY: direct
|
||||
TEST_RUNTIME: ${{ matrix.runtime }}
|
||||
RUNC_FLAVOR: ${{ matrix.runc }}
|
||||
GOTESTSUM_JUNITFILE: ${{github.workspace}}/test-integration-criu-junit.xml
|
||||
# crun doesn't have "checkpoint" command.
|
||||
if: ${{ matrix.runc == 'runc' }}
|
||||
run: |
|
||||
sudo -E PATH=$PATH \
|
||||
TESTFLAGS_PARALLEL=1 \
|
||||
TEST_SNAPSHOTTER=native \
|
||||
make integration EXTRA_TESTFLAGS='-run TestCheckpoint'
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
extraflags=""
|
||||
[ "${RUNC_FLAVOR}" == "crun" ] && {
|
||||
extraflags="EXTRA_TESTFLAGS=-no-criu";
|
||||
}
|
||||
sudo -E PATH=$PATH TESTFLAGS_PARALLEL=1 make integration ${extraflags}
|
||||
|
||||
- name: CRI Integration Test
|
||||
env:
|
||||
TEST_RUNTIME: ${{ matrix.runtime }}
|
||||
run: |
|
||||
CONTAINERD_RUNTIME=$TEST_RUNTIME make cri-integration
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
- name: cri-tools critest
|
||||
env:
|
||||
TEST_RUNTIME: ${{ matrix.runtime }}
|
||||
run: |
|
||||
BDIR="$(mktemp -d -p $PWD)"
|
||||
|
||||
function cleanup() {
|
||||
sudo pkill containerd || true
|
||||
cat ${BDIR}/containerd-cri.log
|
||||
sudo -E rm -rf ${BDIR}
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
mkdir -p ${BDIR}/{root,state}
|
||||
cat > ${BDIR}/config.toml <<EOF
|
||||
version = 2
|
||||
|
@ -471,11 +457,6 @@ jobs:
|
|||
sudo -E PATH=$PATH /usr/local/bin/containerd -a ${BDIR}/c.sock --config ${BDIR}/config.toml --root ${BDIR}/root --state ${BDIR}/state --log-level debug &> ${BDIR}/containerd-cri.log &
|
||||
sudo -E PATH=$PATH /usr/local/bin/ctr -a ${BDIR}/c.sock version
|
||||
sudo -E PATH=$PATH critest --report-dir "${{github.workspace}}/critestreport" --runtime-endpoint=unix:///${BDIR}/c.sock --parallel=8
|
||||
TEST_RC=$?
|
||||
test $TEST_RC -ne 0 && cat ${BDIR}/containerd-cri.log
|
||||
sudo pkill containerd
|
||||
sudo -E rm -rf ${BDIR}
|
||||
test $TEST_RC -eq 0 || /bin/false
|
||||
|
||||
# Log the status of this VM to investigate issues like
|
||||
# https://github.com/containerd/containerd/issues/4969
|
||||
|
@ -486,7 +467,7 @@ jobs:
|
|||
mount
|
||||
df
|
||||
losetup -l
|
||||
- uses: actions/upload-artifact@v2
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always()
|
||||
with:
|
||||
name: TestResults ${{ matrix.runtime }} ${{matrix.runc}}
|
||||
|
@ -496,65 +477,62 @@ jobs:
|
|||
|
||||
tests-mac-os:
|
||||
name: MacOS unit tests
|
||||
runs-on: macos-10.15
|
||||
runs-on: macos-12
|
||||
timeout-minutes: 10
|
||||
needs: [project, linters, protos, man]
|
||||
needs: [linters, protos, man]
|
||||
env:
|
||||
GOTEST: gotestsum --
|
||||
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.16.12'
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
- name: Set env
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
- run: sudo -E PATH=$PATH script/setup/install-gotestsum
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- uses: actions/checkout@v3
|
||||
- run: script/setup/install-gotestsum
|
||||
- name: Tests
|
||||
env:
|
||||
GOPROXY: direct
|
||||
GOTESTSUM_JUNITFILE: "${{ github.workspace }}/macos-test-junit.xml"
|
||||
run: |
|
||||
make test
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
- uses: actions/upload-artifact@v2
|
||||
run: make test
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always()
|
||||
with:
|
||||
name: TestResults MacOS
|
||||
path: |
|
||||
*-junit.xml
|
||||
|
||||
cgroup2:
|
||||
name: CGroupsV2 and SELinux Integration
|
||||
vagrant:
|
||||
name: Vagrant
|
||||
# nested virtualization is only available on macOS hosts
|
||||
runs-on: macos-10.15
|
||||
runs-on: macos-12
|
||||
timeout-minutes: 45
|
||||
needs: [project, linters, protos, man]
|
||||
needs: [linters, protos, man]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# Currently crun is disabled to decrease CI flakiness.
|
||||
# We can enable crun again when we get a better CI infra.
|
||||
runc: [runc]
|
||||
# Fedora is for testing cgroup v2 functionality, Rocky Linux is for testing on an enterprise-grade environment
|
||||
box: ["fedora/37-cloud-base", "rockylinux/8"]
|
||||
env:
|
||||
GOTEST: gotestsum --
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: "Cache ~/.vagrant.d/boxes"
|
||||
uses: actions/cache@v2
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/.vagrant.d/boxes
|
||||
key: vagrant-${{ hashFiles('Vagrantfile*') }}
|
||||
|
||||
- name: Vagrant start
|
||||
env:
|
||||
BOX: ${{ matrix.box }}
|
||||
run: |
|
||||
if [ "$BOX" = "rockylinux/8" ]; then
|
||||
# The latest version 5.0.0 seems 404 (as of March 30, 2022)
|
||||
export BOX_VERSION="4.0.0"
|
||||
fi
|
||||
# Retry if it fails (download.fedoraproject.org returns 404 sometimes)
|
||||
vagrant up || vagrant up
|
||||
|
||||
|
@ -571,6 +549,11 @@ jobs:
|
|||
SELINUX: Enforcing
|
||||
REPORT_DIR: /tmp/critestreport
|
||||
run: vagrant up --provision-with=selinux,install-runc,install-gotestsum,test-cri
|
||||
|
||||
- name: Collect the VM's IP address for Docker Hub's throttling issue
|
||||
if: failure()
|
||||
run: vagrant ssh -- curl https://api64.ipify.org/
|
||||
|
||||
- name: Get test reports
|
||||
if: always()
|
||||
run: |
|
||||
|
@ -579,10 +562,43 @@ jobs:
|
|||
vagrant plugin install vagrant-scp
|
||||
vagrant scp :/tmp/test-integration-junit.xml "${{ github.workspace }}/"
|
||||
vagrant scp :/tmp/critestreport "${{ github.workspace }}/critestreport"
|
||||
- uses: actions/upload-artifact@v2
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always()
|
||||
with:
|
||||
name: TestResults cgroup2 ${{ matrix.runtime }} ${{matrix.runc}}
|
||||
# ${{ matrix.box }} cannot be used here due to character limitation
|
||||
name: TestResults vagrant ${{ github.run_id }} ${{ matrix.runtime }} ${{matrix.runc}}
|
||||
path: |
|
||||
${{github.workspace}}/*-junit.xml
|
||||
${{github.workspace}}/critestreport/*
|
||||
|
||||
cgroup2-misc:
|
||||
name: CGroupsV2 - rootless CRI test
|
||||
# nested virtualization is only available on macOS hosts
|
||||
runs-on: macos-12
|
||||
timeout-minutes: 45
|
||||
needs: [linters, protos, man]
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: "Cache ~/.vagrant.d/boxes"
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/.vagrant.d/boxes
|
||||
key: vagrant-${{ hashFiles('Vagrantfile*') }}
|
||||
|
||||
- name: Vagrant start
|
||||
run: |
|
||||
# Retry if it fails (download.fedoraproject.org returns 404 sometimes)
|
||||
vagrant up || vagrant up
|
||||
|
||||
# slow, so separated from the regular cgroup2 task
|
||||
- name: CRI-in-UserNS test with Rootless Podman
|
||||
run: |
|
||||
vagrant up --provision-with=install-rootless-podman
|
||||
# Execute rootless podman to create the UserNS env
|
||||
vagrant ssh -- podman build --target cri-in-userns -t cri-in-userns -f /vagrant/contrib/Dockerfile.test /vagrant
|
||||
vagrant ssh -- podman run --rm --privileged cri-in-userns
|
||||
|
||||
- name: Collect the VM's IP address for Docker Hub's throttling issue
|
||||
if: failure()
|
||||
run: vagrant ssh -- curl https://api64.ipify.org/
|
||||
|
|
|
@ -2,15 +2,24 @@ name: "CodeQL Scan"
|
|||
|
||||
on:
|
||||
push:
|
||||
schedule:
|
||||
- cron: '0 0 * * 0'
|
||||
branches:
|
||||
- main
|
||||
- 'release/**'
|
||||
pull_request:
|
||||
paths:
|
||||
- '.github/workflows/codeql.yml'
|
||||
branches:
|
||||
- main
|
||||
- 'release/**'
|
||||
|
||||
permissions: # added using https://github.com/step-security/secure-workflows
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
CodeQL-Build:
|
||||
|
||||
if: github.repository == 'containerd/containerd'
|
||||
permissions:
|
||||
actions: read # for github/codeql-action/init to get workflow details
|
||||
contents: read # for actions/checkout to fetch code
|
||||
security-events: write # for github/codeql-action/analyze to upload SARIF results
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
||||
|
@ -20,30 +29,22 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.20.8
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v1
|
||||
uses: github/codeql-action/init@v2
|
||||
# Override language selection by uncommenting this and choosing your languages
|
||||
# with:
|
||||
# languages: go, javascript, csharp, python, cpp, java
|
||||
|
||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||
# If this step fails, then you should remove it and run the build manually (see below).
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v1
|
||||
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 https://git.io/JvXDl
|
||||
|
||||
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
|
||||
# and modify them (or add more) to build your code if your project
|
||||
# uses a compiled language
|
||||
|
||||
#- run: |
|
||||
# make bootstrap
|
||||
# make release
|
||||
- run: |
|
||||
sudo apt-get install -y libseccomp-dev libbtrfs-dev
|
||||
make
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v1
|
||||
uses: github/codeql-action/analyze@v2
|
||||
|
|
|
@ -0,0 +1,77 @@
|
|||
name: "Mirror Test Image"
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
upstream:
|
||||
description: "Upstream image to mirror"
|
||||
required: true
|
||||
default: "docker.io/library/busybox:1.32"
|
||||
image:
|
||||
description: "Target image name (override)"
|
||||
|
||||
permissions: # added using https://github.com/step-security/secure-workflows
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
mirror:
|
||||
name: "Mirror Image"
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
packages: write
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.20.8"
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
- name: Set env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Install containerd dependencies
|
||||
env:
|
||||
RUNC_FLAVOR: ${{ matrix.runc }}
|
||||
GOFLAGS: -modcacherw
|
||||
run: |
|
||||
sudo apt-get install -y gperf
|
||||
sudo -E PATH=$PATH script/setup/install-seccomp
|
||||
|
||||
- name: Install containerd
|
||||
env:
|
||||
CGO_ENABLED: 1
|
||||
run: |
|
||||
make binaries GO_BUILD_FLAGS="-mod=vendor" GO_BUILDTAGS="no_btrfs"
|
||||
sudo -E PATH=$PATH make install
|
||||
|
||||
- name: Pull and push image
|
||||
shell: bash
|
||||
run: |
|
||||
sudo containerd -l debug & > /tmp/containerd.out
|
||||
containerd_pid=$!
|
||||
sleep 5
|
||||
|
||||
upstream=${{ github.event.inputs.upstream }}
|
||||
target=${{ github.event.inputs.image }}
|
||||
if [[ "$target" == "" ]]; then
|
||||
mirror="ghcr.io/containerd/${upstream##*/}"
|
||||
else
|
||||
mirror="ghcr.io/containerd/${target}"
|
||||
fi
|
||||
|
||||
echo "Mirroring $upstream to $mirror"
|
||||
|
||||
sudo ctr content fetch --all-platforms ${upstream}
|
||||
sudo ctr images ls
|
||||
sudo ctr --debug images push -u ${{ github.actor }}:${{ secrets.GITHUB_TOKEN }} ${mirror} ${upstream}
|
||||
|
||||
sudo kill $containerd_pid
|
|
@ -6,6 +6,12 @@ on:
|
|||
paths:
|
||||
- '.github/workflows/nightly.yml'
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.20.8"
|
||||
|
||||
permissions: # added using https://github.com/step-security/secure-workflows
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
linux:
|
||||
name: Linux
|
||||
|
@ -16,11 +22,11 @@ jobs:
|
|||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.16.12'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
|
@ -36,12 +42,13 @@ jobs:
|
|||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo add-apt-repository "deb [arch=arm64,s390x,ppc64el] http://ports.ubuntu.com/ubuntu-ports/ $(lsb_release -sc) main" || true
|
||||
sudo add-apt-repository "deb [arch=arm64,s390x,ppc64el] http://ports.ubuntu.com/ubuntu-ports/ $(lsb_release -sc)-updates main" || true
|
||||
sudo add-apt-repository "deb [arch=arm64,s390x,ppc64el,riscv64] http://ports.ubuntu.com/ubuntu-ports/ $(lsb_release -sc) main" || true
|
||||
sudo add-apt-repository "deb [arch=arm64,s390x,ppc64el,riscv64] http://ports.ubuntu.com/ubuntu-ports/ $(lsb_release -sc)-updates main" || true
|
||||
|
||||
sudo dpkg --add-architecture arm64
|
||||
sudo dpkg --add-architecture s390x
|
||||
sudo dpkg --add-architecture ppc64el
|
||||
sudo dpkg --add-architecture riscv64
|
||||
|
||||
sudo apt-get update || true
|
||||
|
||||
|
@ -49,14 +56,12 @@ jobs:
|
|||
crossbuild-essential-arm64 \
|
||||
crossbuild-essential-s390x \
|
||||
crossbuild-essential-ppc64el \
|
||||
libseccomp-dev:amd64 \
|
||||
libseccomp-dev:arm64 \
|
||||
libseccomp-dev:s390x \
|
||||
libseccomp-dev:ppc64el \
|
||||
crossbuild-essential-riscv64 \
|
||||
libbtrfs-dev:amd64 \
|
||||
libbtrfs-dev:arm64 \
|
||||
libbtrfs-dev:s390x \
|
||||
libbtrfs-dev:ppc64el
|
||||
libbtrfs-dev:ppc64el \
|
||||
libbtrfs-dev:riscv64
|
||||
|
||||
- name: Build amd64
|
||||
env:
|
||||
|
@ -96,6 +101,16 @@ jobs:
|
|||
make binaries
|
||||
mv bin bin_ppc64le
|
||||
|
||||
- name: Build riscv64
|
||||
env:
|
||||
GOOS: linux
|
||||
GOARCH: riscv64
|
||||
CGO_ENABLED: 1
|
||||
CC: riscv64-linux-gnu-gcc
|
||||
run: |
|
||||
make binaries
|
||||
mv bin bin_riscv64
|
||||
|
||||
#
|
||||
# Upload
|
||||
#
|
||||
|
@ -124,6 +139,12 @@ jobs:
|
|||
name: linux_ppc64le
|
||||
path: src/github.com/containerd/containerd/bin_ppc64le
|
||||
|
||||
- name: Upload artifacts (linux_riscv64)
|
||||
uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: linux_riscv64
|
||||
path: src/github.com/containerd/containerd/bin_riscv64
|
||||
|
||||
windows:
|
||||
name: Windows
|
||||
runs-on: windows-latest
|
||||
|
@ -133,11 +154,11 @@ jobs:
|
|||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.16.12'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
|
|
|
@ -5,17 +5,23 @@ on:
|
|||
|
||||
name: Containerd Release
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.20.8"
|
||||
|
||||
permissions: # added using https://github.com/step-security/secure-workflows
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
check:
|
||||
name: Check Signed Tag
|
||||
runs-on: ubuntu-18.04
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
stringver: ${{ steps.contentrel.outputs.stringver }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ github.ref }}
|
||||
path: src/github.com/containerd/containerd
|
||||
|
@ -38,218 +44,110 @@ jobs:
|
|||
id: contentrel
|
||||
run: |
|
||||
RELEASEVER=${{ github.ref }}
|
||||
echo "::set-output name=stringver::${RELEASEVER#refs/tags/v}"
|
||||
echo "stringver=${RELEASEVER#refs/tags/v}" >> $GITHUB_OUTPUT
|
||||
git tag -l ${RELEASEVER#refs/tags/} -n20000 | tail -n +3 | cut -c 5- >release-notes.md
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
- name: Save release notes
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: containerd-release-notes
|
||||
path: src/github.com/containerd/containerd/release-notes.md
|
||||
|
||||
build:
|
||||
name: Build Release Binaries
|
||||
runs-on: ${{ matrix.os }}
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [check]
|
||||
timeout-minutes: 10
|
||||
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-18.04, windows-2019]
|
||||
|
||||
include:
|
||||
# Choose an old release of Ubuntu to avoid glibc issue https://github.com/containerd/containerd/issues/7255
|
||||
- dockerfile-ubuntu: 18.04
|
||||
dockerfile-platform: linux/amd64
|
||||
- dockerfile-ubuntu: 18.04
|
||||
dockerfile-platform: linux/arm64
|
||||
- dockerfile-ubuntu: 18.04
|
||||
dockerfile-platform: linux/ppc64le
|
||||
# riscv64 isn't supported by Ubuntu 18.04
|
||||
- dockerfile-ubuntu: 22.04
|
||||
dockerfile-platform: linux/riscv64
|
||||
- dockerfile-ubuntu: 18.04
|
||||
dockerfile-platform: windows/amd64
|
||||
steps:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.16.12'
|
||||
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- name: Set env
|
||||
shell: bash
|
||||
env:
|
||||
MOS: ${{ matrix.os }}
|
||||
MOS: ubuntu-20.04
|
||||
run: |
|
||||
releasever=${{ github.ref }}
|
||||
releasever="${releasever#refs/tags/}"
|
||||
os=linux
|
||||
[[ "${MOS}" =~ "windows" ]] && {
|
||||
os=windows
|
||||
}
|
||||
echo "RELEASE_VER=${releasever}" >> $GITHUB_ENV
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "OS=${os}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Checkout containerd
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: containerd/containerd
|
||||
# Intentionally use github.repository instead of containerd/containerd to
|
||||
# make this action runnable on forks.
|
||||
# See https://github.com/containerd/containerd/issues/5098 for the context.
|
||||
repository: ${{ github.repository }}
|
||||
ref: ${{ github.ref }}
|
||||
path: src/github.com/containerd/containerd
|
||||
|
||||
- name: HCS Shim commit
|
||||
id: hcsshim_commit
|
||||
if: startsWith(matrix.os, 'windows')
|
||||
shell: bash
|
||||
run: echo "::set-output name=sha::$(grep 'Microsoft/hcsshim ' go.mod | awk '{print $2}')"
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
- name: Checkout hcsshim source
|
||||
if: startsWith(matrix.os, 'windows')
|
||||
uses: actions/checkout@v2
|
||||
- name: Setup buildx instance
|
||||
uses: docker/setup-buildx-action@v2
|
||||
with:
|
||||
repository: Microsoft/hcsshim
|
||||
ref: ${{ steps.hcsshim_commit.outputs.sha }}
|
||||
path: src/github.com/Microsoft/hcsshim
|
||||
|
||||
use: true
|
||||
- uses: crazy-max/ghaction-github-runtime@v2 # sets up needed vars for caching to github
|
||||
- name: Make
|
||||
shell: bash
|
||||
run: |
|
||||
make build
|
||||
make binaries
|
||||
rm bin/containerd-stress*
|
||||
[[ "${OS}" == "windows" ]] && {
|
||||
(
|
||||
bindir="$(pwd)/bin"
|
||||
cd ../../Microsoft/hcsshim
|
||||
GO111MODULE=on go build -mod=vendor -o "${bindir}/containerd-shim-runhcs-v1.exe" ./cmd/containerd-shim-runhcs-v1
|
||||
)
|
||||
}
|
||||
TARFILE="containerd-${RELEASE_VER#v}-${OS}-amd64.tar.gz"
|
||||
tar czf ${TARFILE} bin/
|
||||
sha256sum ${TARFILE} >${TARFILE}.sha256sum
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
|
||||
- name: Save build binaries
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: containerd-binaries-${{ matrix.os }}
|
||||
path: src/github.com/containerd/containerd/*.tar.gz*
|
||||
|
||||
- name: Make cri-containerd tar
|
||||
shell: bash
|
||||
env:
|
||||
RUNC_FLAVOR: runc
|
||||
run: |
|
||||
if [[ "${OS}" == "linux" ]]; then
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gperf
|
||||
sudo -E PATH=$PATH script/setup/install-seccomp
|
||||
cache="--cache-from=type=gha,scope=containerd-release --cache-to=type=gha,scope=containerd-release"
|
||||
if [[ "${PLATFORM}" =~ "windows" ]]; then
|
||||
# For Windows the cni build script generates a config but shells out to powershell (and also assume it is running on windows) to get a gateway and subnet.
|
||||
# The values provided here are taken from packages that we previously generated.
|
||||
export GATEWAY=172.21.16.1
|
||||
export PREFIX_LEN=12
|
||||
BUILD_ARGS="--build-arg GATEWAY --build-arg PREFIX_LEN"
|
||||
fi
|
||||
make cri-cni-release
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
docker buildx build ${cache} --build-arg RELEASE_VER --build-arg UBUNTU_VERSION=${{ matrix.dockerfile-ubuntu }} --build-arg GO_VERSION ${BUILD_ARGS} -f .github/workflows/release/Dockerfile --platform=${PLATFORM} -o releases/ .
|
||||
echo PLATFORM_CLEAN=${PLATFORM/\//-} >> $GITHUB_ENV
|
||||
|
||||
- name: Save cri-containerd binaries
|
||||
uses: actions/upload-artifact@v2
|
||||
# Remove symlinks since we don't want these in the release Artifacts
|
||||
find ./releases/ -maxdepth 1 -type l | xargs rm
|
||||
working-directory: src/github.com/containerd/containerd
|
||||
env:
|
||||
PLATFORM: ${{ matrix.dockerfile-platform }}
|
||||
- name: Save Artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: cri-containerd-binaries-${{ matrix.os }}
|
||||
path: src/github.com/containerd/containerd/releases/cri-containerd-cni-*.tar.gz*
|
||||
name: release-tars-${{env.PLATFORM_CLEAN}}
|
||||
path: src/github.com/containerd/containerd/releases/*.tar.gz*
|
||||
|
||||
release:
|
||||
name: Create containerd Release
|
||||
runs-on: ubuntu-18.04
|
||||
permissions:
|
||||
contents: write
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
needs: [build, check]
|
||||
|
||||
steps:
|
||||
- name: Download builds and release notes
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: builds
|
||||
- name: Catalog build assets for upload
|
||||
id: catalog
|
||||
run: |
|
||||
_filenum=1
|
||||
for i in "ubuntu-18.04" "windows-2019"; do
|
||||
for f in `ls builds/containerd-binaries-${i}`; do
|
||||
echo "::set-output name=file${_filenum}::${f}"
|
||||
let "_filenum+=1"
|
||||
done
|
||||
for f in `ls builds/cri-containerd-binaries-${i}`; do
|
||||
echo "::set-output name=file${_filenum}::${f}"
|
||||
let "_filenum+=1"
|
||||
done
|
||||
done
|
||||
- name: Create Release
|
||||
id: create_release
|
||||
uses: actions/create-release@v1.1.2
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
uses: softprops/action-gh-release@v1
|
||||
with:
|
||||
tag_name: ${{ github.ref }}
|
||||
release_name: containerd ${{ needs.check.outputs.stringver }}
|
||||
body_path: ./builds/containerd-release-notes/release-notes.md
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
fail_on_unmatched_files: true
|
||||
name: containerd ${{ needs.check.outputs.stringver }}
|
||||
draft: false
|
||||
prerelease: ${{ contains(github.ref, 'beta') || contains(github.ref, 'rc') }}
|
||||
- name: Upload Linux containerd tarball
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./builds/containerd-binaries-ubuntu-18.04/${{ steps.catalog.outputs.file1 }}
|
||||
asset_name: ${{ steps.catalog.outputs.file1 }}
|
||||
asset_content_type: application/gzip
|
||||
- name: Upload Linux sha256 sum
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./builds/containerd-binaries-ubuntu-18.04/${{ steps.catalog.outputs.file2 }}
|
||||
asset_name: ${{ steps.catalog.outputs.file2 }}
|
||||
asset_content_type: text/plain
|
||||
- name: Upload Linux cri containerd tarball
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./builds/cri-containerd-binaries-ubuntu-18.04/${{ steps.catalog.outputs.file3 }}
|
||||
asset_name: ${{ steps.catalog.outputs.file3 }}
|
||||
asset_content_type: application/gzip
|
||||
- name: Upload Linux cri sha256 sum
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./builds/cri-containerd-binaries-ubuntu-18.04/${{ steps.catalog.outputs.file4 }}
|
||||
asset_name: ${{ steps.catalog.outputs.file4 }}
|
||||
asset_content_type: text/plain
|
||||
- name: Upload Windows containerd tarball
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./builds/containerd-binaries-windows-2019/${{ steps.catalog.outputs.file5 }}
|
||||
asset_name: ${{ steps.catalog.outputs.file5 }}
|
||||
asset_content_type: application/gzip
|
||||
- name: Upload Windows sha256 sum
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./builds/containerd-binaries-windows-2019/${{ steps.catalog.outputs.file6 }}
|
||||
asset_name: ${{ steps.catalog.outputs.file6 }}
|
||||
asset_content_type: text/plain
|
||||
- name: Upload Windows cri containerd tarball
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./builds/cri-containerd-binaries-windows-2019/${{ steps.catalog.outputs.file7 }}
|
||||
asset_name: ${{ steps.catalog.outputs.file7 }}
|
||||
asset_content_type: application/gzip
|
||||
- name: Upload Windows cri sha256 sum
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./builds/cri-containerd-binaries-windows-2019/${{ steps.catalog.outputs.file8 }}
|
||||
asset_name: ${{ steps.catalog.outputs.file8 }}
|
||||
asset_content_type: text/plain
|
||||
body_path: ./builds/containerd-release-notes/release-notes.md
|
||||
files: |
|
||||
builds/release-tars-**/*
|
||||
|
|
|

@@ -0,0 +1,62 @@
# Copyright The containerd Authors.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# UBUNTU_VERSION can be set to 18.04 (bionic), 20.04 (focal), or 22.04 (jammy)
ARG UBUNTU_VERSION=18.04
ARG BASE_IMAGE=ubuntu:${UBUNTU_VERSION}
ARG GO_VERSION
ARG GO_IMAGE=golang:${GO_VERSION}
FROM --platform=$BUILDPLATFORM $GO_IMAGE AS go
FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.1.0@sha256:76a8510b1798f66fcc87e7ec2f4684aa1b16756df2a397ec307b9efb6023f6c5 AS xx

FROM --platform=$BUILDPLATFORM ${BASE_IMAGE} AS base
COPY --from=xx / /
SHELL ["/bin/bash", "-xec"]
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
    apt-get install -y dpkg-dev git make pkg-config
ARG TARGETPLATFORM
RUN xx-apt-get install -y libseccomp-dev btrfs-progs gcc
RUN if grep -qE 'UBUNTU_CODENAME=(focal|jammy)' /etc/os-release; then xx-apt-get install -y libbtrfs-dev; fi
ENV PATH=/usr/local/go/bin:$PATH
ENV GOPATH=/go
ENV CGO_ENABLED=1

FROM base AS linux
FROM base AS windows
# Set variables used by cni script which would otherwise shell out to powershell
ARG GATEWAY
ARG PREFIX_LEN

FROM ${TARGETOS} AS target
WORKDIR /go/src/github.com/containerd/containerd
COPY . .
ARG TARGETPLATFORM
ARG RELEASE_VER
ENV VERSION=$RELEASE_VER
RUN \
    --mount=type=bind,from=go,source=/usr/local/go,target=/usr/local/go \
    --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg \
    export CC=$(xx-info)-gcc && xx-go --wrap && \
    make release cri-release cri-cni-release && \
    for f in $(find bin -executable -type f); do xx-verify $f; done

# check git working tree after build
RUN \
    export GIT_STATUS_OUTPUT=$(git status --porcelain) && \
    test -z $GIT_STATUS_OUTPUT || (echo $GIT_STATUS_OUTPUT && exit 1)

FROM scratch AS release
COPY --from=target /go/src/github.com/containerd/containerd/releases/ /

@@ -0,0 +1,32 @@
# Workflow intended to periodically run the Windows Integration test workflow.

name: Windows Periodic Tests

on:
  workflow_dispatch:
  schedule:
    - cron: "0 1 * * *"

permissions:  # added using https://github.com/step-security/secure-workflows
  contents: read

jobs:

  triggerWinIntegration:
    # NOTE: the following permissions are required by `google-github-actions/auth`:
    permissions:
      contents: 'read'
      id-token: 'write'
    if: github.repository == 'containerd/containerd'
    # NOTE(aznashwan, 11/24/21): GitHub actions do not currently support referencing
    # or evaluating any kind of variables in the `uses` clause, but this will
    # ideally be added in the future in which case the hardcoded reference to the
    # upstream containerd repository should be replaced with the following to
    # potentially allow contributors to enable periodic Windows tests on forks as well:
    # uses: "${{ github.repository }}/.github/workflows/windows-periodic.yml@${{ github.ref_name }}"
    uses: containerd/containerd/.github/workflows/windows-periodic.yml@main
    secrets:
      AZURE_SUB_ID: "${{ secrets.AZURE_SUB_ID }}"
      AZURE_CREDS: "${{ secrets.AZURE_CREDS }}"
      GCP_SERVICE_ACCOUNT: "${{ secrets.GCP_SERVICE_ACCOUNT }}"
      GCP_WORKLOAD_IDENTITY_PROVIDER: "${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}"

@@ -0,0 +1,256 @@
# Workflow intended to run containerd integration tests on Windows.

name: Windows Integration Tests

on:
  workflow_dispatch:
  workflow_call:
    secrets:
      AZURE_SUB_ID:
        required: true
      AZURE_CREDS:
        required: true
      GCP_SERVICE_ACCOUNT:
        required: true
      GCP_WORKLOAD_IDENTITY_PROVIDER:
        required: true

env:
  AZURE_DEFAULT_LOCATION: westeurope
  AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUB_ID }}
  AZURE_DEFAULT_VM_SIZE: Standard_D2s_v3
  PASSWORD: Passw0rdAdmin # temp for testing, will be generated
  DEFAULT_ADMIN_USERNAME: azureuser
  SSH_OPTS: "-o ServerAliveInterval=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
  REMOTE_VM_BIN_PATH: "c:\\containerd\\bin"
  BUSYBOX_TESTING_IMAGE_REF: "registry.k8s.io/e2e-test-images/busybox:1.29-2"
  RESOURCE_CONSUMER_TESTING_IMAGE_REF: "registry.k8s.io/e2e-test-images/resource-consumer:1.10"
  WEBSERVER_TESTING_IMAGE_REF: "registry.k8s.io/e2e-test-images/nginx:1.14-2"

permissions:  # added using https://github.com/step-security/secure-workflows
  contents: read

jobs:
  winIntegration:
    # NOTE: the following permissions are required by `google-github-actions/auth`:
    permissions:
      contents: 'read'
      id-token: 'write'
    strategy:
      matrix:
        win_ver: [ltsc2019, ltsc2022]
        include:
          - win_ver: ltsc2019
            AZURE_IMG: "MicrosoftWindowsServer:WindowsServer:2019-Datacenter-with-Containers-smalldisk:17763.1935.2105080716"
            AZURE_RESOURCE_GROUP: ctrd-integration-ltsc2019-${{ github.run_id }}
            GOOGLE_BUCKET: "containerd-integration/logs/windows-ltsc2019/"
          - win_ver: ltsc2022
            AZURE_IMG: "MicrosoftWindowsServer:WindowsServer:2022-datacenter-smalldisk-g2:20348.169.2108120020"
            AZURE_RESOURCE_GROUP: ctrd-integration-ltsc2022-${{ github.run_id }}
            GOOGLE_BUCKET: "containerd-integration/logs/windows-ltsc2022/"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Install required packages
        run: |
          sudo apt-get install xmlstarlet -y

      - name: PrepareArtifacts
        run: |
          STARTED_TIME=$(date +%s)
          LOGS_DIR=$HOME/$STARTED_TIME
          echo "STARTED_TIME=$STARTED_TIME" >> $GITHUB_ENV
          echo "LOGS_DIR=$LOGS_DIR" >> $GITHUB_ENV
          mkdir -p $LOGS_DIR/artifacts

          jq -n --arg node temp --arg timestamp $STARTED_TIME '$timestamp|tonumber|{timestamp:.,$node}' > $LOGS_DIR/started.json

      - name: Generate ssh key pair
        run: |
          mkdir -p $HOME/.ssh/
          ssh-keygen -t rsa -b 4096 -C "ci@containerd.com" -f $HOME/.ssh/id_rsa -q -N ""
          echo "SSH_PUB_KEY=$(cat ~/.ssh/id_rsa.pub)" >> $GITHUB_ENV

      - name: AZLogin
        uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDS }}

      - name: AZResourceGroupCreate
        uses: azure/CLI@v1
        with:
          inlinescript: |
            az group create -n ${{ matrix.AZURE_RESOURCE_GROUP }} -l ${{ env.AZURE_DEFAULT_LOCATION }} --tags creationTimestamp=$(date -u '+%Y-%m-%dT%H:%M:%SZ')

      - name: AZTestVMCreate
        uses: azure/CLI@v1
        with:
          inlinescript: |
            DETAILS=$(az vm create -n winTestVM --admin-username ${{ env.DEFAULT_ADMIN_USERNAME }} --admin-password ${{ env.PASSWORD }} --image ${{ matrix.AZURE_IMG }} -g ${{ matrix.AZURE_RESOURCE_GROUP }} --nsg-rule SSH --size ${{ env.AZURE_DEFAULT_VM_SIZE }} --public-ip-sku Standard -o json)
            PUB_IP=$(echo $DETAILS | jq -r .publicIpAddress)
            if [ "$PUB_IP" == "null" ]
            then
              RETRY=0
              while [ "$PUB_IP" == "null" ] || [ $RETRY -le 5 ]
              do
                sleep 5
                PUB_IP=$(az vm show -d -g ${{ matrix.AZURE_RESOURCE_GROUP }} -n winTestVM -o json --query publicIps | jq -r)
                RETRY=$(( $RETRY + 1 ))
              done
            fi

            if [ "$PUB_IP" == "null" ]
            then
              echo "failed to fetch public IP"
              exit 1
            fi
            echo "VM_PUB_IP=$PUB_IP" >> $GITHUB_ENV

      - name: EnableAZVMSSH
        uses: azure/CLI@v1
        with:
          inlinescript: |
            az vm run-command invoke --command-id RunPowerShellScript -n winTestVM -g ${{ matrix.AZURE_RESOURCE_GROUP }} --scripts @$GITHUB_WORKSPACE/script/setup/enable_ssh_windows.ps1 --parameters 'SSHPublicKey=${{ env.SSH_PUB_KEY }}'

      - name: TestSSHConnection
        run: |
          if ! ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "hostname";
          then
            exit 1
          fi

      - name: InstallContainerFeatureWS2022
        if: ${{ matrix.win_ver == 'ltsc2022' }}
        run: |
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "powershell.exe -command { Install-WindowsFeature -Name 'Containers' -Restart }"

      - name: WaitForVMToRestart
        if: ${{ matrix.win_ver == 'ltsc2022' }}
        timeout-minutes: 5
        run: |
          # give the vm 30 seconds to actually stop. SSH server might actually respond while server is shutting down.
          sleep 30
          while [ ! $( ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "hostname") ];
          do
            echo "Unable to connect to azurevm"
          done
          echo "Connection reestablished. VM restarted succesfully."

      - name: CreateNatNetworkWS2022
        if: ${{ matrix.win_ver == 'ltsc2022' }}
        run: |
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "powershell.exe -command { curl.exe -L 'https://raw.githubusercontent.com/microsoft/SDN/master/Kubernetes/windows/hns.psm1' -o hns.psm1 }"
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "powershell.exe -command { Import-Module .\hns.psm1 ; New-HnsNetwork -Type NAT -Name nat -AddressPrefix 172.19.208.0/20 -Gateway 172.19.208.1 }"

      - name: PrepareTestingEnv
        run: |
          scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} $GITHUB_WORKSPACE/script/setup/prepare_env_windows.ps1 azureuser@${{ env.VM_PUB_IP }}:/prepare_env_windows.ps1
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "c:\\prepare_env_windows.ps1"

      - name: MakeContainerDBins
        run: |
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "git clone http://github.com/containerd/containerd c:\\containerd "
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "cd c:\containerd ; make binaries"
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "git clone http://github.com/Microsoft/hcsshim c:\containerd\hcsshim "

          # Get shim commit from containerd local repo
          SHIM_COMMIT=$(grep 'Microsoft/hcsshim' go.mod | awk '{ print $2 }');

          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "cd c:\containerd\hcsshim; git fetch --tags origin $SHIM_COMMIT ; \
            git checkout $SHIM_COMMIT ; go build -mod=vendor -o ${{ env.REMOTE_VM_BIN_PATH }}\containerd-shim-runhcs-v1.exe .\cmd\containerd-shim-runhcs-v1"

      - name: RunIntegrationTests
        run: |
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "sh.exe -s" << EOF
          cd /c/containerd
          export EXTRA_TESTFLAGS="-timeout=20m"
          make integration | tee /c/Logs/integration.log
          EOF
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "sh.exe -c 'cat /c/Logs/integration.log | go-junit-report.exe > /c/Logs/junit_00.xml'"

      - name: PrepareRepoList
        run: |
          cat > repolist.toml << EOF
          busybox = "${{ env.BUSYBOX_TESTING_IMAGE_REF }}"
          ResourceConsumer = "${{ env.RESOURCE_CONSUMER_TESTING_IMAGE_REF }}"
          EOF

          cat > cri-test-images.yaml << EOF
          defaultTestContainerImage: ${{ env.BUSYBOX_TESTING_IMAGE_REF }}
          webServerTestImage: ${{ env.WEBSERVER_TESTING_IMAGE_REF }}
          EOF

          scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} repolist.toml azureuser@${{ env.VM_PUB_IP }}:c:/repolist.toml
          scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} cri-test-images.yaml azureuser@${{ env.VM_PUB_IP }}:c:/cri-test-images.yaml

      - name: RunCRIIntegrationTests
        run: |
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "sh.exe -s" <<EOF
          cd c:/containerd
          ./script/setup/install-cni-windows
          export TEST_IMAGE_LIST=c:/repolist.toml
          make cri-integration | tee c:/Logs/cri-integration.log
          EOF
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "sh.exe -c 'cat /c/Logs/cri-integration.log | go-junit-report.exe > c:/Logs/junit_01.xml' "

      - name: GetCritestRepo
        run: |
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "git clone https://github.com/kubernetes-sigs/cri-tools c:/cri-tools"

      - name: BuildCritest
        run: |
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "sh.exe -c 'cd /c/cri-tools && make critest'"

      - name: RunCritest
        run: |
          ssh -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }} "powershell.exe -command { Start-Process -FilePath C:\containerd\bin\containerd.exe -NoNewWindow -RedirectStandardError true -PassThru ; get-process | sls containerd ; start-sleep 5 ; c:\cri-tools\build\bin\critest.exe --runtime-endpoint=\"npipe:\\\\.\\pipe\\containerd-containerd\" --test-images-file='c:\cri-test-images.yaml' --report-dir='c:\Logs' }"

      - name: PullLogsFromWinNode
        run: |
          scp -i $HOME/.ssh/id_rsa ${{ env.SSH_OPTS }} azureuser@${{ env.VM_PUB_IP }}:c:/Logs/*.xml ${{ env.LOGS_DIR }}/artifacts/
          for f in $(ls ${{ env.LOGS_DIR }}/artifacts/*.xml); do
            xmlstarlet ed -d "/testsuites/testsuite/properties" $f > ${{ env.LOGS_DIR }}/$(basename $f)
            mv ${{ env.LOGS_DIR }}/$(basename $f) $f
          done

      - name: FinishJob
        run: |
          jq -n --arg result SUCCESS --arg timestamp $(date +%s) '$timestamp|tonumber|{timestamp:.,$result}' > ${{ env.LOGS_DIR }}/finished.json
          echo "${{ env.STARTED_TIME }}" > ${{ github.workspace }}/latest-build.txt

      - name: AssignGcpCreds
        id: AssignGcpCreds
        run: |
          echo 'GCP_SERVICE_ACCOUNT=${{ secrets.GCP_SERVICE_ACCOUNT }}' >> $GITHUB_OUTPUT
          echo 'GCP_WORKLOAD_IDENTITY_PROVIDER=${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}' >> $GITHUB_OUTPUT

      - name: AuthGcp
        uses: google-github-actions/auth@v0
        if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER
        with:
          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}

      - name: UploadJobReport
        uses: google-github-actions/upload-cloud-storage@v0
        if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER
        with:
          path: ${{ github.workspace }}/latest-build.txt
          destination: ${{ matrix.GOOGLE_BUCKET }}
          parent: false

      - name: UploadLogsDir
        uses: google-github-actions/upload-cloud-storage@v0
        if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER
        with:
          path: ${{ env.LOGS_DIR }}
          destination: ${{ matrix.GOOGLE_BUCKET }}${{ env.STARTED_TIME}}
          parent: false

      - name: ResourceCleanup
        if: always()
        uses: azure/CLI@v1
        with:
          inlinescript: |
            az group delete -g ${{ matrix.AZURE_RESOURCE_GROUP }} --yes

@@ -1,27 +1,55 @@
linters:
  enable:
    - structcheck
    - varcheck
    - staticcheck
    - unconvert
    - exportloopref # Checks for pointers to enclosing loop variables
    - gofmt
    - goimports
    - golint
    - gosec
    - ineffassign
    - vet
    - unused
    - misspell
    - nolintlint
    - revive
    - staticcheck
    - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
    - unconvert
    - unused
    - vet
    - dupword # Checks for duplicate words in the source code
  disable:
    - errcheck

issues:
  include:
    - EXC0002
  max-issues-per-linter: 0
  max-same-issues: 0

  # Only using / doesn't work due to https://github.com/golangci/golangci-lint/issues/1398.
  exclude-rules:
    - path: 'archive[\\/]tarheader[\\/]'
      # conversion is necessary on Linux, unnecessary on macOS
      text: "unnecessary conversion"

linters-settings:
  gosec:
    # The following issues surfaced when `gosec` linter
    # was enabled. They are temporarily excluded to unblock
    # the existing workflow, but still to be addressed by
    # future works.
    excludes:
      - G204
      - G305
      - G306
      - G402
      - G404

run:
  timeout: 3m
  timeout: 8m
  skip-dirs:
    - api
    - cluster
    - design
    - docs
    - docs/man
    - releases
    - reports
    - test # e2e scripts

22  .mailmap

@@ -29,13 +29,17 @@ Eric Ernst <eric@amperecomputing.com> <eric.ernst@intel.com>
Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen@linux.alibaba.com>
Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen.rz@alibaba-linux.com>
Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen.rz@alibaba-inc.com>
Fabiano Fidêncio <fidencio@redhat.com> <fabiano.fidencio@intel.com>
Fahed Dorgaa <fahed.dorgaa@gmail.com>
Frank Yang <yyb196@gmail.com>
Fupan Li <lifupan@gmail.com>
Fupan Li <lifupan@gmail.com> <fupan.lfp@antfin.com>
Fupan Li <lifupan@gmail.com> <fupan.lfp@antgroup.com>
Furkan Türkal <furkan.turkal@trendyol.com>
Georgia Panoutsakopoulou <gpanoutsak@gmail.com>
Guangming Wang <guangming.wang@daocloud.io>
Haiyan Meng <haiyanmeng@google.com>
haoyun <yun.hao@daocloud.io>
Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
Hu Shuai <hus.fnst@cn.fujitsu.com>
Hu Shuai <hus.fnst@cn.fujitsu.com> <hushuaiia@qq.com>

@@ -53,15 +57,18 @@ John Howard <github@lowenna.com> <jhoward@microsoft.com>
John Howard <github@lowenna.com> <jhowardmsft@users.noreply.github.com>
Lorenz Brun <lorenz@brun.one> <lorenz@nexantic.com>
Luc Perkins <lucperkins@gmail.com>
Jiajun Jiang <levinxo@gmail.com>
Julien Balestra <julien.balestra@datadoghq.com>
Jun Lin Chen <webmaster@mc256.com> <1913688+mc256@users.noreply.github.com>
Justin Cormack <justin.cormack@docker.com> <justin@specialbusservice.com>
Justin Terry <juterry@microsoft.com>
Justin Terry <juterry@microsoft.com> <jterry75@users.noreply.github.com>
Kante <kerthcet@gmail.com>
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
Kevin Kern <kaiwentan@harmonycloud.cn>
Kevin Parsons <kevpar@microsoft.com> <kevpar@users.noreply.github.com>
Kevin Xu <cming.xu@gmail.com>
Kitt Hsu <kitt.hsu@gmail.com>
Kohei Tokunaga <ktokunaga.mail@gmail.com>
Krasi Georgiev <krasi.root@gmail.com> <krasi@vip-consult.solutions>
Lantao Liu <lantaol@google.com>

@@ -69,16 +76,22 @@ Lantao Liu <lantaol@google.com> <taotaotheripper@gmail.com>
Li Yuxuan <liyuxuan04@baidu.com> <darfux@163.com>
Lifubang <lifubang@aliyun.com> <lifubang@acmcoder.com>
Lu Jingxiao <lujingxiao@huawei.com>
Maksym Pavlenko <pavlenko.maksym@gmail.com> <865334+mxpv@users.noreply.github.com>
Maksym Pavlenko <pavlenko.maksym@gmail.com> <makpav@amazon.com>
Maksym Pavlenko <pavlenko.maksym@gmail.com> <mxpv@apple.com>
Mario Hros <spam@k3a.me>
Mario Hros <spam@k3a.me> <root@k3a.me>
Mario Macias <mariomac@gmail.com> <mmacias@newrelic.com>
Mark Gordon <msg555@gmail.com>
Marvin Giessing <marvin.giessing@gmail.com>
Michael Crosby <crosbymichael@gmail.com> <michael@thepasture.io>
Michael Katsoulis <michaelkatsoulis88@gmail.com>
Mike Brown <brownwm@us.ibm.com> <mikebrow@users.noreply.github.com>
Mohammad Asif Siddiqui <mohammad.asif.siddiqui1@huawei.com>
Nabeel Rana <nabeelnrana@gmail.com>
Ng Yang <wssccc@qq.com>
Ning Li <ning.a.li@transwarp.io>
ningmingxiao <ning.mingxiao@zte.com.cn>
Nishchay Kumar <mrawesomenix@gmail.com>
Oliver Stenbom <oliver@stenbom.eu> <ostenbom@pivotal.io>
Phil Estes <estesp@gmail.com> <estesp@linux.vnet.ibm.com>

@@ -89,6 +102,7 @@ Ross Boucher <rboucher@gmail.com>
Ruediger Maass <ruediger.maass@de.ibm.com>
Rui Cao <ruicao@alauda.io> <ruicao@alauda.io>
Sakeven Jiang <jc5930@sina.cn>
Samuel Karp <me@samuelkarp.com> <samuelkarp@google.com>
Samuel Karp <me@samuelkarp.com> <skarp@amazon.com>
Seth Pellegrino <spellegrino@newrelic.com> <30441101+sethp-nr@users.noreply.github.com>
Shaobao Feng <shaobao.feng@huawei.com>

@@ -104,16 +118,21 @@ Stephen J Day <stevvooe@gmail.com> <stephen.day@docker.com>
Sudeesh John <sudeesh@linux.vnet.ibm.com>
Su Fei <fesu@ebay.com> <fesu@ebay.com>
Su Xiaolin <linxxnil@126.com>
Takumasa Sakao <sakataku7@gmail.com> <tsakao@zlab.co.jp>
Ted Yu <yuzhihong@gmail.com>
Tõnis Tiigi <tonistiigi@gmail.com>
Wade Lee <weidonglee27@gmail.com>
Wade Lee <weidonglee27@gmail.com> <weidonglee29@gmail.com>
Wade Lee <weidonglee27@gmail.com> <21621232@zju.edu.cn>
Wang Bing <wangbing.adam@gmail.com>
wanglei <wllenyj@linux.alibaba.com>
wanglei <wllenyj@linux.alibaba.com> <wanglei01@alibaba-inc.com>
wangzhan <wang.zhan@smartx.com>
Wei Fu <fuweid89@gmail.com>
Wei Fu <fuweid89@gmail.com> <fhfuwei@163.com>
Xiaodong Zhang <a4012017@sina.com>
Xuean Yan <yan.xuean@zte.com.cn>
Yang Yang <yang8518296@163.com>
Yue Zhang <zy675793960@yeah.net>
Yuxing Liu <starnop@163.com>
Zhang Wei <zhangwei555@huawei.com>

@@ -124,4 +143,7 @@ Zhiyu Li <payall4u@qq.com> <404977848@qq.com>
Zhongming Chang<zhongming.chang@daocloud.io>
Zhoulin Xie <zhoulin.xie@daocloud.io>
Zhoulin Xie <zhoulin.xie@daocloud.io> <42261994+JoeWrightss@users.noreply.github.com>
zounengren <zouyee1989@gmail.com> <zounengren@cmss.chinamobile.com>
张潇 <xiaozhang0210@hotmail.com>
Kazuyoshi Kato <kaz@fly.io> <katokazu@amazon.com>
Andrey Epifanov <epifanov.andrey@gmail.com> <aepifanov@mirantis.com>

35  .zuul.yaml

@@ -1,35 +0,0 @@
- project:
    name: containerd/containerd
    merge-mode: merge
    check:
      jobs:
        - containerd-build-arm64
        - containerd-test-arm64
        - containerd-integration-test-arm64

- job:
    name: containerd-build-arm64
    parent: init-test
    description: |
      Containerd build in openlab cluster.
    run: .zuul/playbooks/containerd-build/run.yaml
    nodeset: ubuntu-xenial-arm64-openlab
    voting: false

- job:
    name: containerd-test-arm64
    parent: init-test
    description: |
      Containerd unit tests in openlab cluster.
    run: .zuul/playbooks/containerd-build/unit-test.yaml
    nodeset: ubuntu-xenial-arm64-openlab
    voting: false

- job:
    name: containerd-integration-test-arm64
    parent: init-test
    description: |
      Containerd unit tests in openlab cluster.
    run: .zuul/playbooks/containerd-build/integration-test.yaml
    nodeset: ubuntu-xenial-arm64-openlab
    voting: false

@@ -1,96 +0,0 @@
- hosts: all
  become: yes
  roles:
    - role: config-golang
      go_version: '1.16.12'
      arch: arm64
  tasks:
    - name: Install pre-requisites
      shell:
        cmd: |
          set -xe
          set -o pipefail
          apt-get update
          apt-get install -y btrfs-tools libseccomp-dev git pkg-config lsof gperf apparmor

          go version
        chdir: '{{ zuul.project.src_dir }}'
        executable: /bin/bash
      environment: '{{ global_env }}'
    - name: Install containerd and cri dependencies
      shell:
        cmd: |
          set -xe
          make install-deps
        chdir: '{{ zuul.project.src_dir }}'
        executable: /bin/bash
      environment: '{{ global_env }}'
    - name: Install criu
      shell:
        cmd: |
          set -xe
          apt-get install -y \
            libprotobuf-dev \
            libprotobuf-c-dev \
            protobuf-c-compiler \
            protobuf-compiler \
            python-protobuf \
            libnl-3-dev \
            libnet-dev \
            libcap-dev \
            python-future
          wget https://github.com/checkpoint-restore/criu/archive/v3.13.tar.gz -O criu.tar.gz
          tar -zxf criu.tar.gz
          cd criu-3.13
          make install-criu
        chdir: '{{ zuul.project.src_dir }}'
        executable: /bin/bash
      environment: '{{ global_env }}'
    - name: Install containerd
      shell:
        cmd: |
          set -xe
          make binaries
          make install | tee $LOGS_PATH/make_install.log
        chdir: '{{ zuul.project.src_dir }}'
        executable: /bin/bash
      environment: '{{ global_env }}'
    - name: Tests
      shell:
        cmd: |
          make test | tee $LOGS_PATH/make_test.log
          make root-test | tee $LOGS_PATH/make_root-test.log
        chdir: '{{ zuul.project.src_dir }}'
        executable: /bin/bash
      environment: '{{ global_env }}'
    - name: Integration 1
      shell:
        cmd: |
          make integration EXTRA_TESTFLAGS=-no-criu TESTFLAGS_RACE=-race | tee $LOGS_PATH/make_integration-test.log
        chdir: '{{ zuul.project.src_dir }}'
        executable: /bin/bash
      environment: '{{ global_env }}'
    - name: Integration 2
      shell:
        cmd: |
          TESTFLAGS_PARALLEL=1 make integration EXTRA_TESTFLAGS=-no-criu | tee $LOGS_PATH/make_integration-test.log
        chdir: '{{ zuul.project.src_dir }}'
        executable: /bin/bash
      environment: '{{ global_env }}'
    - name: CRI Integration Test
      shell:
        cmd: |
          CONTAINERD_RUNTIME="io.containerd.runc.v2" make cri-integration | tee $LOGS_PATH/make_cri-integration-test.log
        chdir: '{{ zuul.project.src_dir }}'
        executable: /bin/bash
      environment: '{{ global_env }}'
    - name: CRI Integration Test
      shell:
        cmd: |
          if grep -q "FAIL:" $LOGS_PATH/*.log; then
            echo "FAILURE"
            exit 1
          fi
        chdir: '{{ zuul.project.src_dir }}'
        executable: /bin/bash
      environment: '{{ global_env }}'

@@ -1,22 +0,0 @@
- hosts: all
  become: yes
  roles:
    - role: config-golang
      go_version: '1.16.12'
      arch: arm64
  tasks:
    - name: Build containerd
      shell:
        cmd: |
          set -xe
          set -o pipefail
          apt-get update
          apt-get install -y btrfs-tools libseccomp-dev git pkg-config

          go version
          make | tee $LOGS_PATH/make.txt

          cp -r ./bin $RESULTS_PATH
        chdir: '{{ zuul.project.src_dir }}'
        executable: /bin/bash
      environment: '{{ global_env }}'

@@ -1,20 +0,0 @@
- hosts: all
  become: yes
  roles:
    - role: config-golang
      go_version: '1.16.12'
      arch: arm64
  tasks:
    - name: Build and test containerd
      shell:
        cmd: |
          set -xe
          set -o pipefail
          apt-get update
          apt-get install -y btrfs-tools libseccomp-dev git pkg-config

          go version
          make build test | tee $LOGS_PATH/make_test.txt
        chdir: '{{ zuul.project.src_dir }}'
        executable: /bin/bash
      environment: '{{ global_env }}'

14  ADOPTERS.md

@@ -12,10 +12,14 @@ including the Balena project listed below.

**_[IBM Cloud Private (ICP)](https://www.ibm.com/cloud/private)_** - IBM's on-premises cloud offering has containerd as a "tech preview" CRI runtime for the Kubernetes offered within this product for the past two releases, and plans to fully migrate to containerd in a future release.

**_[Google Cloud Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/)_** - offers containerd as the CRI runtime in **beta** for recent versions of Kubernetes.
**_[Google Cloud Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/)_** - containerd has been offered in GKE since version 1.14 and has been the default runtime since version 1.19. It is also the only supported runtime for GKE Autopilot from the launch. [More details](https://cloud.google.com/kubernetes-engine/docs/concepts/using-containerd)

**_[AWS Fargate](https://aws.amazon.com/fargate)_** - uses containerd + Firecracker (noted below) as the runtime and isolation technology for containers run in the Fargate platform. Fargate is a serverless, container-native compute offering from Amazon Web Services.

**_[Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/)_** - EKS optionally offers containerd as a CRI runtime starting with Kubernetes version 1.21. In Kubernetes 1.22 the default CRI runtime will be containerd.

**_[Bottlerocket](https://aws.amazon.com/bottlerocket/)_** - Bottlerocket is a Linux distribution from Amazon Web Services purpose-built for containers using containerd as the core system runtime.

**_Cloud Foundry_** - The [Guardian container manager](https://github.com/cloudfoundry/guardian) for CF has been using OCI runC directly with additional code from CF managing the container image and filesystem interactions, but have recently migrated to use containerd as a replacement for the extra code they had written around runC.

**_Alibaba's PouchContainer_** - The Alibaba [PouchContainer](https://github.com/alibaba/pouch) project uses containerd as its runtime for a cloud native offering that has unique isolation and image distribution capabilities.

@@ -32,7 +36,7 @@ including the Balena project listed below.

**_BuildKit_** - The Moby project's [BuildKit](https://github.com/moby/buildkit) can use either runC or containerd as build execution backends for building container images. BuildKit support has also been built into the Docker engine in recent releases, making BuildKit provide the backend to the `docker build` command.

**_Azure acs-engine_** - Microsoft Azure's [acs-engine](https://github.com/Azure/acs-engine) open source project has customizable deployment of Kubernetes clusters, where containerd is a selectable container runtime. At some point in the future Azure's AKS service will default to use containerd as the CRI runtime for deployed Kubernetes clusters.
**_[Azure Kubernetes Service (AKS)](https://azure.microsoft.com/services/kubernetes-service)_** - Microsoft's managed Kubernetes offering uses containerd for Linux nodes running v1.19 or greater. Containerd for Windows nodes is currently in public preview. [More Details](https://docs.microsoft.com/azure/aks/cluster-configuration#container-runtime-configuration)

**_Amazon Firecracker_** - The AWS [Firecracker VMM project](http://firecracker-microvm.io/) has extended containerd with a new snapshotter and v2 shim to allow containerd to drive virtualized container processes via their VMM implementation. More details on their containerd integration are available in [their GitHub project](https://github.com/firecracker-microvm/firecracker-containerd).

@@ -42,6 +46,12 @@ including the Balena project listed below.

**_Inclavare Containers_** - [Inclavare Containers](https://github.com/alibaba/inclavare-containers) is an innovation of container runtime with the novel approach for launching protected containers in hardware-assisted Trusted Execution Environment (TEE) technology, aka Enclave, which can prevent the untrusted entity, such as Cloud Service Provider (CSP), from accessing the sensitive and confidential assets in use.

**_VMware TKG_** - [Tanzu Kubernetes Grid](https://tanzu.vmware.com/kubernetes-grid) VMware's Multicloud Kubernetes offering uses containerd as the default CRI runtime.

**_VMware TCE_** - [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) VMware's fully-featured, easy to manage, Kubernetes platform for learners and users. It is a freely available, community supported, and open source distribution of VMware Tanzu. It uses containerd as the default CRI runtime.

**_[Talos Linux](https://www.talos.dev/)_** - Talos Linux is Linux designed for Kubernetes – secure, immutable, and minimal. Talos Linux is using containerd as the core system runtime and CRI implementation.

**_Other Projects_** - While the above list provides a cross-section of well known uses of containerd, the simplicity and clear API layer for containerd has inspired many smaller projects around providing simple container management platforms. Several examples of building higher layer functionality on top of the containerd base have come from various containerd community participants:
- Michael Crosby's [boss](https://github.com/crosbymichael/boss) project,
- Evan Hazlett's [stellar](https://github.com/ehazlett/stellar) project,

92  BUILDING.md

@@ -14,8 +14,8 @@ This doc includes:

To build the `containerd` daemon, and the `ctr` simple test client, the following build system dependencies are required:

* Go 1.13.x or above except 1.14.x
* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
* Go 1.19.x or above
* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/protocolbuffers/protobuf/releases))
* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via the build tag `no_btrfs`, removing this dependency.

## Build the development environment

@@ -32,9 +32,9 @@ git clone https://github.com/containerd/containerd

For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.11.4 release for a 64-bit Linux host:

```
$ wget -c https://github.com/google/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip
$ sudo unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local
```sh
wget -c https://github.com/protocolbuffers/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip
sudo unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local
```

`containerd` uses [Btrfs](https://en.wikipedia.org/wiki/Btrfs) it means that you

@@ -46,38 +46,20 @@ need to satisfy these dependencies in your system:

At this point you are ready to build `containerd` yourself!

## Build runc
## Runc

`runc` is the default container runtime used by `containerd` and is required to
run containerd. While it is okay to download a runc binary and install that on
Runc is the default container runtime used by `containerd` and is required to
run containerd. While it is okay to download a `runc` binary and install that on
the system, sometimes it is necessary to build runc directly when working with
container runtime development. You can skip this step if you already have the
correct version of `runc` installed.

`runc` requires `libseccomp`. You may need to install the missing dependencies:

* CentOS/Fedora: `yum install libseccomp libseccomp-devel`
* Debian/Ubuntu: `apt-get install libseccomp libseccomp-dev`


For the quick and dirty installation, you can use the following:

```
git clone https://github.com/opencontainers/runc
cd runc
make
sudo make install
```

Make sure to follow the guidelines for versioning in [RUNC.md](/docs/RUNC.md) for the
best results.
container runtime development. Make sure to follow the guidelines for versioning
in [RUNC.md](/docs/RUNC.md) for the best results.

## Build containerd

`containerd` uses `make` to create a repeatable build flow. It means that you
can run:

```
```sh
cd containerd
make
```

@@ -86,22 +68,44 @@ This is going to build all the project binaries in the `./bin/` directory.

You can move them in your global path, `/usr/local/bin` with:

```sudo
```sh
sudo make install
```

The install prefix can be changed by passing the `PREFIX` variable (defaults
to `/usr/local`).

Note: if you set one of these vars, set them to the same values on all make stages
(build as well as install).

If you want to prepend an additional prefix on actual installation (eg. packaging or chroot install),
you can pass it via `DESTDIR` variable:

```sh
sudo make install DESTDIR=/tmp/install-x973234/
```

The above command installs the `containerd` binary to `/tmp/install-x973234/usr/local/bin/containerd`

The current `DESTDIR` convention is supported since containerd v1.6.
Older releases was using `DESTDIR` for a different purpose that is similar to `PREFIX`.
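
A minimal sketch of the corresponding `PREFIX` override (the `/opt/containerd` prefix here is only an example, and, per the note above, it is passed to both the build and the install stage):

```sh
make PREFIX=/opt/containerd
sudo make install PREFIX=/opt/containerd   # binaries end up in /opt/containerd/bin
```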

When making any changes to the gRPC API, you can use the installed `protoc`
compiler to regenerate the API generated code packages with:

```sudo
```sh
make generate
```

> *Note*: Several build tags are currently available:
> * `no_btrfs`: A build tag disables building the btrfs snapshot driver.
> * `no_cri`: A build tag disables building Kubernetes [CRI](http://blog.kubernetes.io/2016/12/container-runtime-interface-cri-in-kubernetes.html) support into containerd.
>   See [here](https://github.com/containerd/cri-containerd#build-tags) for build tags of CRI plugin.
> * `no_devmapper`: A build tag disables building the device mapper snapshot driver.
> * snapshotters (alphabetical order)
>   * `no_aufs`: A build tag disables building the aufs snapshot driver.
>   * `no_btrfs`: A build tag disables building the Btrfs snapshot driver.
>   * `no_devmapper`: A build tag disables building the device mapper snapshot driver.
>   * `no_zfs`: A build tag disables building the ZFS snapshot driver.
>
> For example, adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
> Makefile target will disable the btrfs driver within the containerd Go build.
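
For instance, a build that skips the btrfs and devmapper snapshotters might look like the following sketch (tags from the list above can be combined as needed):

```sh
# pass the tags on the make command line (equivalent to exporting BUILDTAGS)
make BUILDTAGS="no_btrfs no_devmapper" binaries
```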

@@ -117,26 +121,25 @@ Please refer to [RUNC.md](/docs/RUNC.md) for the currently supported version of

You can build static binaries by providing a few variables to `make`:

```sudo
make EXTRA_FLAGS="-buildmode pie" \
	EXTRA_LDFLAGS='-linkmode external -extldflags "-fno-PIC -static"' \
	BUILDTAGS="netgo osusergo static_build"
```sh
make STATIC=1
```

> *Note*:
> - static build is discouraged
> - static containerd binary does not support loading shared object plugins (`*.so`)
> - static build binaries are not position-independent
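
A quick sanity check of the result, assuming `file` and `ldd` are available on the build host:

```sh
make STATIC=1
file bin/containerd        # should report "statically linked"
ldd bin/containerd || true # "not a dynamic executable" is the expected output for a static binary
```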

# Via Docker container

The following instructions assume you are at the parent directory of containerd source directory.

## Build containerd
## Build containerd in a container

You can build `containerd` via a Linux-based Docker container.
You can build an image from this `Dockerfile`:

```
```dockerfile
FROM golang

RUN apt-get update && \

@@ -158,10 +161,11 @@ This mounts `containerd` repository
You are now ready to [build](#build-containerd):

```sh
make && make install
make && make install
```

## Build containerd and runc
## Build containerd and runc in a container

To have complete core container runtime, you will need both `containerd` and `runc`. It is possible to build both of these via Docker container.

You can use `git` to checkout `runc`:

@@ -177,7 +181,6 @@ FROM golang

RUN apt-get update && \
    apt-get install -y libbtrfs-dev libseccomp-dev

```

In our Docker container we will build `runc` build, which includes

@@ -246,6 +249,7 @@ go test -v -run . -test.root
```

Example output from directly running `go test` to execute the `TestContainerList` test:

```sh
sudo go test -v -run "TestContainerList" . -test.root
INFO[0000] running tests against containerd revision=f2ae8a020a985a8d9862c9eb5ab66902c2888361 version=v1.0.0-beta.2-49-gf2ae8a0

@@ -255,6 +259,10 @@ PASS
ok  	github.com/containerd/containerd	4.778s
```

> *Note*: in order to run `sudo go` you need to
> - either keep user PATH environment variable. ex: `sudo "PATH=$PATH" env go test <args>`
> - or use `go test -exec` ex: `go test -exec sudo -v -run "TestTarWithXattr" ./archive/ -test.root`

## Additional tools

### containerd-stress

133  Makefile

@@ -15,16 +15,22 @@

# Go command to use for build
GO ?= go
INSTALL ?= install

# Root directory of the project (absolute path).
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))

# Base path used to install.
DESTDIR ?= /usr/local
# The files will be installed under `$(DESTDIR)/$(PREFIX)`.
# The convention of `DESTDIR` was changed in containerd v1.6.
PREFIX ?= /usr/local
DATADIR ?= $(PREFIX)/share
MANDIR ?= $(DATADIR)/man

TEST_IMAGE_LIST ?=

# Used to populate variables in version package.
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
VERSION ?= $(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
PACKAGE=github.com/containerd/containerd
SHIM_CGO_ENABLED ?= 0

@@ -57,6 +63,7 @@ else
endif

ifndef GODEBUG
	EXTRA_LDFLAGS += -s -w
	DEBUG_GO_GCFLAGS :=
	DEBUG_TAGS :=
else

@@ -67,7 +74,7 @@ endif
WHALE = "🇩"
ONI = "👹"

RELEASE=containerd-$(VERSION:v%=%).${GOOS}-${GOARCH}
RELEASE=containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH}
CRIRELEASE=cri-containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH}
CRICNIRELEASE=cri-containerd-cni-$(VERSION:v%=%)-${GOOS}-${GOARCH}

@@ -82,12 +89,23 @@ ifdef BUILDTAGS
endif
GO_BUILDTAGS ?=
GO_BUILDTAGS += ${DEBUG_TAGS}
GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(GO_BUILDTAGS)",)
GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) $(EXTRA_LDFLAGS)'
ifneq ($(STATIC),)
	GO_BUILDTAGS += osusergo netgo static_build
endif
GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(strip $(GO_BUILDTAGS))",)

GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) $(EXTRA_LDFLAGS)
ifneq ($(STATIC),)
	GO_LDFLAGS += -extldflags "-static"
endif
GO_LDFLAGS+='

SHIM_GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) -extldflags "-static" $(EXTRA_LDFLAGS)'

# Project packages.
PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration)
API_PACKAGES=$(shell (cd api && $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration))
NON_API_PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration | grep -v "containerd/api")
TEST_REQUIRES_ROOT_PACKAGES=$(filter \
    ${PACKAGES}, \
    $(shell \

@@ -132,6 +150,9 @@ CRIDIR=$(OUTPUTDIR)/cri
.PHONY: clean all AUTHORS build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release mandir install-man genman install-cri-deps cri-release cri-cni-release cri-integration install-deps bin/cri-integration.test
.DEFAULT: default

# Forcibly set the default goal to all, in case an include above brought in a rule definition.
.DEFAULT_GOAL := all

all: binaries

check: proto-fmt ## run all linters

@@ -149,7 +170,13 @@ generate: protos

protos: bin/protoc-gen-gogoctrd ## generate protobuf
	@echo "$(WHALE) $@"
	@PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${PACKAGES}
	@find . -path ./vendor -prune -false -o -name '*.pb.go' | xargs rm
	$(eval TMPDIR := $(shell mktemp -d))
	@mv ${ROOTDIR}/vendor ${TMPDIR}
	@(cd ${ROOTDIR}/api && PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${API_PACKAGES})
	@(PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${NON_API_PACKAGES})
	@mv ${TMPDIR}/vendor ${ROOTDIR}
	@rm -rf ${TMPDIR}

check-protos: protos ## check if protobufs needs to be generated again
	@echo "$(WHALE) $@"

@@ -193,9 +220,19 @@ bin/cri-integration.test:

cri-integration: binaries bin/cri-integration.test ## run cri integration tests
	@echo "$(WHALE) $@"
	@./script/test/cri-integration.sh
	@bash -x ./script/test/cri-integration.sh
	@rm -rf bin/cri-integration.test

# build runc shimv2 with failpoint control, only used by integration test
bin/containerd-shim-runc-fp-v1: integration/failpoint/cmd/containerd-shim-runc-fp-v1 FORCE
	@echo "$(WHALE) $@"
	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./integration/failpoint/cmd/containerd-shim-runc-fp-v1

# build CNI bridge plugin wrapper with failpoint support, only used by integration test
bin/cni-bridge-fp: integration/failpoint/cmd/cni-bridge-fp FORCE
	@echo "$(WHALE) $@"
	@$(GO) build ${GO_BUILD_FLAGS} -o $@ ./integration/failpoint/cmd/cni-bridge-fp

benchmark: ## run benchmarks tests
	@echo "$(WHALE) $@"
	@$(GO) test ${TESTFLAGS} -bench . -run Benchmark -test.root

@@ -212,16 +249,16 @@ bin/%: cmd/% FORCE
	$(call BUILD_BINARY)

bin/containerd-shim: cmd/containerd-shim FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
	@echo "$(WHALE) bin/containerd-shim"
	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim
	@echo "$(WHALE) $@"
	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim

bin/containerd-shim-runc-v1: cmd/containerd-shim-runc-v1 FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
	@echo "$(WHALE) bin/containerd-shim-runc-v1"
	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runc-v1 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v1
	@echo "$(WHALE) $@"
	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v1

bin/containerd-shim-runc-v2: cmd/containerd-shim-runc-v2 FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
	@echo "$(WHALE) bin/containerd-shim-runc-v2"
	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runc-v2 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v2
	@echo "$(WHALE) $@"
	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v2

binaries: $(BINARIES) ## build binaries
	@echo "$(WHALE) $@"

@@ -237,30 +274,31 @@ genman: man/containerd.8 man/ctr.8

man/containerd.8: FORCE
	@echo "$(WHALE) $@"
	$(GO) run cmd/gen-manpages/main.go $(@F) $(@D)
	$(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D)

man/ctr.8: FORCE
	@echo "$(WHALE) $@"
	$(GO) run cmd/gen-manpages/main.go $(@F) $(@D)
	$(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D)

man/%: docs/man/%.md FORCE
	@echo "$(WHALE) $@"
	go-md2man -in "$<" -out "$@"

define installmanpage
mkdir -p $(DESTDIR)/man/man$(2);
gzip -c $(1) >$(DESTDIR)/man/man$(2)/$(3).gz;
$(INSTALL) -d $(DESTDIR)$(MANDIR)/man$(2);
gzip -c $(1) >$(DESTDIR)$(MANDIR)/man$(2)/$(3).gz;
endef

install-man:
install-man: man
	@echo "$(WHALE) $@"
	$(foreach manpage,$(addprefix man/,$(MANPAGES)), $(call installmanpage,$(manpage),$(subst .,,$(suffix $(manpage))),$(notdir $(manpage))))

releases/$(RELEASE).tar.gz: $(BINARIES)
	@echo "$(WHALE) $@"
	@rm -rf releases/$(RELEASE) releases/$(RELEASE).tar.gz
	@install -d releases/$(RELEASE)/bin
	@install $(BINARIES) releases/$(RELEASE)/bin
	@$(INSTALL) -d releases/$(RELEASE)/bin
	@$(INSTALL) $(BINARIES) releases/$(RELEASE)/bin
	@tar -czf releases/$(RELEASE).tar.gz -C releases/$(RELEASE) bin
	@rm -rf releases/$(RELEASE)

@@ -271,18 +309,18 @@ release: releases/$(RELEASE).tar.gz
# install of cri deps into release output directory
ifeq ($(GOOS),windows)
install-cri-deps: $(BINARIES)
	mkdir -p $(CRIDIR)
	$(INSTALL) -d $(CRIDIR)
	DESTDIR=$(CRIDIR) script/setup/install-cni-windows
	cp bin/* $(CRIDIR)
else
install-cri-deps: $(BINARIES)
	@rm -rf ${CRIDIR}
	@install -d ${CRIDIR}/usr/local/bin
	@install -D -m 755 bin/* ${CRIDIR}/usr/local/bin
	@install -d ${CRIDIR}/opt/containerd/cluster
	@$(INSTALL) -d ${CRIDIR}/usr/local/bin
	@$(INSTALL) -D -m 755 bin/* ${CRIDIR}/usr/local/bin
	@$(INSTALL) -d ${CRIDIR}/opt/containerd/cluster
	@cp -r contrib/gce ${CRIDIR}/opt/containerd/cluster/
	@install -d ${CRIDIR}/etc/systemd/system
	@install -m 644 containerd.service ${CRIDIR}/etc/systemd/system
	@$(INSTALL) -d ${CRIDIR}/etc/systemd/system
	@$(INSTALL) -m 644 containerd.service ${CRIDIR}/etc/systemd/system
	echo "CONTAINERD_VERSION: '$(VERSION:v%=%)'" | tee ${CRIDIR}/opt/containerd/cluster/version

	DESTDIR=$(CRIDIR) script/setup/install-runc

@@ -290,26 +328,30 @@ install-cri-deps: $(BINARIES)
	DESTDIR=$(CRIDIR) script/setup/install-critools
	DESTDIR=$(CRIDIR) script/setup/install-imgcrypt

	@install -d $(CRIDIR)/bin
	@install $(BINARIES) $(CRIDIR)/bin
	@$(INSTALL) -d $(CRIDIR)/bin
	@$(INSTALL) $(BINARIES) $(CRIDIR)/bin
endif

$(CRIDIR)/cri-containerd.DEPRECATED.txt:
	@mkdir -p $(CRIDIR)
	@$(INSTALL) -m 644 releases/cri-containerd.DEPRECATED.txt $@

ifeq ($(GOOS),windows)
releases/$(CRIRELEASE).tar.gz: install-cri-deps
releases/$(CRIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt
	@echo "$(WHALE) $@"
	@cd $(CRIDIR) && tar -czf ../../releases/$(CRIRELEASE).tar.gz *

releases/$(CRICNIRELEASE).tar.gz: install-cri-deps
releases/$(CRICNIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt
	@echo "$(WHALE) $@"
	@cd $(CRIDIR) && tar -czf ../../releases/$(CRICNIRELEASE).tar.gz *
else
releases/$(CRIRELEASE).tar.gz: install-cri-deps
releases/$(CRIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt
	@echo "$(WHALE) $@"
	@tar -czf releases/$(CRIRELEASE).tar.gz -C $(CRIDIR) etc/crictl.yaml etc/systemd usr opt/containerd
	@tar -czf releases/$(CRIRELEASE).tar.gz -C $(CRIDIR) cri-containerd.DEPRECATED.txt etc/crictl.yaml etc/systemd usr opt/containerd

releases/$(CRICNIRELEASE).tar.gz: install-cri-deps
releases/$(CRICNIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt
	@echo "$(WHALE) $@"
	@tar -czf releases/$(CRICNIRELEASE).tar.gz -C $(CRIDIR) etc usr opt
	@tar -czf releases/$(CRICNIRELEASE).tar.gz -C $(CRIDIR) cri-containerd.DEPRECATED.txt etc usr opt
endif

cri-release: releases/$(CRIRELEASE).tar.gz

@@ -341,15 +383,17 @@ clean-test: ## clean up debris from previously failed tests
	@rm -rf /run/containerd/fifo/*
	@rm -rf /run/containerd-test/*
	@rm -rf bin/cri-integration.test
	@rm -rf bin/cni-bridge-fp
	@rm -rf bin/containerd-shim-runc-fp-v1

install: ## install binaries
	@echo "$(WHALE) $@ $(BINARIES)"
	@mkdir -p $(DESTDIR)/bin
	@install $(BINARIES) $(DESTDIR)/bin
	@$(INSTALL) -d $(DESTDIR)$(PREFIX)/bin
	@$(INSTALL) $(BINARIES) $(DESTDIR)$(PREFIX)/bin

uninstall:
	@echo "$(WHALE) $@"
	@rm -f $(addprefix $(DESTDIR)/bin/,$(notdir $(BINARIES)))
	@rm -f $(addprefix $(DESTDIR)$(PREFIX)/bin/,$(notdir $(BINARIES)))

ifeq ($(GOOS),windows)
install-deps:

@@ -393,10 +437,23 @@ root-coverage: ## generate coverage profiles for unit tests that require root
	fi; \
	done )

vendor: ## vendor
vendor: ## ensure all the go.mod/go.sum files are up-to-date including vendor/ directory
	@echo "$(WHALE) $@"
	@$(GO) mod tidy
	@$(GO) mod vendor
	@$(GO) mod verify
	@(cd ${ROOTDIR}/integration/client && ${GO} mod tidy)

verify-vendor: ## verify if all the go.mod/go.sum files are up-to-date
	@echo "$(WHALE) $@"
	$(eval TMPDIR := $(shell mktemp -d))
	@cp -R ${ROOTDIR} ${TMPDIR}
	@(cd ${TMPDIR}/containerd && ${GO} mod tidy)
	@(cd ${TMPDIR}/containerd/integration/client && ${GO} mod tidy)
	@diff -r -u -q ${ROOTDIR} ${TMPDIR}/containerd
	@rm -rf ${TMPDIR}
	@${ROOTDIR}/script/verify-go-modules.sh integration/client

help: ## this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort

@@ -20,8 +20,10 @@ COMMANDS += containerd-shim containerd-shim-runc-v1 containerd-shim-runc-v2

# check GOOS for cross compile builds
ifeq ($(GOOS),linux)
	ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64 riscv64))
		GO_GCFLAGS += -buildmode=pie
	ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64))
		ifeq ($(STATIC),)
			GO_GCFLAGS += -buildmode=pie
		endif
	endif
endif

@@ -22,7 +22,11 @@ ifeq ($(GOARCH),amd64)
	TESTFLAGS_RACE= -race
endif

BINARIES:=$(addsuffix .exe,$(BINARIES))
WINDOWS_SHIM=bin/containerd-shim-runhcs-v1.exe
BINARIES := $(addsuffix .exe,$(BINARIES)) $(WINDOWS_SHIM)

$(WINDOWS_SHIM): script/setup/install-runhcs-shim go.mod
	DESTDIR=$(CURDIR)/bin $<

bin/%.exe: cmd/% FORCE
	$(BUILD_BINARY)

@@ -31,28 +31,11 @@ plugins = ["grpc", "fieldpath"]
  "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
  "google/rpc/status.proto" = "github.com/gogo/googleapis/google/rpc"

[[overrides]]
prefixes = ["github.com/containerd/containerd/api/events"]
plugins = ["fieldpath"] # disable grpc for this package

[[overrides]]
prefixes = ["github.com/containerd/containerd/api/services/ttrpc/events/v1"]
plugins = ["ttrpc", "fieldpath"]

[[overrides]]
# enable ttrpc and disable fieldpath and grpc for the shim
prefixes = ["github.com/containerd/containerd/runtime/v1/shim/v1", "github.com/containerd/containerd/runtime/v2/task"]
plugins = ["ttrpc"]

# Aggregrate the API descriptors to lock down API changes.
[[descriptors]]
prefix = "github.com/containerd/containerd/api"
target = "api/next.pb.txt"
ignore_files = [
  "google/protobuf/descriptor.proto",
  "gogoproto/gogo.proto"
]

# Lock down runc config
[[descriptors]]
prefix = "github.com/containerd/containerd/runtime/linux/runctypes"
31
README.md
31
README.md
|
@@ -1,9 +1,9 @@
![containerd banner](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/color/containerd-horizontal-color.png)
![containerd banner light mode](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/color/containerd-horizontal-color.png#gh-light-mode-only)
![containerd banner dark mode](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/white/containerd-horizontal-white.png#gh-dark-mode-only)

[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/containerd)](https://pkg.go.dev/github.com/containerd/containerd)
[![Build Status](https://github.com/containerd/containerd/workflows/CI/badge.svg)](https://github.com/containerd/containerd/actions?query=workflow%3ACI)
[![Nightlies](https://github.com/containerd/containerd/workflows/Nightly/badge.svg)](https://github.com/containerd/containerd/actions?query=workflow%3ANightly)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1271/badge)](https://bestpractices.coreinfrastructure.org/projects/1271)

@@ -21,7 +21,7 @@ We are a large inclusive OSS project that is welcoming help of any kind shape or
* Documentation help is needed to make the product easier to consume and extend.
* We need OSS community outreach / organizing help to get the word out; manage
  and create messaging and educational content; and to help with social media, community forums/groups, and google groups.
* We are actively inviting new [security advisors](https://github.com/containerd/project/blob/master/GOVERNANCE.md#security-advisors) to join the team.
* We are actively inviting new [security advisors](https://github.com/containerd/project/blob/main/GOVERNANCE.md#security-advisors) to join the team.
* New sub-projects are being created, core and non-core that could use additional development help.
* Each of the [containerd projects](https://github.com/containerd) has a list of issues currently being worked on or that need help resolving.
  - If the issue has not already been assigned to someone, or has not made recent progress and you are interested, please inquire.

@@ -41,7 +41,7 @@ If you are interested in trying out containerd see our example at [Getting Started
## Nightly builds

There are nightly builds available for download [here](https://github.com/containerd/containerd/actions?query=workflow%3ANightly).
Binaries are generated from `master` branch every night for `Linux` and `Windows`.
Binaries are generated from `main` branch every night for `Linux` and `Windows`.

Please be aware: nightly builds might have critical bugs, it's not recommended for use in production and no support provided.

@@ -68,6 +68,14 @@ your system. See more details in [Checkpoint and Restore](#checkpoint-and-restor

Build requirements for developers are listed in [BUILDING](BUILDING.md).


## Supported Registries

Any registry which is compliant with the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec)
is supported by containerd.

For configuring registries, see [registry host configuration documentation](docs/hosts.md)

## Features

### Client

@@ -77,8 +85,11 @@ containerd offers a full client package to help you integrate containerd into yo
```go

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/namespaces"
)

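The hunk above is the README's client import block. As a minimal sketch of how those packages are typically wired together (not part of this diff), the snippet below connects to a local containerd socket and pulls an image; the socket path, the `example` namespace, and the image reference are assumptions for illustration only. The `cio` package from the README import list is used when attaching IO to tasks and is omitted here for brevity.

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// Address of the local containerd socket; the default path is an assumption here.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Every client operation is namespaced; "example" is an arbitrary name.
	ctx := namespaces.WithNamespace(context.Background(), "example")

	// Pull and unpack an image as a basic smoke test of the connection.
	image, err := client.Pull(ctx, "docker.io/library/alpine:latest", containerd.WithPullUnpack)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("pulled %s", image.Name())
}
```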
@@ -269,7 +280,7 @@ loaded for the user's shell environment.
`cri` is a native plugin of containerd. Since containerd 1.1, the cri plugin is built into the release binaries and enabled by default.

> **Note:** As of containerd 1.5, the `cri` plugin is merged into the containerd/containerd repo. For example, the source code previously stored under [`containerd/cri/pkg`](https://github.com/containerd/cri/tree/release/1.4/pkg)
was moved to [`containerd/containerd/pkg/cri` package](https://github.com/containerd/containerd/tree/master/pkg/cri).
was moved to [`containerd/containerd/pkg/cri` package](https://github.com/containerd/containerd/tree/main/pkg/cri).

The `cri` plugin has reached GA status, representing that it is:
* Feature complete

@@ -289,7 +300,7 @@ A Kubernetes incubator project, [cri-tools](https://github.com/kubernetes-sigs/c
* [CRI Plugin Testing Guide](./docs/cri/testing.md)
* [Debugging Pods, Containers, and Images with `crictl`](./docs/cri/crictl.md)
* [Configuring `cri` Plugins](./docs/cri/config.md)
* [Configuring containerd](https://github.com/containerd/containerd/blob/master/docs/man/containerd-config.8.md)
* [Configuring containerd](https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.8.md)

### Communication

@@ -315,14 +326,14 @@ copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by

## Project details

**containerd** is the primary open source project within the broader containerd GitHub repository.
**containerd** is the primary open source project within the broader containerd GitHub organization.
However, all projects within the repo have common maintainership, governance, and contributing
guidelines which are stored in a `project` repository commonly for all containerd projects.

Please find all these core project documents, including the:
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
* [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.


RELEASES.md
@@ -27,7 +27,7 @@ considered "pre-releases".

### Major and Minor Releases

Major and minor releases of containerd will be made from master. Releases of
Major and minor releases of containerd will be made from main. Releases of
containerd will be marked with GPG signed tags and announced at
https://github.com/containerd/containerd/releases. The tag will be of the
format `v<major>.<minor>.<patch>` and should be made with the command `git tag

@@ -43,7 +43,7 @@ done against that branch.

Pre-releases, such as alphas, betas and release candidates will be conducted
from their source branch. For major and minor releases, these releases will be
done from master. For patch releases, these pre-releases should be done within
done from main. For patch releases, these pre-releases should be done within
the corresponding release branch.

While pre-releases are done to assist in the stabilization process, no

@@ -89,7 +89,7 @@ whichever is longer. Additionally, releases may have an extended security suppor
period after the end of the active period to accept security backports. This
timeframe will be decided by maintainers before the end of the active status.

The current state is available in the following table:
The current state is available in the following tables:

| Release | Status | Start | End of Life |
|---------|-------------|------------------|-------------------|
@@ -100,12 +100,27 @@ The current state is available in the following table:
| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 |
| [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 |
| [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 |
| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.4) | Active | August 17, 2020 | max(August 17, 2021, release of 1.5.0 + 6 months) |
| [1.5](https://github.com/containerd/containerd/milestone/30) | Next | TBD | max(TBD+1 year, release of 1.6.0 + 6 months) |
| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.12) | Extended | August 17, 2020 | March 3, 2022 (Extended) |
| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.9) | Active | May 3, 2021 | October 28, 2022 |
| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.0) | Active | February 15, 2022 | max(February 15, 2023 or release of 1.7.0 + 6 months) |
| [1.7](https://github.com/containerd/containerd/milestone/42) | Next | TBD | TBD |

Note that branches and release from before 1.0 may not follow these rules.

This table should be updated as part of the release preparation process.
| CRI-Containerd Version | Containerd Version | Kubernetes Version | CRI Version |
|------------------------|--------------------|--------------------|--------------|
| v1.0.0-alpha.x | | 1.7, 1.8 | v1alpha1 |
| v1.0.0-beta.x | | 1.9 | v1alpha1 |
| End-Of-Life | v1.1 (End-Of-Life) | 1.10+ | v1alpha2 |
| | v1.2 (End-Of-Life) | 1.10+ | v1alpha2 |
| | v1.3 (End-Of-Life) | 1.12+ | v1alpha2 |
| | v1.4 | 1.19+ | v1alpha2 |
| | v1.5 | 1.20+ | v1alpha2 |
| | v1.6 | 1.23+ | v1, v1alpha2 |

**Note:** The support table above specifies the Kubernetes Version that was supported at time of release of the containerd - cri integration and Kubernetes only supports n-3 minor release versions.

These tables should be updated as part of the release preparation process.

### Backporting

@@ -115,11 +130,11 @@ will be features for the next _minor_ or _major_ release. For the most part,
this process is straightforward and we are here to help make it as smooth as
possible.

If there are important fixes that need to be backported, please let use know in
If there are important fixes that need to be backported, please let us know in
one of three ways:

1. Open an issue.
2. Open a PR with cherry-picked change from master.
2. Open a PR with cherry-picked change from main.
3. Open a PR with a ported fix.

__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.

@@ -127,10 +142,10 @@ Remember that backported PRs must follow the versioning guidelines from this doc

Any release that is "active" can accept backports. Opening a backport PR is
fairly straightforward. The steps differ depending on whether you are pulling
a fix from master or need to draft a new commit specific to a particular
a fix from main or need to draft a new commit specific to a particular
branch.

To cherry pick a straightforward commit from master, simply use the cherry pick
To cherry pick a straightforward commit from main, simply use the cherry pick
process:

1. Pick the branch to which you want backported, usually in the format
@@ -154,7 +169,7 @@ process:
```

Make sure to replace `stevvooe` with whatever fork you are using to open
the PR. When you open the PR, make sure to switch `master` with whatever
the PR. When you open the PR, make sure to switch `main` with whatever
release branch you are targeting with the fix. Make sure the PR title has
`[<release branch>]` prefixed. e.g.:

@@ -162,11 +177,11 @@ process:
[release/1.4] Fix foo in bar
```

If there is no existing fix in master, you should first fix the bug in master,
If there is no existing fix in main, you should first fix the bug in main,
or ask us a maintainer or contributor to do it via an issue. Once that PR is
completed, open a PR using the process above.

Only when the bug is not seen in master and must be made for the specific
Only when the bug is not seen in main and must be made for the specific
release branch should you open a PR with new code.

## Public API Stability

@@ -177,12 +192,12 @@ containerd versions:

| Component | Status | Stabilized Version | Links |
|------------------|----------|--------------------|---------------|
| GRPC API | Stable | 1.0 | [api/](api) |
| GRPC API | Stable | 1.0 | [gRPC API](#grpc-api) |
| Metrics API | Stable | 1.0 | - |
| Runtime Shim API | Stable | 1.2 | - |
| Daemon Config | Stable | 1.0 | - |
| Daemon Config | Stable | 1.0 | - |
| CRI GRPC API | Stable | 1.6 (_CRI v1_) | [cri-api](https://github.com/kubernetes/cri-api/tree/master/pkg/apis/runtime/v1) |
| Go client API | Unstable | _future_ | [godoc](https://godoc.org/github.com/containerd/containerd) |
| CRI GRPC API | Unstable | v1alpha2 _current_ | [cri-api](https://github.com/kubernetes/cri-api/tree/master/pkg/apis/runtime/v1alpha2) |
| `ctr` tool | Unstable | Out of scope | - |

From the version stated in the above table, that component must adhere to the

@@ -201,7 +216,7 @@ version jump.
To ensure compatibility, we have collected the entire GRPC API symbol set into
a single file. At each _minor_ release of containerd, we will move the current
`next.pb.txt` file to a file named for the minor version, such as `1.0.pb.txt`,
enumerating the support services and messages. See [api/](api) for details.
enumerating the support services and messages.

Note that new services may be added in _minor_ releases. New service methods
and new fields on messages may be added if they are optional.
@@ -321,9 +336,10 @@ against total impact.

The deprecated features are shown in the following table:

| Component | Deprecation release | Target release for removal | Recommendation |
|----------------------------------------------------------------------|---------------------|----------------------------|-------------------------------|
| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` |
| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` |
| config.toml `version = 1` | containerd v1.5 | containerd v2.0 | Use config.toml `version = 2` |
| Built-in `aufs` snapshotter | containerd v1.5 | containerd v2.0 | Use `overlayfs` snapshotter |
| Component | Deprecation release | Target release for removal | Recommendation |
|----------------------------------------------------------------------|---------------------|----------------------------|-----------------------------------|
| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` |
| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` |
| config.toml `version = 1` | containerd v1.5 | containerd v2.0 | Use config.toml `version = 2` |
| Built-in `aufs` snapshotter | containerd v1.5 | containerd v2.0 | Use `overlayfs` snapshotter |
| `cri-containerd-*.tar.gz` release bundles | containerd v1.6 | containerd v2.0 | Use `containerd-*.tar.gz` bundles |

@ -15,9 +15,10 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Vagrantfile for cgroup2 and SELinux
|
||||
# Vagrantfile for Fedora and EL
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "fedora/34-cloud-base"
|
||||
config.vm.box = ENV["BOX"] || "fedora/37-cloud-base"
|
||||
config.vm.box_version = ENV["BOX_VERSION"]
|
||||
memory = 4096
|
||||
cpus = 2
|
||||
config.vm.provider :virtualbox do |v|
|
||||
|
@ -29,6 +30,8 @@ Vagrant.configure("2") do |config|
|
|||
v.cpus = cpus
|
||||
end
|
||||
|
||||
config.vm.synced_folder ".", "/vagrant", type: "rsync"
|
||||
|
||||
# Disabled by default. To run:
|
||||
# vagrant up --provision-with=upgrade-packages
|
||||
# To upgrade only specific packages:
|
||||
|
@ -67,30 +70,41 @@ Vagrant.configure("2") do |config|
|
|||
libselinux-devel \
|
||||
lsof \
|
||||
make \
|
||||
strace \
|
||||
${INSTALL_PACKAGES}
|
||||
SHELL
|
||||
end
|
||||
|
||||
# EL does not have /usr/local/{bin,sbin} in the PATH by default
|
||||
config.vm.provision "setup-etc-environment", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-setup-etc-environment"
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
set -eux -o pipefail
|
||||
cat >> /etc/environment <<EOF
|
||||
PATH=/usr/local/go/bin:/usr/local/bin:/usr/local/sbin:$PATH
|
||||
EOF
|
||||
source /etc/environment
|
||||
SHELL
|
||||
end
|
||||
|
||||
# To re-run this provisioner, installing a different version of go:
|
||||
# GO_VERSION="1.14.6" vagrant up --provision-with=install-golang
|
||||
#
|
||||
config.vm.provision "install-golang", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-golang"
|
||||
sh.env = {
|
||||
'GO_VERSION': ENV['GO_VERSION'] || "1.16.12",
|
||||
'GO_VERSION': ENV['GO_VERSION'] || "1.20.8",
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
set -eux -o pipefail
|
||||
curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local
|
||||
cat >> /etc/environment <<EOF
|
||||
PATH=/usr/local/go/bin:$PATH
|
||||
EOF
|
||||
source /etc/environment
|
||||
cat >> /etc/profile.d/sh.local <<EOF
|
||||
GOPATH=\\$HOME/go
|
||||
PATH=\\$GOPATH/bin:\\$PATH
|
||||
export GOPATH PATH
|
||||
git config --global --add safe.directory /vagrant
|
||||
EOF
|
||||
source /etc/profile.d/sh.local
|
||||
SHELL
|
||||
|
@ -135,7 +149,8 @@ EOF
|
|||
source /etc/environment
|
||||
source /etc/profile.d/sh.local
|
||||
set -eux -o pipefail
|
||||
${GOPATH}/src/github.com/containerd/containerd/script/setup/install-cni
|
||||
cd ${GOPATH}/src/github.com/containerd/containerd
|
||||
script/setup/install-cni
|
||||
PATH=/opt/cni/bin:$PATH type ${CNI_BINARIES} || true
|
||||
SHELL
|
||||
end
|
||||
|
@ -201,8 +216,8 @@ EOF
|
|||
SHELL
|
||||
end
|
||||
|
||||
# SELinux is permissive by default (via provisioning) in this VM. To re-run with SELinux enforcing:
|
||||
# vagrant up --provision-with=selinux-enforcing,test-integration
|
||||
# SELinux is Enforcing by default (via provisioning) in this VM. To re-run with SELinux disabled:
|
||||
# SELINUX=Disabled vagrant up --provision-with=selinux,test-integration
|
||||
#
|
||||
config.vm.provision "test-integration", type: "shell", run: "never" do |sh|
|
||||
sh.upload_path = "/tmp/test-integration"
|
||||
|
@ -218,12 +233,13 @@ EOF
|
|||
set -eux -o pipefail
|
||||
rm -rf /var/lib/containerd-test /run/containerd-test
|
||||
cd ${GOPATH}/src/github.com/containerd/containerd
|
||||
go test -v -count=1 -race ./metrics/cgroups
|
||||
make integration EXTRA_TESTFLAGS="-timeout 15m -no-criu -test.v" TEST_RUNTIME=io.containerd.runc.v2 RUNC_FLAVOR=$RUNC_FLAVOR
|
||||
SHELL
|
||||
end
|
||||
|
||||
# SELinux is permissive by default (via provisioning) in this VM. To re-run with SELinux enforcing:
|
||||
# vagrant up --provision-with=selinux-enforcing,test-cri
|
||||
# SELinux is Enforcing by default (via provisioning) in this VM. To re-run with SELinux disabled:
|
||||
# SELINUX=Disabled vagrant up --provision-with=selinux,test-cri
|
||||
#
|
||||
config.vm.provision "test-cri", type: "shell", run: "never" do |sh|
|
||||
sh.upload_path = "/tmp/test-cri"
|
||||
|
@ -241,6 +257,7 @@ EOF
|
|||
function cleanup()
|
||||
{
|
||||
journalctl -u containerd > /tmp/containerd.log
|
||||
cat /tmp/containerd.log
|
||||
systemctl stop containerd
|
||||
}
|
||||
selinux=$(getenforce)
|
||||
|
@ -253,7 +270,32 @@ EOF
|
|||
fi
|
||||
trap cleanup EXIT
|
||||
ctr version
|
||||
critest --parallel=$(nproc) --report-dir="${REPORT_DIR}" --ginkgo.skip='HostIpc is true'
|
||||
critest --parallel=$[$(nproc)+2] --ginkgo.skip='HostIpc is true' --report-dir="${REPORT_DIR}"
|
||||
SHELL
|
||||
end
|
||||
|
||||
# Rootless Podman is used for testing CRI-in-UserNS
|
||||
# (We could use rootless nerdctl, but we are using Podman here because it is available in dnf)
|
||||
config.vm.provision "install-rootless-podman", type: "shell", run: "never" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-rootless-podman"
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
set -eux -o pipefail
|
||||
# Delegate cgroup v2 controllers to rootless
|
||||
mkdir -p /etc/systemd/system/user@.service.d
|
||||
cat > /etc/systemd/system/user@.service.d/delegate.conf << EOF
|
||||
[Service]
|
||||
Delegate=yes
|
||||
EOF
|
||||
systemctl daemon-reload
|
||||
# Install Podman
|
||||
dnf install -y podman
|
||||
# Configure Podman to resolve `golang` to `docker.io/library/golang`
|
||||
mkdir -p /etc/containers
|
||||
cat > /etc/containers/registries.conf <<EOF
|
||||
[registries.search]
|
||||
registries = ['docker.io']
|
||||
EOF
|
||||
SHELL
|
||||
end
|
||||
|
||||
|
|
|
@ -7,12 +7,12 @@ plugins = ["grpc", "fieldpath"]
|
|||
[includes]
|
||||
# Include paths that will be added before all others. Typically, you want to
|
||||
# treat the root of the project as an include, but this may not be necessary.
|
||||
before = ["./protobuf"]
|
||||
before = []
|
||||
|
||||
# Paths that should be treated as include roots in relation to the vendor
|
||||
# directory. These will be calculated with the vendor directory nearest the
|
||||
# target package.
|
||||
packages = ["github.com/gogo/protobuf"]
|
||||
packages = ["github.com/gogo/protobuf", "github.com/gogo/googleapis"]
|
||||
|
||||
# Paths that will be added untouched to the end of the includes. We use
|
||||
# `/usr/local/include` to pickup the common install location of protobuf.
|
||||
|
@ -25,25 +25,30 @@ plugins = ["grpc", "fieldpath"]
|
|||
"gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
|
||||
"google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/empty.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/struct.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
|
||||
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
|
||||
"github/containerd/cgroups/stats/v1/metrics.proto" = "github.com/containerd/cgroups/stats/v1"
|
||||
"google/rpc/status.proto" = "github.com/gogo/googleapis/google/rpc"
|
||||
|
||||
[[overrides]]
|
||||
prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"]
|
||||
prefixes = ["github.com/containerd/containerd/api/events"]
|
||||
plugins = ["fieldpath"] # disable grpc for this package
|
||||
|
||||
[[overrides]]
|
||||
prefixes = ["github.com/containerd/containerd/api/services/ttrpc/events/v1"]
|
||||
plugins = ["ttrpc", "fieldpath"]
|
||||
|
||||
[[overrides]]
|
||||
# enable ttrpc and disable fieldpath and grpc for the shim
|
||||
prefixes = ["github.com/containerd/containerd/runtime/v1/shim/v1", "github.com/containerd/containerd/runtime/v2/task"]
|
||||
plugins = ["ttrpc"]
|
||||
|
||||
[[overrides]]
|
||||
prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"]
|
||||
plugins = ["ttrpc"]
|
||||
|
||||
[[overrides]]
|
||||
prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"]
|
||||
plugins = ["ttrpc"]
|
||||
|
||||
[[overrides]]
|
||||
prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"]
|
||||
plugins = ["ttrpc"]
|
||||
# Aggregrate the API descriptors to lock down API changes.
|
||||
[[descriptors]]
|
||||
prefix = "github.com/containerd/containerd/api"
|
||||
target = "next.pb.txt"
|
||||
ignore_files = [
|
||||
"google/protobuf/descriptor.proto",
|
||||
"gogoproto/gogo.proto"
|
||||
]
|
|
@ -14,4 +14,4 @@
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
package api
|
|
@ -2389,6 +2389,7 @@ file {
|
|||
java_outer_classname: "StatusProto"
|
||||
java_multiple_files: true
|
||||
go_package: "rpc"
|
||||
cc_enable_arenas: true
|
||||
objc_class_prefix: "RPC"
|
||||
}
|
||||
syntax: "proto3"
|
||||
|
@ -3820,6 +3821,13 @@ file {
|
|||
type_name: ".google.protobuf.Any"
|
||||
json_name: "options"
|
||||
}
|
||||
field {
|
||||
name: "runtime_path"
|
||||
number: 10
|
||||
label: LABEL_OPTIONAL
|
||||
type: TYPE_STRING
|
||||
json_name: "runtimePath"
|
||||
}
|
||||
}
|
||||
message_type {
|
||||
name: "CreateTaskResponse"
|
||||
|
|
|
@ -246,7 +246,7 @@ type ListContainersRequest struct {
|
|||
// filters. Expanded, containers that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
|
||||
|
|
|
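The `Filters` field comment above (repeated for each list API touched by this diff) documents OR semantics across filter strings. As a hedged illustration of what that means for a caller of the Go client, assuming a connected `*containerd.Client` and a namespaced context as in the README example earlier, the helper below returns containers that match either filter; the label key and ID pattern are hypothetical.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
)

// listByFilters shows the documented OR semantics: a container is returned
// if it matches ANY one of the filter strings passed to List.
func listByFilters(ctx context.Context, client *containerd.Client) error {
	cs, err := client.ContainerService().List(ctx,
		`labels."app"==web`, // hypothetical label
		`id~=^demo-`,        // OR: container IDs starting with "demo-"
	)
	if err != nil {
		return err
	}
	for _, c := range cs {
		fmt.Println(c.ID)
	}
	return nil
}
```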
@ -132,7 +132,7 @@ message ListContainersRequest {
|
|||
// filters. Expanded, containers that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
repeated string filters = 1;
|
||||
|
|
|
@ -14,6 +14,4 @@
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package proto
|
||||
|
||||
//go:generate protoc --go_out=. manifest.proto
|
||||
package containers
|
|
@ -299,7 +299,7 @@ type ListContentRequest struct {
|
|||
// filters. Expanded, containers that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
|
||||
|
|
|
@ -141,7 +141,7 @@ message ListContentRequest {
|
|||
// filters. Expanded, containers that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
repeated string filters = 1;
|
||||
|
|
|
@ -14,10 +14,4 @@
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const ENODATA = syscall.ENODATA
|
||||
package content
|
|
@ -14,4 +14,4 @@
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package stats
|
||||
package diff
|
|
@ -336,7 +336,7 @@ type ListImagesRequest struct {
|
|||
// filters. Expanded, images that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
|
||||
|
|
|
@ -119,7 +119,7 @@ message ListImagesRequest {
|
|||
// filters. Expanded, images that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
repeated string filters = 1;
|
||||
|
|
|
@ -115,7 +115,7 @@ type PluginsRequest struct {
|
|||
// filters. Expanded, plugins that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
|
||||
|
|
|
@ -89,7 +89,7 @@ message PluginsRequest {
|
|||
// filters. Expanded, plugins that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
repeated string filters = 1;
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package namespaces
|
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package snapshots
|
|
@ -620,7 +620,7 @@ type ListSnapshotsRequest struct {
|
|||
// filters. Expanded, images that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
Filters []string `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"`
|
||||
|
|
|
@ -158,7 +158,7 @@ message ListSnapshotsRequest{
|
|||
// filters. Expanded, images that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
repeated string filters = 2;
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package tasks
|
|
@@ -51,6 +51,7 @@ type CreateTaskRequest struct {
	Terminal bool `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"`
	Checkpoint *types.Descriptor `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
	Options *types1.Any `protobuf:"bytes,9,opt,name=options,proto3" json:"options,omitempty"`
	RuntimePath string `protobuf:"bytes,10,opt,name=runtime_path,json=runtimePath,proto3" json:"runtime_path,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`

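The new `RuntimePath` field is protobuf field number 10 with the length-delimited wire type (2), which is why the generated marshal code later in this file emits the key byte `0x52` and the unmarshal loop gains a `case 10`. A small worked example of that key calculation, added here for clarity and not part of the diff:

```go
package main

import "fmt"

func main() {
	const (
		fieldNumber = 10 // runtime_path
		wireType    = 2  // length-delimited: strings, bytes, embedded messages
	)
	// A protobuf field key is (field_number << 3) | wire_type.
	key := fieldNumber<<3 | wireType
	fmt.Printf("0x%X\n", key) // 0x52, matching dAtA[i] = 0x52 in the generated code
}
```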
@ -1169,93 +1170,95 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor_310e7127b8a26f14 = []byte{
|
||||
// 1376 bytes of a gzipped FileDescriptorProto
|
||||
// 1400 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0x45,
|
||||
0x14, 0xee, 0xfa, 0xee, 0xe3, 0xa4, 0x4d, 0x96, 0x34, 0x98, 0xa5, 0x8a, 0xc3, 0xf2, 0x62, 0x02,
|
||||
0x5d, 0x53, 0x17, 0x55, 0x55, 0x5b, 0x55, 0xe4, 0x46, 0x64, 0x41, 0xd5, 0x74, 0x5b, 0xa0, 0xaa,
|
||||
0x84, 0xc2, 0xc6, 0x3b, 0x71, 0x46, 0xb1, 0x77, 0xb6, 0x3b, 0xe3, 0xb4, 0xe6, 0x05, 0x7e, 0x42,
|
||||
0x5f, 0x79, 0x81, 0xbf, 0x93, 0x47, 0x1e, 0x11, 0xaa, 0x02, 0xf5, 0xbf, 0xe0, 0x0d, 0xcd, 0x65,
|
||||
0xd7, 0x1b, 0x3b, 0xf6, 0x3a, 0x4d, 0xc3, 0x4b, 0x32, 0x33, 0x7b, 0xce, 0x99, 0x33, 0xdf, 0xb9,
|
||||
0x7d, 0x09, 0xac, 0xb5, 0x30, 0xdb, 0xef, 0xee, 0x5a, 0x4d, 0xd2, 0xa9, 0x35, 0x89, 0xc7, 0x1c,
|
||||
0xec, 0xa1, 0xc0, 0x8d, 0x2f, 0x1d, 0x1f, 0xd7, 0x28, 0x0a, 0x0e, 0x71, 0x13, 0xd1, 0x1a, 0x73,
|
||||
0xe8, 0x01, 0xad, 0x1d, 0xde, 0x90, 0x0b, 0xcb, 0x0f, 0x08, 0x23, 0xfa, 0xb5, 0x81, 0xb4, 0x15,
|
||||
0x4a, 0x5a, 0x52, 0xe0, 0xf0, 0x86, 0xf1, 0x61, 0x8b, 0x90, 0x56, 0x1b, 0xd5, 0x84, 0xec, 0x6e,
|
||||
0x77, 0xaf, 0x86, 0x3a, 0x3e, 0xeb, 0x49, 0x55, 0xe3, 0x83, 0xe1, 0x8f, 0x8e, 0x17, 0x7e, 0x5a,
|
||||
0x68, 0x91, 0x16, 0x11, 0xcb, 0x1a, 0x5f, 0xa9, 0xd3, 0x5b, 0x53, 0xf9, 0xcb, 0x7a, 0x3e, 0xa2,
|
||||
0xb5, 0x0e, 0xe9, 0x7a, 0x4c, 0xe9, 0xdd, 0x3e, 0x8b, 0x1e, 0x62, 0x01, 0x6e, 0xaa, 0xd7, 0x19,
|
||||
0x77, 0xcf, 0xa0, 0xe9, 0x22, 0xda, 0x0c, 0xb0, 0xcf, 0x48, 0xa0, 0x94, 0xef, 0x9c, 0x41, 0x99,
|
||||
0x23, 0x26, 0x7e, 0x28, 0xdd, 0xca, 0x30, 0x36, 0x0c, 0x77, 0x10, 0x65, 0x4e, 0xc7, 0x97, 0x02,
|
||||
0xe6, 0x51, 0x0a, 0xe6, 0xd7, 0x03, 0xe4, 0x30, 0xf4, 0xc4, 0xa1, 0x07, 0x36, 0x7a, 0xde, 0x45,
|
||||
0x94, 0xe9, 0x75, 0x98, 0x89, 0xcc, 0xef, 0x60, 0xb7, 0xac, 0x2d, 0x6b, 0xd5, 0xe2, 0xda, 0x95,
|
||||
0xfe, 0x71, 0xa5, 0xb4, 0x1e, 0x9e, 0x37, 0x36, 0xec, 0x52, 0x24, 0xd4, 0x70, 0xf5, 0x1a, 0xe4,
|
||||
0x02, 0x42, 0xd8, 0x1e, 0x2d, 0xa7, 0x97, 0xd3, 0xd5, 0x52, 0xfd, 0x7d, 0x2b, 0x16, 0x52, 0xe1,
|
||||
0x9d, 0xf5, 0x80, 0x83, 0x69, 0x2b, 0x31, 0x7d, 0x01, 0xb2, 0x94, 0xb9, 0xd8, 0x2b, 0x67, 0xb8,
|
||||
0x75, 0x5b, 0x6e, 0xf4, 0x45, 0xc8, 0x51, 0xe6, 0x92, 0x2e, 0x2b, 0x67, 0xc5, 0xb1, 0xda, 0xa9,
|
||||
0x73, 0x14, 0x04, 0xe5, 0x5c, 0x74, 0x8e, 0x82, 0x40, 0x37, 0xa0, 0xc0, 0x50, 0xd0, 0xc1, 0x9e,
|
||||
0xd3, 0x2e, 0xe7, 0x97, 0xb5, 0x6a, 0xc1, 0x8e, 0xf6, 0xfa, 0x3d, 0x80, 0xe6, 0x3e, 0x6a, 0x1e,
|
||||
0xf8, 0x04, 0x7b, 0xac, 0x5c, 0x58, 0xd6, 0xaa, 0xa5, 0xfa, 0xb5, 0x51, 0xb7, 0x36, 0x22, 0xc4,
|
||||
0xed, 0x98, 0xbc, 0x6e, 0x41, 0x9e, 0xf8, 0x0c, 0x13, 0x8f, 0x96, 0x8b, 0x42, 0x75, 0xc1, 0x92,
|
||||
0x68, 0x5a, 0x21, 0x9a, 0xd6, 0xaa, 0xd7, 0xb3, 0x43, 0x21, 0xf3, 0x19, 0xe8, 0x71, 0x24, 0xa9,
|
||||
0x4f, 0x3c, 0x8a, 0xde, 0x0a, 0xca, 0x39, 0x48, 0xfb, 0xd8, 0x2d, 0xa7, 0x96, 0xb5, 0xea, 0xac,
|
||||
0xcd, 0x97, 0x66, 0x0b, 0x66, 0x1e, 0x33, 0x27, 0x60, 0xe7, 0x09, 0xd0, 0xc7, 0x90, 0x47, 0x2f,
|
||||
0x51, 0x73, 0x47, 0x59, 0x2e, 0xae, 0x41, 0xff, 0xb8, 0x92, 0xdb, 0x7c, 0x89, 0x9a, 0x8d, 0x0d,
|
||||
0x3b, 0xc7, 0x3f, 0x35, 0x5c, 0xf3, 0x23, 0x98, 0x55, 0x17, 0x29, 0xff, 0x95, 0x2f, 0xda, 0xc0,
|
||||
0x97, 0x2d, 0x98, 0xdf, 0x40, 0x6d, 0x74, 0xee, 0x8c, 0x31, 0x7f, 0xd3, 0xe0, 0xb2, 0xb4, 0x14,
|
||||
0xdd, 0xb6, 0x08, 0xa9, 0x48, 0x39, 0xd7, 0x3f, 0xae, 0xa4, 0x1a, 0x1b, 0x76, 0x0a, 0x9f, 0x82,
|
||||
0x88, 0x5e, 0x81, 0x12, 0x7a, 0x89, 0xd9, 0x0e, 0x65, 0x0e, 0xeb, 0xf2, 0x9c, 0xe3, 0x5f, 0x80,
|
||||
0x1f, 0x3d, 0x16, 0x27, 0xfa, 0x2a, 0x14, 0xf9, 0x0e, 0xb9, 0x3b, 0x0e, 0x13, 0x29, 0x56, 0xaa,
|
||||
0x1b, 0x23, 0x01, 0x7c, 0x12, 0x96, 0xc3, 0x5a, 0xe1, 0xe8, 0xb8, 0x72, 0xe9, 0xd5, 0xdf, 0x15,
|
||||
0xcd, 0x2e, 0x48, 0xb5, 0x55, 0x66, 0x12, 0x58, 0x90, 0xfe, 0x6d, 0x07, 0xa4, 0x89, 0x28, 0xbd,
|
||||
0x70, 0xf4, 0x11, 0xc0, 0x16, 0xba, 0xf8, 0x20, 0x6f, 0x42, 0x49, 0x5c, 0xa3, 0x40, 0xbf, 0x05,
|
||||
0x79, 0x5f, 0x3e, 0x50, 0x5c, 0x31, 0x54, 0x23, 0x87, 0x37, 0x54, 0x99, 0x84, 0x20, 0x84, 0xc2,
|
||||
0xe6, 0x0a, 0xcc, 0x7d, 0x83, 0x29, 0xe3, 0x69, 0x10, 0x41, 0xb3, 0x08, 0xb9, 0x3d, 0xdc, 0x66,
|
||||
0x28, 0x90, 0xde, 0xda, 0x6a, 0xc7, 0x93, 0x26, 0x26, 0x1b, 0xd5, 0x46, 0x56, 0xb4, 0xf8, 0xb2,
|
||||
0x26, 0x3a, 0xc6, 0xe4, 0x6b, 0xa5, 0xa8, 0xf9, 0x4a, 0x83, 0xd2, 0xd7, 0xb8, 0xdd, 0xbe, 0x68,
|
||||
0x90, 0x44, 0xc3, 0xc1, 0x2d, 0xde, 0x56, 0x64, 0x6e, 0xa9, 0x1d, 0x4f, 0x45, 0xa7, 0xdd, 0x16,
|
||||
0x19, 0x55, 0xb0, 0xf9, 0xd2, 0xfc, 0x57, 0x03, 0x9d, 0x2b, 0xbf, 0x83, 0x2c, 0x89, 0x7a, 0x62,
|
||||
0xea, 0xf4, 0x9e, 0x98, 0x1e, 0xd3, 0x13, 0x33, 0x63, 0x7b, 0x62, 0x76, 0xa8, 0x27, 0x56, 0x21,
|
||||
0x43, 0x7d, 0xd4, 0x14, 0x5d, 0x74, 0x5c, 0x4b, 0x13, 0x12, 0x71, 0x94, 0xf2, 0x63, 0x53, 0xe9,
|
||||
0x2a, 0xbc, 0x77, 0xe2, 0xe9, 0x32, 0xb2, 0xe6, 0xaf, 0x1a, 0xcc, 0xd9, 0x88, 0xe2, 0x9f, 0xd0,
|
||||
0x36, 0xeb, 0x5d, 0x78, 0xa8, 0x16, 0x20, 0xfb, 0x02, 0xbb, 0x6c, 0x5f, 0x45, 0x4a, 0x6e, 0x38,
|
||||
0x3a, 0xfb, 0x08, 0xb7, 0xf6, 0x65, 0xf5, 0xcf, 0xda, 0x6a, 0x67, 0xfe, 0x0c, 0x97, 0xd7, 0xdb,
|
||||
0x84, 0xa2, 0xc6, 0xc3, 0xff, 0xc3, 0x31, 0x19, 0xce, 0xb4, 0x88, 0x82, 0xdc, 0x98, 0x5f, 0xc1,
|
||||
0xdc, 0xb6, 0xd3, 0xa5, 0xe7, 0xee, 0x9f, 0x5b, 0x30, 0x6f, 0x23, 0xda, 0xed, 0x9c, 0xdb, 0xd0,
|
||||
0x26, 0x5c, 0xe1, 0xc5, 0xb9, 0x8d, 0xdd, 0xf3, 0x24, 0xaf, 0x69, 0xcb, 0x7e, 0x20, 0xcd, 0xa8,
|
||||
0x12, 0xbf, 0x0f, 0x45, 0xd5, 0x2e, 0x50, 0x58, 0xe6, 0xcb, 0x93, 0xca, 0xbc, 0xe1, 0xed, 0x11,
|
||||
0x7b, 0xa0, 0x62, 0xbe, 0xd6, 0xe0, 0xea, 0x7a, 0x34, 0x93, 0xcf, 0xcb, 0x51, 0x76, 0x60, 0xde,
|
||||
0x77, 0x02, 0xe4, 0xb1, 0x9d, 0x18, 0x2f, 0x90, 0xe1, 0xab, 0xf3, 0xfe, 0xff, 0xd7, 0x71, 0x65,
|
||||
0x25, 0xc6, 0xb6, 0x88, 0x8f, 0xbc, 0x48, 0x9d, 0xd6, 0x5a, 0xe4, 0xba, 0x8b, 0x5b, 0x88, 0x32,
|
||||
0x6b, 0x43, 0xfc, 0xb2, 0xe7, 0xa4, 0xb1, 0xf5, 0x53, 0x39, 0x43, 0x7a, 0x1a, 0xce, 0xf0, 0x14,
|
||||
0x16, 0x87, 0x5f, 0x17, 0x01, 0x57, 0x1a, 0x30, 0xc1, 0x53, 0x3b, 0xe4, 0x08, 0x79, 0x89, 0x2b,
|
||||
0x98, 0xbf, 0xa7, 0x60, 0xfe, 0x5b, 0xdf, 0x7d, 0x07, 0xc4, 0xae, 0x0e, 0xc5, 0x00, 0x51, 0xd2,
|
||||
0x0d, 0x9a, 0x88, 0x0a, 0xb0, 0xc6, 0xbd, 0x6a, 0x20, 0xa6, 0xef, 0x42, 0xc9, 0xf1, 0x3c, 0xc2,
|
||||
0x9c, 0x10, 0x0b, 0xee, 0xfd, 0x97, 0xd6, 0x24, 0x92, 0x6f, 0x8d, 0x78, 0x6b, 0xad, 0x0e, 0x4c,
|
||||
0x6c, 0x7a, 0x2c, 0xe8, 0xd9, 0x71, 0xa3, 0xc6, 0x7d, 0x98, 0x1b, 0x16, 0xe0, 0xcd, 0xf9, 0x00,
|
||||
0xf5, 0xd4, 0xec, 0xe1, 0x4b, 0x5e, 0x82, 0x87, 0x4e, 0xbb, 0x8b, 0xc2, 0x8e, 0x2a, 0x36, 0x77,
|
||||
0x52, 0xb7, 0x35, 0x73, 0x05, 0x2e, 0x3f, 0x90, 0x2c, 0x3d, 0x44, 0xa7, 0x0c, 0x79, 0x39, 0xae,
|
||||
0x24, 0xde, 0x45, 0x3b, 0xdc, 0xf2, 0x0a, 0x89, 0x64, 0xa3, 0xe1, 0x95, 0x57, 0x24, 0x5f, 0x05,
|
||||
0xa7, 0x7c, 0x0a, 0xe1, 0x15, 0x02, 0x76, 0x28, 0x68, 0xee, 0x41, 0xe9, 0x7b, 0x07, 0x5f, 0xfc,
|
||||
0x80, 0x0f, 0x60, 0x46, 0xde, 0xa3, 0x7c, 0x1d, 0x22, 0x4b, 0xda, 0x64, 0xb2, 0x94, 0x7a, 0x1b,
|
||||
0xb2, 0x54, 0x7f, 0x3d, 0x03, 0x59, 0x31, 0xde, 0xf5, 0x03, 0xc8, 0x49, 0x22, 0xac, 0xd7, 0x26,
|
||||
0x47, 0x7c, 0xe4, 0x0f, 0x0f, 0xe3, 0xf3, 0xe9, 0x15, 0xd4, 0xd3, 0x7e, 0x84, 0xac, 0x20, 0xac,
|
||||
0xfa, 0xca, 0x64, 0xd5, 0x38, 0x7d, 0x36, 0x3e, 0x9d, 0x4a, 0x56, 0xdd, 0xd0, 0x82, 0x9c, 0x64,
|
||||
0x81, 0x49, 0xcf, 0x19, 0x61, 0xc5, 0xc6, 0x67, 0xd3, 0x28, 0x44, 0x17, 0x3d, 0x87, 0xd9, 0x13,
|
||||
0x74, 0x53, 0xaf, 0x4f, 0xa3, 0x7e, 0x92, 0x75, 0x9c, 0xf1, 0xca, 0x67, 0x90, 0xde, 0x42, 0x4c,
|
||||
0xaf, 0x4e, 0x56, 0x1a, 0x70, 0x52, 0xe3, 0x93, 0x29, 0x24, 0x23, 0xdc, 0x32, 0x7c, 0x1c, 0xe8,
|
||||
0xd6, 0x64, 0x95, 0x61, 0x0a, 0x69, 0xd4, 0xa6, 0x96, 0x57, 0x17, 0x35, 0x20, 0xc3, 0x19, 0xa1,
|
||||
0x9e, 0xe0, 0x5b, 0x8c, 0x35, 0x1a, 0x8b, 0x23, 0xc9, 0xbd, 0xd9, 0xf1, 0x59, 0x4f, 0xdf, 0x86,
|
||||
0x0c, 0x2f, 0x25, 0x3d, 0x21, 0x0f, 0x47, 0xd9, 0xde, 0x58, 0x8b, 0x8f, 0xa1, 0x18, 0x11, 0xa1,
|
||||
0x24, 0x28, 0x86, 0x19, 0xd3, 0x58, 0xa3, 0x0f, 0x21, 0xaf, 0x28, 0x8c, 0x9e, 0x10, 0xef, 0x93,
|
||||
0x4c, 0x67, 0x82, 0xc1, 0xac, 0xa0, 0x24, 0x49, 0x1e, 0x0e, 0xf3, 0x96, 0xb1, 0x06, 0x1f, 0x41,
|
||||
0x4e, 0x72, 0x93, 0xa4, 0xa2, 0x19, 0x61, 0x30, 0x63, 0x4d, 0x62, 0x28, 0x84, 0xf4, 0x42, 0xbf,
|
||||
0x9e, 0x9c, 0x23, 0x31, 0x36, 0x63, 0x58, 0xd3, 0x8a, 0xab, 0x8c, 0x7a, 0x01, 0x10, 0x1b, 0xea,
|
||||
0x37, 0x13, 0x20, 0x3e, 0x8d, 0x9e, 0x18, 0x5f, 0x9c, 0x4d, 0x49, 0x5d, 0xfc, 0x08, 0x72, 0x72,
|
||||
0x0c, 0x26, 0xc1, 0x36, 0x32, 0x2c, 0xc7, 0xc2, 0xb6, 0x07, 0x79, 0x35, 0xba, 0x92, 0x72, 0xe5,
|
||||
0xe4, 0x34, 0x34, 0xae, 0x4f, 0x29, 0xad, 0x5c, 0xff, 0x01, 0x32, 0x7c, 0xe6, 0x24, 0x55, 0x61,
|
||||
0x6c, 0xfe, 0x19, 0x2b, 0xd3, 0x88, 0x4a, 0xf3, 0x6b, 0xdf, 0x1d, 0xbd, 0x59, 0xba, 0xf4, 0xe7,
|
||||
0x9b, 0xa5, 0x4b, 0xbf, 0xf4, 0x97, 0xb4, 0xa3, 0xfe, 0x92, 0xf6, 0x47, 0x7f, 0x49, 0xfb, 0xa7,
|
||||
0xbf, 0xa4, 0x3d, 0xbb, 0xf7, 0x76, 0xff, 0x7e, 0xbc, 0x2b, 0x16, 0x4f, 0x53, 0xbb, 0x39, 0x01,
|
||||
0xd8, 0xcd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x55, 0xbf, 0x54, 0xc7, 0x14, 0x00, 0x00,
|
||||
0x14, 0xee, 0xfa, 0xee, 0xe3, 0xa4, 0x4d, 0x96, 0x34, 0x98, 0xa5, 0x8a, 0xd3, 0xe5, 0xc5, 0x04,
|
||||
0xba, 0xa6, 0x2e, 0xaa, 0xaa, 0xb6, 0xaa, 0xc8, 0x8d, 0xc8, 0x82, 0xaa, 0xe9, 0xb6, 0x40, 0x55,
|
||||
0x09, 0x99, 0x8d, 0x77, 0x62, 0x8f, 0x62, 0xef, 0x6c, 0x77, 0xc6, 0x69, 0xcd, 0x0b, 0xfc, 0x84,
|
||||
0xbe, 0xf2, 0x02, 0x7f, 0xa7, 0x8f, 0x3c, 0x22, 0x54, 0x05, 0xea, 0x57, 0x7e, 0x01, 0x6f, 0x68,
|
||||
0x2e, 0xbb, 0xde, 0xd8, 0xf1, 0x25, 0x4d, 0xc3, 0x4b, 0x3b, 0x33, 0x7b, 0xce, 0x99, 0x33, 0xdf,
|
||||
0xb9, 0x7d, 0x0e, 0x6c, 0x34, 0x31, 0x6b, 0x75, 0xf7, 0xac, 0x06, 0xe9, 0x54, 0x1a, 0xc4, 0x63,
|
||||
0x0e, 0xf6, 0x50, 0xe0, 0xc6, 0x97, 0x8e, 0x8f, 0x2b, 0x14, 0x05, 0x87, 0xb8, 0x81, 0x68, 0x85,
|
||||
0x39, 0xf4, 0x80, 0x56, 0x0e, 0xaf, 0xcb, 0x85, 0xe5, 0x07, 0x84, 0x11, 0xfd, 0xca, 0x40, 0xda,
|
||||
0x0a, 0x25, 0x2d, 0x29, 0x70, 0x78, 0xdd, 0xf8, 0xb0, 0x49, 0x48, 0xb3, 0x8d, 0x2a, 0x42, 0x76,
|
||||
0xaf, 0xbb, 0x5f, 0x41, 0x1d, 0x9f, 0xf5, 0xa4, 0xaa, 0xf1, 0xc1, 0xf0, 0x47, 0xc7, 0x0b, 0x3f,
|
||||
0x2d, 0x35, 0x49, 0x93, 0x88, 0x65, 0x85, 0xaf, 0xd4, 0xe9, 0xcd, 0x99, 0xfc, 0x65, 0x3d, 0x1f,
|
||||
0xd1, 0x4a, 0x87, 0x74, 0x3d, 0xa6, 0xf4, 0x6e, 0x9d, 0x46, 0x0f, 0xb1, 0x00, 0x37, 0xd4, 0xeb,
|
||||
0x8c, 0x3b, 0xa7, 0xd0, 0x74, 0x11, 0x6d, 0x04, 0xd8, 0x67, 0x24, 0x50, 0xca, 0xb7, 0x4f, 0xa1,
|
||||
0xcc, 0x11, 0x13, 0xff, 0x28, 0xdd, 0xd2, 0x30, 0x36, 0x0c, 0x77, 0x10, 0x65, 0x4e, 0xc7, 0x97,
|
||||
0x02, 0xe6, 0x3f, 0x09, 0x58, 0xdc, 0x0c, 0x90, 0xc3, 0xd0, 0x63, 0x87, 0x1e, 0xd8, 0xe8, 0x59,
|
||||
0x17, 0x51, 0xa6, 0x57, 0x61, 0x2e, 0x32, 0x5f, 0xc7, 0x6e, 0x51, 0x5b, 0xd5, 0xca, 0xf9, 0x8d,
|
||||
0x4b, 0xfd, 0xa3, 0x52, 0x61, 0x33, 0x3c, 0xaf, 0x6d, 0xd9, 0x85, 0x48, 0xa8, 0xe6, 0xea, 0x15,
|
||||
0xc8, 0x04, 0x84, 0xb0, 0x7d, 0x5a, 0x4c, 0xae, 0x26, 0xcb, 0x85, 0xea, 0xfb, 0x56, 0x2c, 0xa4,
|
||||
0xc2, 0x3b, 0xeb, 0x3e, 0x07, 0xd3, 0x56, 0x62, 0xfa, 0x12, 0xa4, 0x29, 0x73, 0xb1, 0x57, 0x4c,
|
||||
0x71, 0xeb, 0xb6, 0xdc, 0xe8, 0xcb, 0x90, 0xa1, 0xcc, 0x25, 0x5d, 0x56, 0x4c, 0x8b, 0x63, 0xb5,
|
||||
0x53, 0xe7, 0x28, 0x08, 0x8a, 0x99, 0xe8, 0x1c, 0x05, 0x81, 0x6e, 0x40, 0x8e, 0xa1, 0xa0, 0x83,
|
||||
0x3d, 0xa7, 0x5d, 0xcc, 0xae, 0x6a, 0xe5, 0x9c, 0x1d, 0xed, 0xf5, 0xbb, 0x00, 0x8d, 0x16, 0x6a,
|
||||
0x1c, 0xf8, 0x04, 0x7b, 0xac, 0x98, 0x5b, 0xd5, 0xca, 0x85, 0xea, 0x95, 0x51, 0xb7, 0xb6, 0x22,
|
||||
0xc4, 0xed, 0x98, 0xbc, 0x6e, 0x41, 0x96, 0xf8, 0x0c, 0x13, 0x8f, 0x16, 0xf3, 0x42, 0x75, 0xc9,
|
||||
0x92, 0x68, 0x5a, 0x21, 0x9a, 0xd6, 0xba, 0xd7, 0xb3, 0x43, 0x21, 0xfd, 0x2a, 0xcc, 0x05, 0x5d,
|
||||
0x8f, 0x03, 0x5c, 0xf7, 0x1d, 0xd6, 0x2a, 0x82, 0xf0, 0xb3, 0xa0, 0xce, 0x76, 0x1d, 0xd6, 0x32,
|
||||
0x9f, 0x82, 0x1e, 0x07, 0x9b, 0xfa, 0xc4, 0xa3, 0xe8, 0xad, 0xd0, 0x5e, 0x80, 0xa4, 0x8f, 0xdd,
|
||||
0x62, 0x62, 0x55, 0x2b, 0xcf, 0xdb, 0x7c, 0x69, 0x36, 0x61, 0xee, 0x11, 0x73, 0x02, 0x76, 0x96,
|
||||
0x18, 0x7e, 0x04, 0x59, 0xf4, 0x02, 0x35, 0xea, 0xca, 0x72, 0x7e, 0x03, 0xfa, 0x47, 0xa5, 0xcc,
|
||||
0xf6, 0x0b, 0xd4, 0xa8, 0x6d, 0xd9, 0x19, 0xfe, 0xa9, 0xe6, 0x9a, 0x57, 0x61, 0x5e, 0x5d, 0xa4,
|
||||
0xfc, 0x57, 0xbe, 0x68, 0x03, 0x5f, 0x76, 0x60, 0x71, 0x0b, 0xb5, 0xd1, 0x99, 0x93, 0xca, 0xfc,
|
||||
0x55, 0x83, 0x8b, 0xd2, 0x52, 0x74, 0xdb, 0x32, 0x24, 0x22, 0xe5, 0x4c, 0xff, 0xa8, 0x94, 0xa8,
|
||||
0x6d, 0xd9, 0x09, 0x7c, 0x02, 0x22, 0x7a, 0x09, 0x0a, 0xe8, 0x05, 0x66, 0x75, 0xca, 0x1c, 0xd6,
|
||||
0xe5, 0x69, 0xc9, 0xbf, 0x00, 0x3f, 0x7a, 0x24, 0x4e, 0xf4, 0x75, 0xc8, 0xf3, 0x1d, 0x72, 0xeb,
|
||||
0x0e, 0x13, 0x59, 0x58, 0xa8, 0x1a, 0x23, 0x31, 0x7e, 0x1c, 0x56, 0xcc, 0x46, 0xee, 0xd5, 0x51,
|
||||
0xe9, 0xc2, 0xcb, 0xbf, 0x4a, 0x9a, 0x9d, 0x93, 0x6a, 0xeb, 0xcc, 0x24, 0xb0, 0x24, 0xfd, 0xdb,
|
||||
0x0d, 0x48, 0x03, 0x51, 0x7a, 0xee, 0xe8, 0x23, 0x80, 0x1d, 0x74, 0xfe, 0x41, 0xde, 0x86, 0x82,
|
||||
0xb8, 0x46, 0x81, 0x7e, 0x13, 0xb2, 0xbe, 0x7c, 0xa0, 0xb8, 0x62, 0xa8, 0x8c, 0x0e, 0xaf, 0xab,
|
||||
0x4a, 0x0a, 0x41, 0x08, 0x85, 0xcd, 0x35, 0x58, 0xf8, 0x1a, 0x53, 0xc6, 0xd3, 0x20, 0x82, 0x66,
|
||||
0x19, 0x32, 0xfb, 0xb8, 0xcd, 0x50, 0x20, 0xbd, 0xb5, 0xd5, 0x8e, 0x27, 0x4d, 0x4c, 0x36, 0xaa,
|
||||
0x8d, 0xb4, 0x98, 0x02, 0x45, 0x4d, 0x34, 0x95, 0xc9, 0xd7, 0x4a, 0x51, 0xf3, 0xa5, 0x06, 0x85,
|
||||
0xaf, 0x70, 0xbb, 0x7d, 0xde, 0x20, 0x89, 0x9e, 0x84, 0x9b, 0xbc, 0xf3, 0xc8, 0xdc, 0x52, 0x3b,
|
||||
0x9e, 0x8a, 0x4e, 0xbb, 0x2d, 0x32, 0x2a, 0x67, 0xf3, 0xa5, 0xf9, 0xaf, 0x06, 0x3a, 0x57, 0x7e,
|
||||
0x07, 0x59, 0x12, 0xb5, 0xcd, 0xc4, 0xc9, 0x6d, 0x33, 0x39, 0xa6, 0x6d, 0xa6, 0xc6, 0xb6, 0xcd,
|
||||
0xf4, 0x50, 0xdb, 0x2c, 0x43, 0x8a, 0xfa, 0xa8, 0x21, 0x1a, 0xed, 0xb8, 0xae, 0x27, 0x24, 0xe2,
|
||||
0x28, 0x65, 0xc7, 0xa6, 0xd2, 0x65, 0x78, 0xef, 0xd8, 0xd3, 0x65, 0x64, 0xcd, 0x5f, 0x34, 0x58,
|
||||
0xb0, 0x11, 0xc5, 0x3f, 0xa2, 0x5d, 0xd6, 0x3b, 0xf7, 0x50, 0x2d, 0x41, 0xfa, 0x39, 0x76, 0x59,
|
||||
0x4b, 0x45, 0x4a, 0x6e, 0x38, 0x3a, 0x2d, 0x84, 0x9b, 0x2d, 0x59, 0xfd, 0xf3, 0xb6, 0xda, 0x99,
|
||||
0x3f, 0xc1, 0xc5, 0xcd, 0x36, 0xa1, 0xa8, 0xf6, 0xe0, 0xff, 0x70, 0x4c, 0x86, 0x33, 0x29, 0xa2,
|
||||
0x20, 0x37, 0xe6, 0x97, 0xb0, 0xb0, 0xeb, 0x74, 0xe9, 0x99, 0xfb, 0xe7, 0x0e, 0x2c, 0xda, 0x88,
|
||||
0x76, 0x3b, 0x67, 0x36, 0xb4, 0x0d, 0x97, 0x78, 0x71, 0xee, 0x62, 0xf7, 0x2c, 0xc9, 0x6b, 0xda,
|
||||
0xb2, 0x1f, 0x48, 0x33, 0xaa, 0xc4, 0xef, 0x41, 0x5e, 0xb5, 0x0b, 0x14, 0x96, 0xf9, 0xea, 0xa4,
|
||||
0x32, 0xaf, 0x79, 0xfb, 0xc4, 0x1e, 0xa8, 0x98, 0xaf, 0x35, 0xb8, 0xbc, 0x19, 0x8d, 0xed, 0xb3,
|
||||
0xd2, 0x98, 0x3a, 0x2c, 0xfa, 0x4e, 0x80, 0x3c, 0x56, 0x8f, 0x51, 0x07, 0x19, 0xbe, 0x2a, 0xef,
|
||||
0xff, 0x7f, 0x1e, 0x95, 0xd6, 0x62, 0x84, 0x8c, 0xf8, 0xc8, 0x8b, 0xd4, 0x69, 0xa5, 0x49, 0xae,
|
||||
0xb9, 0xb8, 0x89, 0x28, 0xb3, 0xb6, 0xc4, 0x7f, 0xf6, 0x82, 0x34, 0xb6, 0x79, 0x22, 0xad, 0x48,
|
||||
0xce, 0x40, 0x2b, 0xcc, 0x27, 0xb0, 0x3c, 0xfc, 0xba, 0x08, 0xb8, 0xc2, 0x80, 0x2c, 0x9e, 0xd8,
|
||||
0x21, 0x47, 0xf8, 0x4d, 0x5c, 0xc1, 0xfc, 0x2d, 0x01, 0x8b, 0xdf, 0xf8, 0xee, 0x3b, 0xe0, 0x7e,
|
||||
0x55, 0xc8, 0x07, 0x88, 0x92, 0x6e, 0xd0, 0x40, 0x54, 0x80, 0x35, 0xee, 0x55, 0x03, 0x31, 0x7d,
|
||||
0x0f, 0x0a, 0x8e, 0xe7, 0x11, 0xe6, 0x84, 0x58, 0x70, 0xef, 0xbf, 0xb0, 0x26, 0xfd, 0x0e, 0xb0,
|
||||
0x46, 0xbc, 0xb5, 0xd6, 0x07, 0x26, 0xb6, 0x3d, 0x16, 0xf4, 0xec, 0xb8, 0x51, 0xe3, 0x1e, 0x2c,
|
||||
0x0c, 0x0b, 0xf0, 0xe6, 0x7c, 0x80, 0x7a, 0x6a, 0xf6, 0xf0, 0x25, 0x2f, 0xc1, 0x43, 0xa7, 0xdd,
|
||||
0x45, 0x61, 0x47, 0x15, 0x9b, 0xdb, 0x89, 0x5b, 0x9a, 0xb9, 0x06, 0x17, 0xef, 0x4b, 0x22, 0x1f,
|
||||
0xa2, 0x53, 0x84, 0xac, 0x1c, 0x57, 0x12, 0xef, 0xbc, 0x1d, 0x6e, 0x79, 0x85, 0x44, 0xb2, 0xd1,
|
||||
0xf0, 0xca, 0xaa, 0xdf, 0x01, 0x2a, 0x38, 0xc5, 0x13, 0x38, 0xb1, 0x10, 0xb0, 0x43, 0x41, 0x73,
|
||||
0x1f, 0x0a, 0xdf, 0x39, 0xf8, 0xfc, 0x07, 0x7c, 0x00, 0x73, 0xf2, 0x1e, 0xe5, 0xeb, 0x10, 0x59,
|
||||
0xd2, 0x26, 0x93, 0xa5, 0xc4, 0xdb, 0x90, 0xa5, 0xea, 0xeb, 0x39, 0x48, 0x8b, 0xf1, 0xae, 0x1f,
|
||||
0x40, 0x46, 0x12, 0x61, 0xbd, 0x32, 0x39, 0xe2, 0x23, 0xbf, 0x4d, 0x8c, 0xcf, 0x66, 0x57, 0x50,
|
||||
0x4f, 0xfb, 0x01, 0xd2, 0x82, 0xb0, 0xea, 0x6b, 0x93, 0x55, 0xe3, 0xf4, 0xd9, 0xf8, 0x64, 0x26,
|
||||
0x59, 0x75, 0x43, 0x13, 0x32, 0x92, 0x05, 0x4e, 0x7b, 0xce, 0x08, 0x2b, 0x36, 0x3e, 0x9d, 0x45,
|
||||
0x21, 0xba, 0xe8, 0x19, 0xcc, 0x1f, 0xa3, 0x9b, 0x7a, 0x75, 0x16, 0xf5, 0xe3, 0xac, 0xe3, 0x94,
|
||||
0x57, 0x3e, 0x85, 0xe4, 0x0e, 0x62, 0x7a, 0x79, 0xb2, 0xd2, 0x80, 0x93, 0x1a, 0x1f, 0xcf, 0x20,
|
||||
0x19, 0xe1, 0x96, 0xe2, 0xe3, 0x40, 0xb7, 0x26, 0xab, 0x0c, 0x53, 0x48, 0xa3, 0x32, 0xb3, 0xbc,
|
||||
0xba, 0xa8, 0x06, 0x29, 0xce, 0x08, 0xf5, 0x29, 0xbe, 0xc5, 0x58, 0xa3, 0xb1, 0x3c, 0x92, 0xdc,
|
||||
0xdb, 0x1d, 0x9f, 0xf5, 0xf4, 0x5d, 0x48, 0xf1, 0x52, 0xd2, 0xa7, 0xe4, 0xe1, 0x28, 0xdb, 0x1b,
|
||||
0x6b, 0xf1, 0x11, 0xe4, 0x23, 0x22, 0x34, 0x0d, 0x8a, 0x61, 0xc6, 0x34, 0xd6, 0xe8, 0x03, 0xc8,
|
||||
0x2a, 0x0a, 0xa3, 0x4f, 0x89, 0xf7, 0x71, 0xa6, 0x33, 0xc1, 0x60, 0x5a, 0x50, 0x92, 0x69, 0x1e,
|
||||
0x0e, 0xf3, 0x96, 0xb1, 0x06, 0x1f, 0x42, 0x46, 0x72, 0x93, 0x69, 0x45, 0x33, 0xc2, 0x60, 0xc6,
|
||||
0x9a, 0xc4, 0x90, 0x0b, 0xe9, 0x85, 0x7e, 0x6d, 0x7a, 0x8e, 0xc4, 0xd8, 0x8c, 0x61, 0xcd, 0x2a,
|
||||
0xae, 0x32, 0xea, 0x39, 0x40, 0x6c, 0xa8, 0xdf, 0x98, 0x02, 0xf1, 0x49, 0xf4, 0xc4, 0xf8, 0xfc,
|
||||
0x74, 0x4a, 0xea, 0xe2, 0x87, 0x90, 0x91, 0x63, 0x70, 0x1a, 0x6c, 0x23, 0xc3, 0x72, 0x2c, 0x6c,
|
||||
0xfb, 0x90, 0x55, 0xa3, 0x6b, 0x5a, 0xae, 0x1c, 0x9f, 0x86, 0xc6, 0xb5, 0x19, 0xa5, 0x95, 0xeb,
|
||||
0xdf, 0x43, 0x8a, 0xcf, 0x9c, 0x69, 0x55, 0x18, 0x9b, 0x7f, 0xc6, 0xda, 0x2c, 0xa2, 0xd2, 0xfc,
|
||||
0xc6, 0xb7, 0xaf, 0xde, 0xac, 0x5c, 0xf8, 0xe3, 0xcd, 0xca, 0x85, 0x9f, 0xfb, 0x2b, 0xda, 0xab,
|
||||
0xfe, 0x8a, 0xf6, 0x7b, 0x7f, 0x45, 0xfb, 0xbb, 0xbf, 0xa2, 0x3d, 0xbd, 0xfb, 0x76, 0x7f, 0xa1,
|
||||
0xbc, 0x23, 0x16, 0x4f, 0x12, 0x7b, 0x19, 0x01, 0xd8, 0x8d, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff,
|
||||
0xc7, 0x3c, 0xaa, 0x56, 0xea, 0x14, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
|
@ -1946,6 +1949,13 @@ func (m *CreateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if len(m.RuntimePath) > 0 {
|
||||
i -= len(m.RuntimePath)
|
||||
copy(dAtA[i:], m.RuntimePath)
|
||||
i = encodeVarintTasks(dAtA, i, uint64(len(m.RuntimePath)))
|
||||
i--
|
||||
dAtA[i] = 0x52
|
||||
}
|
||||
if m.Options != nil {
|
||||
{
|
||||
size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
|
||||
|
@ -3198,6 +3208,10 @@ func (m *CreateTaskRequest) Size() (n int) {
|
|||
l = m.Options.Size()
|
||||
n += 1 + l + sovTasks(uint64(l))
|
||||
}
|
||||
l = len(m.RuntimePath)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTasks(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
|
@ -3747,6 +3761,7 @@ func (this *CreateTaskRequest) String() string {
|
|||
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
|
||||
`Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "types.Descriptor", 1) + `,`,
|
||||
`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`,
|
||||
`RuntimePath:` + fmt.Sprintf("%v", this.RuntimePath) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
|
@ -4385,6 +4400,38 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error {
|
|||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 10:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field RuntimePath", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTasks
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthTasks
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTasks
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.RuntimePath = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTasks(dAtA[iNdEx:])
|
||||
|
|
|
@@ -88,6 +88,8 @@ message CreateTaskRequest {
	containerd.types.Descriptor checkpoint = 8;

	google.protobuf.Any options = 9;

	string runtime_path = 10;
}

message CreateTaskResponse {

|
@ -14,5 +14,5 @@
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package events defines the event pushing and subscription service.
|
||||
// Package events defines the ttrpc event service.
|
||||
package events
|
||||
|
|
|
@ -0,0 +1,18 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package version defines the version service.
|
||||
package version
|
|
@ -0,0 +1,18 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package task defines the task service.
|
||||
package task
|
|
@@ -21,15 +21,16 @@ import (
	"bytes"
	"compress/gzip"
	"context"
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"os/exec"
	"strconv"
	"sync"

	"github.com/containerd/containerd/log"
	"github.com/klauspost/compress/zstd"
	exec "golang.org/x/sys/execabs"
)

type (
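This hunk swaps the plain `os/exec` import for `golang.org/x/sys/execabs` under the same `exec` alias. `execabs` mirrors the `os/exec` API but refuses to resolve a binary from the current working directory, which matters for the `unpigz` lookup this package performs. A small sketch of the behaviour follows; treating `unpigz` as the example binary is an assumption taken from this file's pigz detection.

```go
package main

import (
	"fmt"

	exec "golang.org/x/sys/execabs"
)

func main() {
	// With execabs, LookPath returns an error rather than a path relative to
	// the current working directory, so a rogue ./unpigz is never selected.
	path, err := exec.LookPath("unpigz")
	if err != nil {
		fmt.Println("unpigz not usable:", err)
		return
	}
	fmt.Println("found unpigz at", path)
}
```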
@ -125,17 +126,52 @@ func (r *bufferedReader) Peek(n int) ([]byte, error) {
|
|||
return r.buf.Peek(n)
|
||||
}
|
||||
|
||||
const (
|
||||
zstdMagicSkippableStart = 0x184D2A50
|
||||
zstdMagicSkippableMask = 0xFFFFFFF0
|
||||
)
|
||||
|
||||
var (
|
||||
gzipMagic = []byte{0x1F, 0x8B, 0x08}
|
||||
zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
|
||||
)
|
||||
|
||||
type matcher = func([]byte) bool
|
||||
|
||||
func magicNumberMatcher(m []byte) matcher {
|
||||
return func(source []byte) bool {
|
||||
return bytes.HasPrefix(source, m)
|
||||
}
|
||||
}
|
||||
|
||||
// zstdMatcher detects zstd compression algorithm.
|
||||
// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
|
||||
// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.
|
||||
func zstdMatcher() matcher {
|
||||
return func(source []byte) bool {
|
||||
if bytes.HasPrefix(source, zstdMagic) {
|
||||
// Zstandard frame
|
||||
return true
|
||||
}
|
||||
// skippable frame
|
||||
if len(source) < 8 {
|
||||
return false
|
||||
}
|
||||
// magic number from 0x184D2A50 to 0x184D2A5F.
|
||||
if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// DetectCompression detects the compression algorithm of the source.
|
||||
func DetectCompression(source []byte) Compression {
|
||||
for compression, m := range map[Compression][]byte{
|
||||
Gzip: {0x1F, 0x8B, 0x08},
|
||||
Zstd: {0x28, 0xb5, 0x2f, 0xfd},
|
||||
for compression, fn := range map[Compression]matcher{
|
||||
Gzip: magicNumberMatcher(gzipMagic),
|
||||
Zstd: zstdMatcher(),
|
||||
} {
|
||||
if len(source) < len(m) {
|
||||
// Len too short
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(m, source[:len(m)]) {
|
||||
if fn(source) {
|
||||
return compression
|
||||
}
|
||||
}
|
||||
|
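For reference, the magic-number logic in the hunk above can be exercised on its own. The sketch below is a standalone illustration (not part of the diff): it mirrors the prefix match for Zstandard frames and the masked little-endian check for skippable frames whose magic lies in 0x184D2A50..0x184D2A5F.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// isZstd mirrors the matcher above: a Zstandard frame starts with
// 0xFD2FB528 (bytes 0x28 0xb5 0x2f 0xfd), while a skippable frame starts
// with any magic in the range 0x184D2A50..0x184D2A5F.
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, []byte{0x28, 0xb5, 0x2f, 0xfd}) {
		return true
	}
	if len(source) < 8 {
		return false
	}
	return binary.LittleEndian.Uint32(source[:4])&0xFFFFFFF0 == 0x184D2A50
}

func main() {
	skippable := []byte{0x50, 0x2a, 0x4d, 0x18, 0x04, 0x00, 0x00, 0x00, 0x5d, 0x00, 0x00, 0x00}
	fmt.Println(isZstd(skippable))                // true: 0x184D2A50 is in the skippable range
	fmt.Println(isZstd([]byte{0x1F, 0x8B, 0x08})) // false: that is a gzip header
}
```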
|
|
@ -20,15 +20,15 @@ import (
|
|||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
@ -79,7 +79,7 @@ func testCompressDecompress(t *testing.T, size int, compression Compression) Dec
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
decompressed, err := ioutil.ReadAll(decompressor)
|
||||
decompressed, err := io.ReadAll(decompressor)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -122,10 +122,7 @@ func TestCompressDecompressUncompressed(t *testing.T) {
|
|||
|
||||
func TestDetectPigz(t *testing.T) {
|
||||
// Create fake PATH with unpigz executable, make sure detectPigz can find it
|
||||
tempPath, err := ioutil.TempDir("", "containerd_temp_")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tempPath := t.TempDir()
|
||||
|
||||
filename := "unpigz"
|
||||
if runtime.GOOS == "windows" {
|
||||
|
@ -134,15 +131,11 @@ func TestDetectPigz(t *testing.T) {
|
|||
|
||||
fullPath := filepath.Join(tempPath, filename)
|
||||
|
||||
if err := ioutil.WriteFile(fullPath, []byte(""), 0111); err != nil {
|
||||
if err := os.WriteFile(fullPath, []byte(""), 0111); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer os.RemoveAll(tempPath)
|
||||
|
||||
oldPath := os.Getenv("PATH")
|
||||
os.Setenv("PATH", tempPath)
|
||||
defer os.Setenv("PATH", oldPath)
|
||||
t.Setenv("PATH", tempPath)
|
||||
|
||||
if pigzPath := detectPigz(); pigzPath == "" {
|
||||
t.Fatal("failed to detect pigz path")
|
||||
|
@ -150,8 +143,7 @@ func TestDetectPigz(t *testing.T) {
|
|||
t.Fatalf("wrong pigz found: %s != %s", pigzPath, fullPath)
|
||||
}
|
||||
|
||||
os.Setenv(disablePigzEnv, "1")
|
||||
defer os.Unsetenv(disablePigzEnv)
|
||||
t.Setenv(disablePigzEnv, "1")
|
||||
|
||||
if pigzPath := detectPigz(); pigzPath != "" {
|
||||
t.Fatalf("disable via %s doesn't work", disablePigzEnv)
|
||||
|
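The test conversions above rely on standard `testing` helpers that clean up automatically; a minimal, hypothetical test (not from the diff) showing the same pattern:

```go
package example

import (
	"os"
	"path/filepath"
	"testing"
)

// TestHelperCleanup is a hypothetical test illustrating the pattern above:
// t.TempDir creates a directory that is removed when the test finishes, and
// t.Setenv restores the variable's previous value automatically, so neither
// needs a defer os.RemoveAll / os.Setenv pair.
func TestHelperCleanup(t *testing.T) {
	dir := t.TempDir()
	if err := os.WriteFile(filepath.Join(dir, "unpigz"), []byte(""), 0o111); err != nil {
		t.Fatal(err)
	}
	t.Setenv("PATH", dir) // reverted after the test, even on failure
}
```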
@ -164,7 +156,7 @@ func TestCmdStream(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadAll(out)
|
||||
buf, err := io.ReadAll(out)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read from stdout: %s", err)
|
||||
}
|
||||
|
@ -180,7 +172,7 @@ func TestCmdStreamBad(t *testing.T) {
|
|||
t.Fatalf("failed to start command: %v", err)
|
||||
}
|
||||
|
||||
if buf, err := ioutil.ReadAll(out); err == nil {
|
||||
if buf, err := io.ReadAll(out); err == nil {
|
||||
t.Fatal("command should have failed")
|
||||
} else if err.Error() != "exit status 1: bad result\n" {
|
||||
t.Fatalf("wrong error: %s", err.Error())
|
||||
|
@ -188,3 +180,39 @@ func TestCmdStreamBad(t *testing.T) {
|
|||
t.Fatalf("wrong output: %s", string(buf))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectCompressionZstd(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
source []byte
|
||||
expected Compression
|
||||
}{
|
||||
{
|
||||
// test zstd compression without skippable frames.
|
||||
source: []byte{
|
||||
0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528
|
||||
0x04, 0x00, 0x31, 0x00, 0x00, // frame header
|
||||
0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker"
|
||||
0x16, 0x0e, 0x21, 0xc3, // content checksum
|
||||
},
|
||||
expected: Zstd,
|
||||
},
|
||||
{
|
||||
// test zstd compression with skippable frames.
|
||||
source: []byte{
|
||||
0x50, 0x2a, 0x4d, 0x18, // magic number of skippable frame: 0x184D2A50 to 0x184D2A5F
|
||||
0x04, 0x00, 0x00, 0x00, // frame size
|
||||
0x5d, 0x00, 0x00, 0x00, // user data
|
||||
0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528
|
||||
0x04, 0x00, 0x31, 0x00, 0x00, // frame header
|
||||
0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker"
|
||||
0x16, 0x0e, 0x21, 0xc3, // content checksum
|
||||
},
|
||||
expected: Zstd,
|
||||
},
|
||||
} {
|
||||
compression := DetectCompression(tc.source)
|
||||
if compression != tc.expected {
|
||||
t.Fatalf("Unexpected compression %v, expected %v", compression, tc.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,7 +19,6 @@ package archive
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
@ -37,11 +36,7 @@ func TestPrefixHeaderReadable(t *testing.T) {
|
|||
// https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go
|
||||
var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00")
|
||||
|
||||
tmpDir, err := ioutil.TempDir("", "prefix-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
r, err := compression.DecompressStream(bytes.NewReader(testFile))
|
||||
if err != nil {
|
||||
|
|
107
archive/tar.go
|
@ -19,6 +19,8 @@ package archive
|
|||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -28,9 +30,10 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/archive/tarheader"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/containerd/pkg/userns"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var bufPool = &sync.Pool{
|
||||
|
@ -48,12 +51,15 @@ var errInvalidArchive = errors.New("invalid archive")
|
|||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md
|
||||
func Diff(ctx context.Context, a, b string) io.ReadCloser {
|
||||
r, w := io.Pipe()
|
||||
|
||||
go func() {
|
||||
err := WriteDiff(ctx, w, a, b)
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Debugf("write diff failed")
|
||||
}
|
||||
if err = w.CloseWithError(err); err != nil {
|
||||
log.G(ctx).WithError(err).Debugf("closing tar pipe failed")
|
||||
}
|
||||
|
@ -68,12 +74,12 @@ func Diff(ctx context.Context, a, b string) io.ReadCloser {
|
|||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md
|
||||
func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffOpt) error {
|
||||
var options WriteDiffOptions
|
||||
for _, opt := range opts {
|
||||
if err := opt(&options); err != nil {
|
||||
return errors.Wrap(err, "failed to apply option")
|
||||
return fmt.Errorf("failed to apply option: %w", err)
|
||||
}
|
||||
}
|
||||
if options.writeDiffFunc == nil {
|
||||
|
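The migration from `github.com/pkg/errors` to `fmt.Errorf` with `%w` throughout this file keeps the wrapped cause inspectable via the standard library. A small standalone check (illustrative only):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	base := os.ErrNotExist
	wrapped := fmt.Errorf("failed to apply option: %w", base)

	// %w records the cause, so errors.Is/errors.As still see it through the wrapper.
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
	fmt.Println(wrapped)                            // failed to apply option: file does not exist
}
```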
@ -89,12 +95,12 @@ func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffO
|
|||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md
|
||||
func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, _ WriteDiffOptions) error {
|
||||
cw := newChangeWriter(w, b)
|
||||
cw := NewChangeWriter(w, b)
|
||||
err := fs.Changes(ctx, a, b, cw.HandleChange)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create diff tar stream")
|
||||
return fmt.Errorf("failed to create diff tar stream: %w", err)
|
||||
}
|
||||
return cw.Close()
|
||||
}
|
||||
|
@ -102,7 +108,7 @@ func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, _ WriteDiffOp
|
|||
const (
|
||||
// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
|
||||
// filename this means that file has been removed from the base layer.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts
|
||||
whiteoutPrefix = ".wh."
|
||||
|
||||
// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
|
||||
|
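As the whiteoutPrefix comment notes, a deletion is encoded by prefixing the base name with `.wh.`. A hypothetical helper (not part of the package) sketching that naming convention:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// whiteoutName returns the tar entry name that marks path as deleted in an
// OCI layer: the ".wh." prefix is applied to the base name only.
func whiteoutName(path string) string {
	dir, base := filepath.Split(path)
	return filepath.Join(dir, ".wh."+base)
}

func main() {
	fmt.Println(whiteoutName("etc/hosts")) // etc/.wh.hosts
}
```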
@ -115,17 +121,19 @@ const (
|
|||
whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
|
||||
|
||||
paxSchilyXattr = "SCHILY.xattr."
|
||||
|
||||
userXattrPrefix = "user."
|
||||
)
|
||||
|
||||
// Apply applies a tar stream of an OCI style diff tar.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets
|
||||
func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int64, error) {
|
||||
root = filepath.Clean(root)
|
||||
|
||||
var options ApplyOptions
|
||||
for _, opt := range opts {
|
||||
if err := opt(&options); err != nil {
|
||||
return 0, errors.Wrap(err, "failed to apply option")
|
||||
return 0, fmt.Errorf("failed to apply option: %w", err)
|
||||
}
|
||||
}
|
||||
if options.Filter == nil {
|
||||
|
@ -140,7 +148,7 @@ func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int
|
|||
|
||||
// applyNaive applies a tar stream of an OCI style diff tar to a directory
|
||||
// applying each file as either a whole file or whiteout.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets
|
||||
func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) {
|
||||
var (
|
||||
dirs []*tar.Header
|
||||
|
@ -233,7 +241,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti
|
|||
ppath, base := filepath.Split(hdr.Name)
|
||||
ppath, err = fs.RootPath(root, ppath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "failed to get root path")
|
||||
return 0, fmt.Errorf("failed to get root path: %w", err)
|
||||
}
|
||||
|
||||
// Join to root before joining to parent path to ensure relative links are
|
||||
|
@ -263,7 +271,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti
|
|||
}
|
||||
writeFile, err := convertWhiteout(hdr, path)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "failed to convert whiteout file %q", hdr.Name)
|
||||
return 0, fmt.Errorf("failed to convert whiteout file %q: %w", hdr.Name, err)
|
||||
}
|
||||
if !writeFile {
|
||||
continue
|
||||
|
@ -324,6 +332,7 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header
|
|||
}
|
||||
}
|
||||
|
||||
//nolint:staticcheck // TypeRegA is deprecated but we may still receive an external tar with TypeRegA
|
||||
case tar.TypeReg, tar.TypeRegA:
|
||||
file, err := openFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdrInfo.Mode())
|
||||
if err != nil {
|
||||
|
@ -370,12 +379,16 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header
|
|||
return nil
|
||||
|
||||
default:
|
||||
return errors.Errorf("unhandled tar header type %d\n", hdr.Typeflag)
|
||||
return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
|
||||
}
|
||||
|
||||
// Lchown is not supported on Windows.
|
||||
if runtime.GOOS != "windows" {
|
||||
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
|
||||
err = fmt.Errorf("failed to Lchown %q for UID %d, GID %d: %w", path, hdr.Uid, hdr.Gid, err)
|
||||
if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() {
|
||||
err = fmt.Errorf("%w (Hint: try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -384,11 +397,19 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header
|
|||
if strings.HasPrefix(key, paxSchilyXattr) {
|
||||
key = key[len(paxSchilyXattr):]
|
||||
if err := setxattr(path, key, value); err != nil {
|
||||
if errors.Is(err, syscall.EPERM) && strings.HasPrefix(key, userXattrPrefix) {
|
||||
// In the user.* namespace, only regular files and directories can have extended attributes.
|
||||
// See https://man7.org/linux/man-pages/man7/xattr.7.html for details.
|
||||
if fi, err := os.Lstat(path); err == nil && (!fi.Mode().IsRegular() && !fi.Mode().IsDir()) {
|
||||
log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if errors.Is(err, syscall.ENOTSUP) {
|
||||
log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key)
|
||||
continue
|
||||
}
|
||||
return err
|
||||
return fmt.Errorf("failed to setxattr %q for key %q: %w", path, key, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
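The PAX handling above strips the `SCHILY.xattr.` prefix before restoring each attribute and treats "not supported" as non-fatal. A condensed, Linux-only sketch of that flow, using the same `unix.Lsetxattr` call that `setxattr` in tar_unix.go uses (the helper name is illustrative):

```go
//go:build linux

package xattrs

import (
	"errors"
	"strings"

	"golang.org/x/sys/unix"
)

// restoreXattrs applies the SCHILY.xattr.* PAX records of a tar header to
// path, skipping filesystems without xattr support, mirroring the best-effort
// handling above.
func restoreXattrs(path string, paxRecords map[string]string) error {
	for key, value := range paxRecords {
		if !strings.HasPrefix(key, "SCHILY.xattr.") {
			continue
		}
		attr := strings.TrimPrefix(key, "SCHILY.xattr.")
		if err := unix.Lsetxattr(path, attr, []byte(value), 0); err != nil {
			if errors.Is(err, unix.ENOTSUP) {
				continue // target filesystem has no xattr support
			}
			return err
		}
	}
	return nil
}
```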
@ -461,7 +482,17 @@ func mkparent(ctx context.Context, path, root string, parents []string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
type changeWriter struct {
|
||||
// ChangeWriter provides tar stream from filesystem change information.
|
||||
// The provided tar stream is styled as an OCI layer. Change information
|
||||
// (add/modify/delete/unmodified) for each file needs to be passed to this
|
||||
// writer through HandleChange method.
|
||||
//
|
||||
// This should be used in combination with continuity's diff computing functionality
|
||||
// (e.g. `fs.Change` of github.com/containerd/continuity/fs).
|
||||
//
|
||||
// See also https://github.com/opencontainers/image-spec/blob/main/layer.md for details
|
||||
// about OCI layers
|
||||
type ChangeWriter struct {
|
||||
tw *tar.Writer
|
||||
source string
|
||||
whiteoutT time.Time
|
||||
|
@ -470,8 +501,11 @@ type changeWriter struct {
|
|||
addedDirs map[string]struct{}
|
||||
}
|
||||
|
||||
func newChangeWriter(w io.Writer, source string) *changeWriter {
|
||||
return &changeWriter{
|
||||
// NewChangeWriter returns ChangeWriter that writes tar stream of the source directory
|
||||
// to the provided writer. Change information (add/modify/delete/unmodified) for each
|
||||
// file needs to be passed through HandleChange method.
|
||||
func NewChangeWriter(w io.Writer, source string) *ChangeWriter {
|
||||
return &ChangeWriter{
|
||||
tw: tar.NewWriter(w),
|
||||
source: source,
|
||||
whiteoutT: time.Now(),
|
||||
|
@ -481,7 +515,10 @@ func newChangeWriter(w io.Writer, source string) *changeWriter {
|
|||
}
|
||||
}
|
||||
|
||||
func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
|
||||
// HandleChange receives filesystem change information and reflects that information to
|
||||
// the result tar stream. This function implements `fs.ChangeFunc` of continuity
|
||||
// (github.com/containerd/continuity/fs) and should be used with that package.
|
||||
func (cw *ChangeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
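The exported ChangeWriter is meant to be driven by continuity's change walker, exactly as writeDiffNaive does earlier in this file. A minimal sketch of that wiring (assuming the containerd and continuity import paths already shown in this diff; the wrapper name is hypothetical):

```go
package layerdiff

import (
	"context"
	"fmt"
	"io"

	"github.com/containerd/containerd/archive"
	"github.com/containerd/continuity/fs"
)

// WriteLayerDiff streams an OCI-style layer tar describing the changes from
// lower to upper into w, feeding ChangeWriter.HandleChange to continuity's
// change walker.
func WriteLayerDiff(ctx context.Context, w io.Writer, lower, upper string) error {
	cw := archive.NewChangeWriter(w, upper)
	if err := fs.Changes(ctx, lower, upper, cw.HandleChange); err != nil {
		return fmt.Errorf("failed to create diff tar stream: %w", err)
	}
	return cw.Close()
}
```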
@ -501,7 +538,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
return err
|
||||
}
|
||||
if err := cw.tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrap(err, "failed to write whiteout header")
|
||||
return fmt.Errorf("failed to write whiteout header: %w", err)
|
||||
}
|
||||
} else {
|
||||
var (
|
||||
|
@ -519,7 +556,8 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
}
|
||||
}
|
||||
|
||||
hdr, err := tar.FileInfoHeader(f, link)
|
||||
// Use FileInfoHeaderNoLookups to avoid propagating user names and group names from the host
|
||||
hdr, err := tarheader.FileInfoHeaderNoLookups(f, link)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -536,12 +574,12 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
if strings.HasPrefix(name, string(filepath.Separator)) {
|
||||
name, err = filepath.Rel(string(filepath.Separator), name)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to make path relative")
|
||||
return fmt.Errorf("failed to make path relative: %w", err)
|
||||
}
|
||||
}
|
||||
name, err = tarName(name)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot canonicalize path")
|
||||
return fmt.Errorf("cannot canonicalize path: %w", err)
|
||||
}
|
||||
// suffix with '/' for directories
|
||||
if f.IsDir() && !strings.HasSuffix(name, "/") {
|
||||
|
@ -550,7 +588,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
hdr.Name = name
|
||||
|
||||
if err := setHeaderForSpecialDevice(hdr, name, f); err != nil {
|
||||
return errors.Wrap(err, "failed to set device headers")
|
||||
return fmt.Errorf("failed to set device headers: %w", err)
|
||||
}
|
||||
|
||||
// additionalLinks stores file names which must be linked to
|
||||
|
@ -578,8 +616,8 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
}
|
||||
|
||||
if capability, err := getxattr(source, "security.capability"); err != nil {
|
||||
return errors.Wrap(err, "failed to get capabilities xattr")
|
||||
} else if capability != nil {
|
||||
return fmt.Errorf("failed to get capabilities xattr: %w", err)
|
||||
} else if len(capability) > 0 {
|
||||
if hdr.PAXRecords == nil {
|
||||
hdr.PAXRecords = map[string]string{}
|
||||
}
|
||||
|
@ -590,19 +628,19 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
return err
|
||||
}
|
||||
if err := cw.tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrap(err, "failed to write file header")
|
||||
return fmt.Errorf("failed to write file header: %w", err)
|
||||
}
|
||||
|
||||
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
|
||||
file, err := open(source)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open path: %v", source)
|
||||
return fmt.Errorf("failed to open path: %v: %w", source, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
n, err := copyBuffered(context.TODO(), cw.tw, file)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to copy")
|
||||
return fmt.Errorf("failed to copy: %w", err)
|
||||
}
|
||||
if n != hdr.Size {
|
||||
return errors.New("short write copying file")
|
||||
|
@ -621,7 +659,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
return err
|
||||
}
|
||||
if err := cw.tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrap(err, "failed to write file header")
|
||||
return fmt.Errorf("failed to write file header: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -629,14 +667,15 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
return nil
|
||||
}
|
||||
|
||||
func (cw *changeWriter) Close() error {
|
||||
// Close closes this writer.
|
||||
func (cw *ChangeWriter) Close() error {
|
||||
if err := cw.tw.Close(); err != nil {
|
||||
return errors.Wrap(err, "failed to close tar writer")
|
||||
return fmt.Errorf("failed to close tar writer: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cw *changeWriter) includeParents(hdr *tar.Header) error {
|
||||
func (cw *ChangeWriter) includeParents(hdr *tar.Header) error {
|
||||
if cw.addedDirs == nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -744,7 +783,7 @@ func validateWhiteout(path string) error {
|
|||
dir += string(filepath.Separator)
|
||||
}
|
||||
if !strings.HasPrefix(originalPath, dir) {
|
||||
return errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
|
||||
return fmt.Errorf("invalid whiteout name: %v: %w", base, errInvalidArchive)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
// +build freebsd
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
|
@ -23,7 +21,6 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -34,17 +31,12 @@ import (
|
|||
"github.com/containerd/containerd/snapshots/overlay/overlayutils"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/containerd/continuity/fs/fstest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestOverlayApply(t *testing.T) {
|
||||
testutil.RequiresRoot(t)
|
||||
|
||||
base, err := ioutil.TempDir("", "test-ovl-diff-apply-")
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create temp dir: %+v", err)
|
||||
}
|
||||
defer os.RemoveAll(base)
|
||||
base := t.TempDir()
|
||||
|
||||
if err := overlayutils.Supported(base); err != nil {
|
||||
t.Skipf("skipping because overlay is not supported %v", err)
|
||||
|
@ -59,11 +51,7 @@ func TestOverlayApply(t *testing.T) {
|
|||
func TestOverlayApplyNoParents(t *testing.T) {
|
||||
testutil.RequiresRoot(t)
|
||||
|
||||
base, err := ioutil.TempDir("", "test-ovl-diff-apply-")
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create temp dir: %+v", err)
|
||||
}
|
||||
defer os.RemoveAll(base)
|
||||
base := t.TempDir()
|
||||
|
||||
if err := overlayutils.Supported(base); err != nil {
|
||||
t.Skipf("skipping because overlay is not supported %v", err)
|
||||
|
@ -71,11 +59,11 @@ func TestOverlayApplyNoParents(t *testing.T) {
|
|||
fstest.FSSuite(t, overlayDiffApplier{
|
||||
tmp: base,
|
||||
diff: func(ctx context.Context, w io.Writer, a, b string, _ ...WriteDiffOpt) error {
|
||||
cw := newChangeWriter(w, b)
|
||||
cw := NewChangeWriter(w, b)
|
||||
cw.addedDirs = nil
|
||||
err := fs.Changes(ctx, a, b, cw.HandleChange)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create diff tar stream")
|
||||
return fmt.Errorf("failed to create diff tar stream: %w", err)
|
||||
}
|
||||
return cw.Close()
|
||||
},
|
||||
|
@ -98,9 +86,9 @@ type overlayContext struct {
|
|||
type contextKey struct{}
|
||||
|
||||
func (d overlayDiffApplier) TestContext(ctx context.Context) (context.Context, func(), error) {
|
||||
merged, err := ioutil.TempDir(d.tmp, "merged")
|
||||
merged, err := os.MkdirTemp(d.tmp, "merged")
|
||||
if err != nil {
|
||||
return ctx, nil, errors.Wrap(err, "failed to make merged dir")
|
||||
return ctx, nil, fmt.Errorf("failed to make merged dir: %w", err)
|
||||
}
|
||||
|
||||
oc := &overlayContext{
|
||||
|
@ -119,9 +107,9 @@ func (d overlayDiffApplier) TestContext(ctx context.Context) (context.Context, f
|
|||
func (d overlayDiffApplier) Apply(ctx context.Context, a fstest.Applier) (string, func(), error) {
|
||||
oc := ctx.Value(contextKey{}).(*overlayContext)
|
||||
|
||||
applyCopy, err := ioutil.TempDir(d.tmp, "apply-copy-")
|
||||
applyCopy, err := os.MkdirTemp(d.tmp, "apply-copy-")
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to create temp dir")
|
||||
return "", nil, fmt.Errorf("failed to create temp dir: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(applyCopy)
|
||||
|
||||
|
@ -131,33 +119,33 @@ func (d overlayDiffApplier) Apply(ctx context.Context, a fstest.Applier) (string
|
|||
}
|
||||
|
||||
if err = fs.CopyDir(applyCopy, base); err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to copy base")
|
||||
return "", nil, fmt.Errorf("failed to copy base: %w", err)
|
||||
}
|
||||
|
||||
if err := a.Apply(applyCopy); err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to apply changes to copy of base")
|
||||
return "", nil, fmt.Errorf("failed to apply changes to copy of base: %w", err)
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
if err := d.diff(ctx, buf, base, applyCopy); err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to create diff")
|
||||
return "", nil, fmt.Errorf("failed to create diff: %w", err)
|
||||
}
|
||||
|
||||
if oc.mounted {
|
||||
if err := mount.Unmount(oc.merged, 0); err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to unmount")
|
||||
return "", nil, fmt.Errorf("failed to unmount: %w", err)
|
||||
}
|
||||
oc.mounted = false
|
||||
}
|
||||
|
||||
next, err := ioutil.TempDir(d.tmp, "lower-")
|
||||
next, err := os.MkdirTemp(d.tmp, "lower-")
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to create temp dir")
|
||||
return "", nil, fmt.Errorf("failed to create temp dir: %w", err)
|
||||
}
|
||||
|
||||
if _, err = Apply(ctx, next, buf, WithConvertWhiteout(OverlayConvertWhiteout), WithParents(oc.lowers)); err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to apply tar stream")
|
||||
return "", nil, fmt.Errorf("failed to apply tar stream: %w", err)
|
||||
}
|
||||
|
||||
oc.lowers = append([]string{next}, oc.lowers...)
|
||||
|
@ -175,7 +163,7 @@ func (d overlayDiffApplier) Apply(ctx context.Context, a fstest.Applier) (string
|
|||
}
|
||||
|
||||
if err := m.Mount(oc.merged); err != nil {
|
||||
return "", nil, errors.Wrapf(err, "failed to mount: %v", m)
|
||||
return "", nil, fmt.Errorf("failed to mount: %v: %w", m, err)
|
||||
}
|
||||
oc.mounted = true
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
//go:build !windows && !freebsd
|
||||
// +build !windows,!freebsd
|
||||
|
||||
/*
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
|
@ -26,7 +24,7 @@ import (
|
|||
)
|
||||
|
||||
// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets
|
||||
func applyWindowsLayer(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) {
|
||||
return ociwclayer.ImportLayerFromTar(ctx, r, root, options.Parents)
|
||||
}
|
||||
|
@ -47,7 +45,7 @@ func AsWindowsContainerLayer() ApplyOpt {
|
|||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md
|
||||
func writeDiffWindowsLayers(ctx context.Context, w io.Writer, _, layer string, options WriteDiffOptions) error {
|
||||
return ociwclayer.ExportLayerToTar(ctx, w, layer, options.ParentLayers)
|
||||
}
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
//go:build !windows && !darwin
|
||||
// +build !windows,!darwin
|
||||
|
||||
/*
|
||||
|
@ -22,22 +23,20 @@ import (
|
|||
"archive/tar"
|
||||
"bytes"
|
||||
"context"
|
||||
_ "crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "crypto/sha256"
|
||||
|
||||
"github.com/containerd/containerd/archive/tartest"
|
||||
"github.com/containerd/containerd/pkg/testutil"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/containerd/continuity/fs/fstest"
|
||||
"github.com/pkg/errors"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
const tarCmd = "tar"
|
||||
|
@ -58,7 +57,7 @@ var baseApplier = fstest.Apply(
|
|||
func TestUnpack(t *testing.T) {
|
||||
requireTar(t)
|
||||
|
||||
if err := testApply(baseApplier); err != nil {
|
||||
if err := testApply(t, baseApplier); err != nil {
|
||||
t.Fatalf("Test apply failed: %+v", err)
|
||||
}
|
||||
}
|
||||
|
@ -66,7 +65,7 @@ func TestUnpack(t *testing.T) {
|
|||
func TestBaseDiff(t *testing.T) {
|
||||
requireTar(t)
|
||||
|
||||
if err := testBaseDiff(baseApplier); err != nil {
|
||||
if err := testBaseDiff(t, baseApplier); err != nil {
|
||||
t.Fatalf("Test base diff failed: %+v", err)
|
||||
}
|
||||
}
|
||||
|
@ -102,7 +101,7 @@ func TestRelativeSymlinks(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, bo := range breakoutLinks {
|
||||
if err := testDiffApply(bo); err != nil {
|
||||
if err := testDiffApply(t, bo); err != nil {
|
||||
t.Fatalf("Test apply failed: %+v", err)
|
||||
}
|
||||
}
|
||||
|
@ -179,7 +178,7 @@ func TestSymlinks(t *testing.T) {
|
|||
}
|
||||
|
||||
for i, l := range links {
|
||||
if err := testDiffApply(l[0], l[1]); err != nil {
|
||||
if err := testDiffApply(t, l[0], l[1]); err != nil {
|
||||
t.Fatalf("Test[%d] apply failed: %+v", i+1, err)
|
||||
}
|
||||
}
|
||||
|
@ -233,21 +232,17 @@ func TestBreakouts(t *testing.T) {
|
|||
tc := tartest.TarContext{}.WithUIDGID(os.Getuid(), os.Getgid()).WithModTime(time.Now().UTC())
|
||||
expected := "unbroken"
|
||||
unbrokenCheck := func(root string) error {
|
||||
b, err := ioutil.ReadFile(filepath.Join(root, "etc", "unbroken"))
|
||||
b, err := os.ReadFile(filepath.Join(root, "etc", "unbroken"))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read unbroken")
|
||||
return fmt.Errorf("failed to read unbroken: %w", err)
|
||||
}
|
||||
if string(b) != expected {
|
||||
return errors.Errorf("/etc/unbroken: unexpected value %s, expected %s", b, expected)
|
||||
return fmt.Errorf("/etc/unbroken: unexpected value %s, expected %s", b, expected)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
errFileDiff := errors.New("files differ")
|
||||
td, err := ioutil.TempDir("", "test-breakouts-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(td)
|
||||
td := t.TempDir()
|
||||
|
||||
isSymlinkFile := func(f string) func(string) error {
|
||||
return func(root string) error {
|
||||
|
@ -257,7 +252,7 @@ func TestBreakouts(t *testing.T) {
|
|||
}
|
||||
|
||||
if got := fi.Mode() & os.ModeSymlink; got != os.ModeSymlink {
|
||||
return errors.Errorf("%s should be symlink", fi.Name())
|
||||
return fmt.Errorf("%s should be symlink", fi.Name())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -285,7 +280,7 @@ func TestBreakouts(t *testing.T) {
|
|||
}
|
||||
|
||||
if t1 != t2 {
|
||||
return errors.Wrapf(errFileDiff, "%#v and %#v", t1, t2)
|
||||
return fmt.Errorf("%#v and %#v: %w", t1, t2, errFileDiff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -310,7 +305,7 @@ func TestBreakouts(t *testing.T) {
|
|||
return err
|
||||
}
|
||||
if !os.SameFile(s1, s2) {
|
||||
return errors.Wrapf(errFileDiff, "%#v and %#v", s1, s2)
|
||||
return fmt.Errorf("%#v and %#v: %w", s1, s2, errFileDiff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -330,12 +325,12 @@ func TestBreakouts(t *testing.T) {
|
|||
}
|
||||
fileValue := func(f1 string, content []byte) func(string) error {
|
||||
return func(root string) error {
|
||||
b, err := ioutil.ReadFile(filepath.Join(root, f1))
|
||||
b, err := os.ReadFile(filepath.Join(root, f1))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(b, content) {
|
||||
return errors.Errorf("content differs: expected %v, got %v", content, b)
|
||||
return fmt.Errorf("content differs: expected %v, got %v", content, b)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -420,12 +415,12 @@ func TestBreakouts(t *testing.T) {
|
|||
tc.File("/localetc/emptied", []byte{}, 0644),
|
||||
),
|
||||
validator: func(root string) error {
|
||||
b, err := ioutil.ReadFile(filepath.Join(root, "etc", "emptied"))
|
||||
b, err := os.ReadFile(filepath.Join(root, "etc", "emptied"))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read unbroken")
|
||||
return fmt.Errorf("failed to read unbroken: %w", err)
|
||||
}
|
||||
if len(b) > 0 {
|
||||
return errors.Errorf("/etc/emptied: non-empty")
|
||||
return errors.New("/etc/emptied: non-empty")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
@ -754,11 +749,11 @@ func TestBreakouts(t *testing.T) {
|
|||
name: "HardlinkSymlinkChmod",
|
||||
w: func() tartest.WriterToTar {
|
||||
p := filepath.Join(td, "perm400")
|
||||
if err := ioutil.WriteFile(p, []byte("..."), 0400); err != nil {
|
||||
if err := os.WriteFile(p, []byte("..."), 0400); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ep := filepath.Join(td, "also-exists-outside-root")
|
||||
if err := ioutil.WriteFile(ep, []byte("..."), 0640); err != nil {
|
||||
if err := os.WriteFile(ep, []byte("..."), 0640); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -774,7 +769,7 @@ func TestBreakouts(t *testing.T) {
|
|||
return err
|
||||
}
|
||||
if perm := fi.Mode() & os.ModePerm; perm != 0400 {
|
||||
return errors.Errorf("%s perm changed from 0400 to %04o", p, perm)
|
||||
return fmt.Errorf("%s perm changed from 0400 to %04o", p, perm)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
@ -800,7 +795,7 @@ func TestApplyTar(t *testing.T) {
|
|||
return err
|
||||
}
|
||||
if _, err := os.Stat(p); err != nil {
|
||||
return errors.Wrapf(err, "failure checking existence for %v", d)
|
||||
return fmt.Errorf("failure checking existence for %v: %w", d, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -841,26 +836,18 @@ func TestApplyTar(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func testApply(a fstest.Applier) error {
|
||||
td, err := ioutil.TempDir("", "test-apply-")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create temp dir")
|
||||
}
|
||||
defer os.RemoveAll(td)
|
||||
dest, err := ioutil.TempDir("", "test-apply-dest-")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create temp dir")
|
||||
}
|
||||
defer os.RemoveAll(dest)
|
||||
func testApply(t *testing.T, a fstest.Applier) error {
|
||||
td := t.TempDir()
|
||||
dest := t.TempDir()
|
||||
|
||||
if err := a.Apply(td); err != nil {
|
||||
return errors.Wrap(err, "failed to apply filesystem changes")
|
||||
return fmt.Errorf("failed to apply filesystem changes: %w", err)
|
||||
}
|
||||
|
||||
tarArgs := []string{"c", "-C", td}
|
||||
names, err := readDirNames(td)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read directory names")
|
||||
return fmt.Errorf("failed to read directory names: %w", err)
|
||||
}
|
||||
tarArgs = append(tarArgs, names...)
|
||||
|
||||
|
@ -868,34 +855,26 @@ func testApply(a fstest.Applier) error {
|
|||
|
||||
arch, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create stdout pipe")
|
||||
return fmt.Errorf("failed to create stdout pipe: %w", err)
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return errors.Wrap(err, "failed to start command")
|
||||
return fmt.Errorf("failed to start command: %w", err)
|
||||
}
|
||||
|
||||
if _, err := Apply(context.Background(), dest, arch); err != nil {
|
||||
return errors.Wrap(err, "failed to apply tar stream")
|
||||
return fmt.Errorf("failed to apply tar stream: %w", err)
|
||||
}
|
||||
|
||||
return fstest.CheckDirectoryEqual(td, dest)
|
||||
}
|
||||
|
||||
func testBaseDiff(a fstest.Applier) error {
|
||||
td, err := ioutil.TempDir("", "test-base-diff-")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create temp dir")
|
||||
}
|
||||
defer os.RemoveAll(td)
|
||||
dest, err := ioutil.TempDir("", "test-base-diff-dest-")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create temp dir")
|
||||
}
|
||||
defer os.RemoveAll(dest)
|
||||
func testBaseDiff(t *testing.T, a fstest.Applier) error {
|
||||
td := t.TempDir()
|
||||
dest := t.TempDir()
|
||||
|
||||
if err := a.Apply(td); err != nil {
|
||||
return errors.Wrap(err, "failed to apply filesystem changes")
|
||||
return fmt.Errorf("failed to apply filesystem changes: %w", err)
|
||||
}
|
||||
|
||||
arch := Diff(context.Background(), "", td)
|
||||
|
@ -903,27 +882,19 @@ func testBaseDiff(a fstest.Applier) error {
|
|||
cmd := exec.Command(tarCmd, "x", "-C", dest)
|
||||
cmd.Stdin = arch
|
||||
if err := cmd.Run(); err != nil {
|
||||
return errors.Wrap(err, "tar command failed")
|
||||
return fmt.Errorf("tar command failed: %w", err)
|
||||
}
|
||||
|
||||
return fstest.CheckDirectoryEqual(td, dest)
|
||||
}
|
||||
|
||||
func testDiffApply(appliers ...fstest.Applier) error {
|
||||
td, err := ioutil.TempDir("", "test-diff-apply-")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create temp dir")
|
||||
}
|
||||
defer os.RemoveAll(td)
|
||||
dest, err := ioutil.TempDir("", "test-diff-apply-dest-")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create temp dir")
|
||||
}
|
||||
defer os.RemoveAll(dest)
|
||||
func testDiffApply(t *testing.T, appliers ...fstest.Applier) error {
|
||||
td := t.TempDir()
|
||||
dest := t.TempDir()
|
||||
|
||||
for _, a := range appliers {
|
||||
if err := a.Apply(td); err != nil {
|
||||
return errors.Wrap(err, "failed to apply filesystem changes")
|
||||
return fmt.Errorf("failed to apply filesystem changes: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -931,18 +902,18 @@ func testDiffApply(appliers ...fstest.Applier) error {
|
|||
if len(appliers) > 1 {
|
||||
for _, a := range appliers[:len(appliers)-1] {
|
||||
if err := a.Apply(dest); err != nil {
|
||||
return errors.Wrap(err, "failed to apply base filesystem changes")
|
||||
return fmt.Errorf("failed to apply base filesystem changes: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
diffBytes, err := ioutil.ReadAll(Diff(context.Background(), dest, td))
|
||||
diffBytes, err := io.ReadAll(Diff(context.Background(), dest, td))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create diff")
|
||||
return fmt.Errorf("failed to create diff: %w", err)
|
||||
}
|
||||
|
||||
if _, err := Apply(context.Background(), dest, bytes.NewReader(diffBytes)); err != nil {
|
||||
return errors.Wrap(err, "failed to apply tar stream")
|
||||
return fmt.Errorf("failed to apply tar stream: %w", err)
|
||||
}
|
||||
|
||||
return fstest.CheckDirectoryEqual(td, dest)
|
||||
|
@ -950,11 +921,7 @@ func testDiffApply(appliers ...fstest.Applier) error {
|
|||
|
||||
func makeWriterToTarTest(wt tartest.WriterToTar, a fstest.Applier, validate func(string) error, applyErr error) func(*testing.T) {
|
||||
return func(t *testing.T) {
|
||||
td, err := ioutil.TempDir("", "test-writer-to-tar-")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp dir: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(td)
|
||||
td := t.TempDir()
|
||||
|
||||
if a != nil {
|
||||
if err := a.Apply(td); err != nil {
|
||||
|
@ -1194,10 +1161,10 @@ func dirEntry(name string, mode int) tarEntryValidator {
|
|||
return errors.New("not directory type")
|
||||
}
|
||||
if hdr.Name != name {
|
||||
return errors.Errorf("wrong name %q, expected %q", hdr.Name, name)
|
||||
return fmt.Errorf("wrong name %q, expected %q", hdr.Name, name)
|
||||
}
|
||||
if hdr.Mode != int64(mode) {
|
||||
return errors.Errorf("wrong mode %o, expected %o", hdr.Mode, mode)
|
||||
return fmt.Errorf("wrong mode %o, expected %o", hdr.Mode, mode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1209,13 +1176,13 @@ func fileEntry(name string, expected []byte, mode int) tarEntryValidator {
|
|||
return errors.New("not file type")
|
||||
}
|
||||
if hdr.Name != name {
|
||||
return errors.Errorf("wrong name %q, expected %q", hdr.Name, name)
|
||||
return fmt.Errorf("wrong name %q, expected %q", hdr.Name, name)
|
||||
}
|
||||
if hdr.Mode != int64(mode) {
|
||||
return errors.Errorf("wrong mode %o, expected %o", hdr.Mode, mode)
|
||||
return fmt.Errorf("wrong mode %o, expected %o", hdr.Mode, mode)
|
||||
}
|
||||
if !bytes.Equal(b, expected) {
|
||||
return errors.Errorf("different file content")
|
||||
return errors.New("different file content")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1227,10 +1194,10 @@ func linkEntry(name, link string) tarEntryValidator {
|
|||
return errors.New("not link type")
|
||||
}
|
||||
if hdr.Name != name {
|
||||
return errors.Errorf("wrong name %q, expected %q", hdr.Name, name)
|
||||
return fmt.Errorf("wrong name %q, expected %q", hdr.Name, name)
|
||||
}
|
||||
if hdr.Linkname != link {
|
||||
return errors.Errorf("wrong link %q, expected %q", hdr.Linkname, link)
|
||||
return fmt.Errorf("wrong link %q, expected %q", hdr.Linkname, link)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1243,10 +1210,10 @@ func whiteoutEntry(name string) tarEntryValidator {
|
|||
|
||||
return func(hdr *tar.Header, b []byte) error {
|
||||
if hdr.Typeflag != tar.TypeReg {
|
||||
return errors.Errorf("not file type: %q", hdr.Typeflag)
|
||||
return fmt.Errorf("not file type: %q", hdr.Typeflag)
|
||||
}
|
||||
if hdr.Name != whiteOut {
|
||||
return errors.Errorf("wrong name %q, expected whiteout %q", hdr.Name, name)
|
||||
return fmt.Errorf("wrong name %q, expected whiteout %q", hdr.Name, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1254,20 +1221,12 @@ func whiteoutEntry(name string) tarEntryValidator {
|
|||
|
||||
func makeDiffTarTest(validators []tarEntryValidator, a, b fstest.Applier) func(*testing.T) {
|
||||
return func(t *testing.T) {
|
||||
ad, err := ioutil.TempDir("", "test-make-diff-tar-")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp dir: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(ad)
|
||||
ad := t.TempDir()
|
||||
if err := a.Apply(ad); err != nil {
|
||||
t.Fatalf("failed to apply a: %v", err)
|
||||
}
|
||||
|
||||
bd, err := ioutil.TempDir("", "test-make-diff-tar-")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp dir: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(bd)
|
||||
bd := t.TempDir()
|
||||
if err := fs.CopyDir(bd, ad); err != nil {
|
||||
t.Fatalf("failed to copy dir: %v", err)
|
||||
}
|
||||
|
@ -1289,7 +1248,7 @@ func makeDiffTarTest(validators []tarEntryValidator, a, b fstest.Applier) func(*
|
|||
}
|
||||
var b []byte
|
||||
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
|
||||
b, err = ioutil.ReadAll(tr)
|
||||
b, err = io.ReadAll(tr)
|
||||
if err != nil {
|
||||
t.Fatalf("tar read file error: %v", err)
|
||||
}
|
||||
|
@ -1307,9 +1266,9 @@ func makeDiffTarTest(validators []tarEntryValidator, a, b fstest.Applier) func(*
|
|||
type diffApplier struct{}
|
||||
|
||||
func (d diffApplier) TestContext(ctx context.Context) (context.Context, func(), error) {
|
||||
base, err := ioutil.TempDir("", "test-diff-apply-")
|
||||
base, err := os.MkdirTemp("", "test-diff-apply-")
|
||||
if err != nil {
|
||||
return ctx, nil, errors.Wrap(err, "failed to create temp dir")
|
||||
return ctx, nil, fmt.Errorf("failed to create temp dir: %w", err)
|
||||
}
|
||||
return context.WithValue(ctx, d, base), func() {
|
||||
os.RemoveAll(base)
|
||||
|
@ -1319,32 +1278,32 @@ func (d diffApplier) TestContext(ctx context.Context) (context.Context, func(),
|
|||
func (d diffApplier) Apply(ctx context.Context, a fstest.Applier) (string, func(), error) {
|
||||
base := ctx.Value(d).(string)
|
||||
|
||||
applyCopy, err := ioutil.TempDir("", "test-diffapply-apply-copy-")
|
||||
applyCopy, err := os.MkdirTemp("", "test-diffapply-apply-copy-")
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to create temp dir")
|
||||
return "", nil, fmt.Errorf("failed to create temp dir: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(applyCopy)
|
||||
if err = fs.CopyDir(applyCopy, base); err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to copy base")
|
||||
return "", nil, fmt.Errorf("failed to copy base: %w", err)
|
||||
}
|
||||
if err := a.Apply(applyCopy); err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to apply changes to copy of base")
|
||||
return "", nil, fmt.Errorf("failed to apply changes to copy of base: %w", err)
|
||||
}
|
||||
|
||||
diffBytes, err := ioutil.ReadAll(Diff(ctx, base, applyCopy))
|
||||
diffBytes, err := io.ReadAll(Diff(ctx, base, applyCopy))
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to create diff")
|
||||
return "", nil, fmt.Errorf("failed to create diff: %w", err)
|
||||
}
|
||||
|
||||
if _, err = Apply(ctx, base, bytes.NewReader(diffBytes)); err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to apply tar stream")
|
||||
return "", nil, fmt.Errorf("failed to apply tar stream: %w", err)
|
||||
}
|
||||
|
||||
return base, nil, nil
|
||||
}
|
||||
|
||||
func readDirNames(p string) ([]string, error) {
|
||||
fis, err := ioutil.ReadDir(p)
|
||||
fis, err := os.ReadDir(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
|
@ -20,14 +21,16 @@ package archive
|
|||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/containerd/pkg/userns"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
@ -40,13 +43,26 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
|
|||
}
|
||||
|
||||
func setHeaderForSpecialDevice(hdr *tar.Header, name string, fi os.FileInfo) error {
|
||||
// Devmajor and Devminor are only needed for special devices.
|
||||
|
||||
// In FreeBSD, RDev for regular files is -1 (unless overridden by FS):
|
||||
// https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531
|
||||
// (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241).
|
||||
|
||||
// ZFS in particular does not override the default:
|
||||
// https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027
|
||||
|
||||
// Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1).
|
||||
// Such large values cannot be encoded in a tar header.
|
||||
if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar {
|
||||
return nil
|
||||
}
|
||||
s, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return errors.New("unsupported stat type")
|
||||
}
|
||||
|
||||
// Rdev is int32 on darwin/bsd, int64 on linux/solaris
|
||||
rdev := uint64(s.Rdev) // nolint: unconvert
|
||||
rdev := uint64(s.Rdev) //nolint:nolintlint,unconvert // rdev is int32 on darwin/bsd, int64 on linux/solaris
|
||||
|
||||
// Currently go does not fill in the major/minors
|
||||
if s.Mode&syscall.S_IFBLK != 0 ||
|
||||
|
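The comment above hinges on what the -1 NODEV value becomes after the unsigned conversion; a tiny standalone illustration (not from the diff) of why such an Rdev cannot be encoded in a tar header:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// On FreeBSD, Rdev for a regular file defaults to NODEV (-1). Viewed as an
	// unsigned 64-bit value that becomes 2^64-1, far beyond what the numeric
	// tar header fields can hold, hence the early return above for anything
	// that is not a block or char device.
	rdev := uint64(int64(-1))
	fmt.Println(rdev == math.MaxUint64) // true
	fmt.Println(rdev)                   // 18446744073709551615
}
```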
@ -69,6 +85,7 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
|
|||
}
|
||||
// Call chmod to avoid permission mask
|
||||
if err := os.Chmod(name, perm); err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
return f, err
|
||||
|
@ -122,7 +139,7 @@ func getxattr(path, attr string) ([]byte, error) {
|
|||
func setxattr(path, key, value string) error {
|
||||
// Do not set trusted attributes
|
||||
if strings.HasPrefix(key, "trusted.") {
|
||||
return errors.Wrap(unix.ENOTSUP, "admin attributes from archive not supported")
|
||||
return fmt.Errorf("admin attributes from archive not supported: %w", unix.ENOTSUP)
|
||||
}
|
||||
return unix.Lsetxattr(path, key, []byte(value), 0)
|
||||
}
|
||||
|
@ -142,12 +159,12 @@ func copyDirInfo(fi os.FileInfo, path string) error {
|
|||
}
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to chown %s", path)
|
||||
return fmt.Errorf("failed to chown %s: %w", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.Chmod(path, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", path)
|
||||
return fmt.Errorf("failed to chmod %s: %w", path, err)
|
||||
}
|
||||
|
||||
timespec := []unix.Timespec{
|
||||
|
@ -155,7 +172,7 @@ func copyDirInfo(fi os.FileInfo, path string) error {
|
|||
unix.NsecToTimespec(syscall.TimespecToNsec(fs.StatMtime(st))),
|
||||
}
|
||||
if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
|
||||
return errors.Wrapf(err, "failed to utime %s", path)
|
||||
return fmt.Errorf("failed to utime %s: %w", path, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -167,7 +184,7 @@ func copyUpXAttrs(dst, src string) error {
|
|||
if err == unix.ENOTSUP || err == sysx.ENODATA {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "failed to list xattrs on %s", src)
|
||||
return fmt.Errorf("failed to list xattrs on %s: %w", src, err)
|
||||
}
|
||||
for _, xattr := range xattrKeys {
|
||||
// Do not copy up trusted attributes
|
||||
|
@ -179,10 +196,10 @@ func copyUpXAttrs(dst, src string) error {
|
|||
if err == unix.ENOTSUP || err == sysx.ENODATA {
|
||||
continue
|
||||
}
|
||||
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
|
||||
return fmt.Errorf("failed to get xattr %q on %s: %w", xattr, src, err)
|
||||
}
|
||||
if err := lsetxattrCreate(dst, xattr, data); err != nil {
|
||||
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
|
||||
return fmt.Errorf("failed to set xattr %q on %s: %w", xattr, dst, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
|
@ -20,12 +18,12 @@ package archive
|
|||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/sys"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// tarName returns platform-specific filepath
|
||||
|
@ -114,7 +112,7 @@ func setxattr(path, key, value string) error {
|
|||
|
||||
func copyDirInfo(fi os.FileInfo, path string) error {
|
||||
if err := os.Chmod(path, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", path)
|
||||
return fmt.Errorf("failed to chmod %s: %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Portions from https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L419-L464
|
||||
Copyright (C) Docker/Moby authors.
|
||||
Licensed under the Apache License, Version 2.0
|
||||
NOTICE: https://github.com/moby/moby/blob/v23.0.1/NOTICE
|
||||
*/
|
||||
|
||||
package tarheader
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"os"
|
||||
)
|
||||
|
||||
// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
|
||||
// prevent tar.FileInfoHeader from introspecting it and potentially calling into
|
||||
// glibc.
|
||||
//
|
||||
// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L419-L434 .
|
||||
type nosysFileInfo struct {
|
||||
os.FileInfo
|
||||
}
|
||||
|
||||
func (fi nosysFileInfo) Sys() interface{} {
|
||||
// A Sys value of type *tar.Header is safe as it is system-independent.
|
||||
// The tar.FileInfoHeader function copies the fields into the returned
|
||||
// header without performing any OS lookups.
|
||||
if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
|
||||
return sys
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sysStat, if non-nil, populates hdr from system-dependent fields of fi.
|
||||
//
|
||||
// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L436-L437 .
|
||||
var sysStat func(fi os.FileInfo, hdr *tar.Header) error
|
||||
|
||||
// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
|
||||
//
|
||||
// Compared to the archive/tar.FileInfoHeader function, this function is safe to
|
||||
// call from a chrooted process as it does not populate fields which would
|
||||
// require operating system lookups. It behaves identically to
|
||||
// tar.FileInfoHeader when fi is a FileInfo value returned from
|
||||
// tar.Header.FileInfo().
|
||||
//
|
||||
// When fi is a FileInfo for a native file, such as returned from os.Stat() and
|
||||
// os.Lstat(), the returned Header value differs from one returned from
|
||||
// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not
|
||||
// set as OS lookups would be required to populate them. The AccessTime and
|
||||
// ChangeTime fields are not currently set (not yet implemented) although that
|
||||
// is subject to change. Callers which require the AccessTime or ChangeTime
|
||||
// fields to be zeroed should explicitly zero them out in the returned Header
|
||||
// value to avoid any compatibility issues in the future.
|
||||
//
|
||||
// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L439-L464 .
|
||||
func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
|
||||
hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sysStat != nil {
|
||||
return hdr, sysStat(fi, hdr)
|
||||
}
|
||||
return hdr, nil
|
||||
}
|
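A short usage sketch for the new helper (hypothetical caller; the import path follows the one added to archive/tar.go above, and the file path is only an example):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containerd/containerd/archive/tarheader"
)

func main() {
	fi, err := os.Lstat("/etc/hostname")
	if err != nil {
		fmt.Println(err)
		return
	}
	// Unlike tar.FileInfoHeader, this never consults the passwd/group
	// databases, so it is safe to call from a chrooted process; Uname and
	// Gname are simply left empty.
	hdr, err := tarheader.FileInfoHeaderNoLookups(fi, "")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(hdr.Name, hdr.Uid, hdr.Gid, hdr.Uname, hdr.Gname)
}
```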
|
@ -0,0 +1,59 @@
|
|||
//go:build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Portions from https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive_unix.go#L52-L70
|
||||
Copyright (C) Docker/Moby authors.
|
||||
Licensed under the Apache License, Version 2.0
|
||||
NOTICE: https://github.com/moby/moby/blob/v23.0.1/NOTICE
|
||||
*/
|
||||
|
||||
package tarheader
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func init() {
|
||||
sysStat = statUnix
|
||||
}
|
||||
|
||||
// statUnix populates hdr from system-dependent fields of fi without performing
|
||||
// any OS lookups.
|
||||
// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive_unix.go#L52-L70
|
||||
func statUnix(fi os.FileInfo, hdr *tar.Header) error {
|
||||
s, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
hdr.Uid = int(s.Uid)
|
||||
hdr.Gid = int(s.Gid)
|
||||
|
||||
if s.Mode&unix.S_IFBLK != 0 ||
|
||||
s.Mode&unix.S_IFCHR != 0 {
|
||||
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev)))
|
||||
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev)))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

/*
@@ -19,11 +20,10 @@
package archive

import (
    "fmt"
    "time"

    "golang.org/x/sys/unix"

    "github.com/pkg/errors"
)

func chtimes(path string, atime, mtime time.Time) error {
@@ -32,7 +32,7 @@ func chtimes(path string, atime, mtime time.Time) error {
    utimes[1] = unix.NsecToTimespec(mtime.UnixNano())

    if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
        return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path)
        return fmt.Errorf("failed call to UtimesNanoAt for %s: %w", path, err)
    }

    return nil
@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

/*
@@ -22,7 +23,6 @@ import (
    "bytes"
    "context"
    "io"
    "io/ioutil"
    "net/url"
    "os"
    "path/filepath"
@@ -49,9 +49,7 @@ func TestNewFIFOSetInDir(t *testing.T) {
        t.Skip("NewFIFOSetInDir has different behaviour on windows")
    }

    root, err := ioutil.TempDir("", "test-new-fifo-set")
    assert.NilError(t, err)
    defer os.RemoveAll(root)
    root := t.TempDir()

    fifos, err := NewFIFOSetInDir(root, "theid", true)
    assert.NilError(t, err)
@@ -68,12 +66,12 @@ func TestNewFIFOSetInDir(t *testing.T) {
    }
    assert.Assert(t, is.DeepEqual(fifos, expected, cmpFIFOSet))

    files, err := ioutil.ReadDir(root)
    files, err := os.ReadDir(root)
    assert.NilError(t, err)
    assert.Check(t, is.Len(files, 1))

    assert.NilError(t, fifos.Close())
    files, err = ioutil.ReadDir(root)
    files, err = os.ReadDir(root)
    assert.NilError(t, err)
    assert.Check(t, is.Len(files, 0))
}
@@ -101,19 +99,19 @@ func TestNewAttach(t *testing.T) {
    fifos, err := NewFIFOSetInDir("", "theid", false)
    assert.NilError(t, err)

    io, err := attacher(fifos)
    attachedFifos, err := attacher(fifos)
    assert.NilError(t, err)
    defer io.Close()
    defer attachedFifos.Close()

    producers := setupFIFOProducers(t, io.Config())
    producers := setupFIFOProducers(t, attachedFifos.Config())
    initProducers(t, producers, expectedStdout, expectedStderr)

    actualStdin, err := ioutil.ReadAll(producers.Stdin)
    actualStdin, err := io.ReadAll(producers.Stdin)
    assert.NilError(t, err)

    io.Wait()
    io.Cancel()
    assert.NilError(t, io.Close())
    attachedFifos.Wait()
    attachedFifos.Cancel()
    assert.NilError(t, attachedFifos.Close())

    assert.Check(t, is.Equal(expectedStdout, stdout.String()))
    assert.Check(t, is.Equal(expectedStderr, stderr.String()))
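The test edits above follow a common migration: `t.TempDir()` replaces the `ioutil.TempDir` plus `defer os.RemoveAll` pair, with cleanup registered automatically via `t.Cleanup`. A tiny illustrative test (hypothetical, not part of the diff):

```go
package example

import (
    "os"
    "path/filepath"
    "testing"
)

func TestTempDirSketch(t *testing.T) {
    // t.TempDir creates a per-test directory and registers its removal,
    // so no explicit defer os.RemoveAll is needed.
    dir := t.TempDir()

    if err := os.WriteFile(filepath.Join(dir, "stdout"), []byte("ok"), 0o600); err != nil {
        t.Fatal(err)
    }
}
```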
@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

/*
@@ -20,15 +21,14 @@ package cio

import (
    "context"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "sync"
    "syscall"

    "github.com/containerd/fifo"
    "github.com/pkg/errors"
)

// NewFIFOSetInDir returns a new FIFOSet with paths in a temporary directory under root
@@ -38,7 +38,7 @@ func NewFIFOSetInDir(root, id string, terminal bool) (*FIFOSet, error) {
            return nil, err
        }
    }
    dir, err := ioutil.TempDir(root, "")
    dir, err := os.MkdirTemp(root, "")
    if err != nil {
        return nil, err
    }
@@ -112,7 +112,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) {

    if fifos.Stdin != "" {
        if f.Stdin, retErr = fifo.OpenFifo(ctx, fifos.Stdin, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
            return f, errors.Wrapf(retErr, "failed to open stdin fifo")
            return f, fmt.Errorf("failed to open stdin fifo: %w", retErr)
        }
        defer func() {
            if retErr != nil && f.Stdin != nil {
@@ -122,7 +122,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) {
    }
    if fifos.Stdout != "" {
        if f.Stdout, retErr = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
            return f, errors.Wrapf(retErr, "failed to open stdout fifo")
            return f, fmt.Errorf("failed to open stdout fifo: %w", retErr)
        }
        defer func() {
            if retErr != nil && f.Stdout != nil {
@@ -132,7 +132,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) {
    }
    if !fifos.Terminal && fifos.Stderr != "" {
        if f.Stderr, retErr = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
            return f, errors.Wrapf(retErr, "failed to open stderr fifo")
            return f, fmt.Errorf("failed to open stderr fifo: %w", retErr)
        }
    }
    return f, nil
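For orientation, a minimal sketch of calling the function touched above from client code; the task id is a placeholder and error handling is trimmed:

```go
package main

import (
    "fmt"
    "log"

    "github.com/containerd/containerd/cio"
)

func main() {
    // An empty root lets NewFIFOSetInDir fall back to the default FIFO directory.
    fifos, err := cio.NewFIFOSetInDir("", "example-task", false)
    if err != nil {
        log.Fatal(err)
    }
    // Close removes the FIFOs and their temporary directory.
    defer fifos.Close()

    fmt.Println(fifos.Stdin, fifos.Stdout, fifos.Stderr)
}
```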
@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

/*
@@ -20,9 +21,6 @@ package cio

import (
    "context"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "testing"

@@ -65,11 +63,7 @@ func TestOpenFifosWithTerminal(t *testing.T) {
    var ctx, cancel = context.WithCancel(context.Background())
    defer cancel()

    ioFifoDir, err := ioutil.TempDir("", fmt.Sprintf("cio-%s", t.Name()))
    if err != nil {
        t.Fatalf("unexpected error during creating temp dir: %v", err)
    }
    defer os.RemoveAll(ioFifoDir)
    ioFifoDir := t.TempDir()

    cfg := Config{
        Stdout: filepath.Join(ioFifoDir, "test-stdout"),
@@ -23,7 +23,6 @@ import (

    winio "github.com/Microsoft/go-winio"
    "github.com/containerd/containerd/log"
    "github.com/pkg/errors"
)

const pipeRoot = `\\.\pipe`
@@ -54,7 +53,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) {
    if fifos.Stdin != "" {
        l, err := winio.ListenPipe(fifos.Stdin, nil)
        if err != nil {
            return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdin)
            return nil, fmt.Errorf("failed to create stdin pipe %s: %w", fifos.Stdin, err)
        }
        cios.closers = append(cios.closers, l)

@@ -77,7 +76,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) {
    if fifos.Stdout != "" {
        l, err := winio.ListenPipe(fifos.Stdout, nil)
        if err != nil {
            return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout)
            return nil, fmt.Errorf("failed to create stdout pipe %s: %w", fifos.Stdout, err)
        }
        cios.closers = append(cios.closers, l)

@@ -100,7 +99,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) {
    if fifos.Stderr != "" {
        l, err := winio.ListenPipe(fifos.Stderr, nil)
        if err != nil {
            return nil, errors.Wrapf(err, "failed to create stderr pipe %s", fifos.Stderr)
            return nil, fmt.Errorf("failed to create stderr pipe %s: %w", fifos.Stderr, err)
        }
        cios.closers = append(cios.closers, l)

@@ -1,5 +1,3 @@
// +build windows

/*
   Copyright The containerd Authors.

client.go
@@ -21,7 +21,6 @@ import (
    "context"
    "encoding/json"
    "fmt"
    "net/http"
    "runtime"
    "strconv"
    "strings"
@@ -62,10 +61,10 @@ import (
    ptypes "github.com/gogo/protobuf/types"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    specs "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/pkg/errors"
    "golang.org/x/sync/semaphore"
    "google.golang.org/grpc"
    "google.golang.org/grpc/backoff"
    "google.golang.org/grpc/credentials/insecure"
    "google.golang.org/grpc/health/grpc_health_v1"
)

@@ -119,31 +118,33 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
    }
    gopts := []grpc.DialOption{
        grpc.WithBlock(),
        grpc.WithInsecure(),
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.FailOnNonTempDialError(true),
        grpc.WithConnectParams(connParams),
        grpc.WithContextDialer(dialer.ContextDialer),

        // TODO(stevvooe): We may need to allow configuration of this on the client.
        grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
        grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
        grpc.WithReturnConnectionError(),
    }
    if len(copts.dialOptions) > 0 {
        gopts = copts.dialOptions
    }
    gopts = append(gopts, grpc.WithDefaultCallOptions(
        grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize),
        grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)))
    if len(copts.callOptions) > 0 {
        gopts = append(gopts, grpc.WithDefaultCallOptions(copts.callOptions...))
    }
    if copts.defaultns != "" {
        unary, stream := newNSInterceptors(copts.defaultns)
        gopts = append(gopts,
            grpc.WithUnaryInterceptor(unary),
            grpc.WithStreamInterceptor(stream),
        )
        gopts = append(gopts, grpc.WithChainUnaryInterceptor(unary))
        gopts = append(gopts, grpc.WithChainStreamInterceptor(stream))
    }

    connector := func() (*grpc.ClientConn, error) {
        ctx, cancel := context.WithTimeout(context.Background(), copts.timeout)
        defer cancel()
        conn, err := grpc.DialContext(ctx, dialer.DialAddress(address), gopts...)
        if err != nil {
            return nil, errors.Wrapf(err, "failed to dial %q", address)
            return nil, fmt.Errorf("failed to dial %q: %w", address, err)
        }
        return conn, nil
    }
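An aside on the gRPC changes in the hunk above: `grpc.WithInsecure()` is deprecated in current grpc-go releases in favour of explicit insecure transport credentials, and the chained interceptor options leave room for additional interceptors supplied through dial options. A minimal, standalone sketch of the new-style dial; the socket address and timeout are placeholders, not values from this diff:

```go
package main

import (
    "context"
    "log"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func dial(ctx context.Context, address string) (*grpc.ClientConn, error) {
    ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()
    return grpc.DialContext(ctx, address,
        grpc.WithBlock(),
        // Replaces the deprecated grpc.WithInsecure().
        grpc.WithTransportCredentials(insecure.NewCredentials()),
    )
}

func main() {
    conn, err := dial(context.Background(), "unix:///run/containerd/containerd.sock")
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
}
```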
@@ -154,7 +155,7 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
        c.conn, c.connector = conn, connector
    }
    if copts.services == nil && c.conn == nil {
        return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available")
        return nil, fmt.Errorf("no grpc connection or services is available: %w", errdefs.ErrUnavailable)
    }

    // check namespace labels for default runtime
@@ -214,7 +215,7 @@ type Client struct {
// Reconnect re-establishes the GRPC connection to the containerd daemon
func (c *Client) Reconnect() error {
    if c.connector == nil {
        return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available")
        return fmt.Errorf("unable to reconnect to containerd, no connector available: %w", errdefs.ErrUnavailable)
    }
    c.connMu.Lock()
    defer c.connMu.Unlock()
@@ -242,7 +243,7 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) {
    c.connMu.Lock()
    if c.conn == nil {
        c.connMu.Unlock()
        return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
        return false, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable)
    }
    c.connMu.Unlock()
    r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true))
@@ -265,8 +266,8 @@ func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container
    return out, nil
}

// NewContainer will create a new container in container with the provided id
// the id must be unique within the namespace
// NewContainer will create a new container with the provided id.
// The id must be unique within the namespace.
func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
    ctx, done, err := c.WithLease(ctx)
    if err != nil {
@@ -369,9 +370,7 @@ type RemoteContext struct {

func defaultRemoteContext() *RemoteContext {
    return &RemoteContext{
        Resolver: docker.NewResolver(docker.ResolverOptions{
            Client: http.DefaultClient,
        }),
        Resolver: docker.NewResolver(docker.ResolverOptions{}),
    }
}

@@ -386,7 +385,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag
    }

    if fetchCtx.Unpack {
        return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull")
        return images.Image{}, fmt.Errorf("unpack on fetch not supported, try pull: %w", errdefs.ErrNotImplemented)
    }

    if fetchCtx.PlatformMatcher == nil {
@@ -397,7 +396,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag
    for _, s := range fetchCtx.Platforms {
        p, err := platforms.Parse(s)
        if err != nil {
            return images.Image{}, errors.Wrapf(err, "invalid platform %s", s)
            return images.Image{}, fmt.Errorf("invalid platform %s: %w", s, err)
        }
        ps = append(ps, p)
    }
@@ -433,7 +432,7 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor,
    for _, platform := range pushCtx.Platforms {
        p, err := platforms.Parse(platform)
        if err != nil {
            return errors.Wrapf(err, "invalid platform %s", platform)
            return fmt.Errorf("invalid platform %s: %w", platform, err)
        }
        ps = append(ps, p)
    }
@@ -716,7 +715,7 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
    c.connMu.Lock()
    if c.conn == nil {
        c.connMu.Unlock()
        return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
        return Version{}, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable)
    }
    c.connMu.Unlock()
    response, err := c.VersionService().Version(ctx, &ptypes.Empty{})
@@ -739,7 +738,7 @@ func (c *Client) Server(ctx context.Context) (ServerInfo, error) {
    c.connMu.Lock()
    if c.conn == nil {
        c.connMu.Unlock()
        return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
        return ServerInfo{}, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable)
    }
    c.connMu.Unlock()

@@ -777,7 +776,7 @@ func (c *Client) getSnapshotter(ctx context.Context, name string) (snapshots.Sna

    s := c.SnapshotService(name)
    if s == nil {
        return nil, errors.Wrapf(errdefs.ErrNotFound, "snapshotter %s was not found", name)
        return nil, fmt.Errorf("snapshotter %s was not found: %w", name, errdefs.ErrNotFound)
    }

    return s, nil
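The recurring `errors.Wrap`/`Wrapf` to `fmt.Errorf("…: %w", err)` rewrites above keep the errdefs sentinels matchable with the standard library. A small sketch of what callers can still rely on after the change; the function name is illustrative:

```go
package main

import (
    "errors"
    "fmt"

    "github.com/containerd/containerd/errdefs"
)

func lookup(name string) error {
    // %w preserves the sentinel, much as errors.Wrapf did via its cause chain.
    return fmt.Errorf("snapshotter %s was not found: %w", name, errdefs.ErrNotFound)
}

func main() {
    err := lookup("overlayfs")
    fmt.Println(errors.Is(err, errdefs.ErrNotFound)) // true
    fmt.Println(errdefs.IsNotFound(err))             // should also be true, assuming errdefs matches via errors.Is
}
```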
@@ -34,6 +34,7 @@ type clientOpts struct {
    defaultPlatform platforms.MatchComparer
    services        *services
    dialOptions     []grpc.DialOption
    callOptions     []grpc.CallOption
    timeout         time.Duration
}

@@ -75,6 +76,14 @@ func WithDialOpts(opts []grpc.DialOption) ClientOpt {
    }
}

// WithCallOpts allows grpc.CallOptions to be set on the connection
func WithCallOpts(opts []grpc.CallOption) ClientOpt {
    return func(c *clientOpts) error {
        c.callOptions = opts
        return nil
    }
}

// WithServices sets services used by the client.
func WithServices(opts ...ServicesOpt) ClientOpt {
    return func(c *clientOpts) error {
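A brief sketch of how the new `WithCallOpts` option might be combined with the existing client constructor; the socket path, namespace, and message size are illustrative values, not taken from this diff:

```go
package main

import (
    "log"

    "github.com/containerd/containerd"
    "google.golang.org/grpc"
)

func main() {
    client, err := containerd.New("/run/containerd/containerd.sock",
        containerd.WithDefaultNamespace("default"),
        // Appended to the default call options that New already sets up.
        containerd.WithCallOpts([]grpc.CallOption{
            grpc.MaxCallRecvMsgSize(32 * 1024 * 1024), // illustrative size
        }),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()
}
```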
@@ -1,3 +1,4 @@
//go:build linux
// +build linux

/*
@@ -1,3 +1,4 @@
//go:build linux
// +build linux

/*
@@ -19,10 +20,13 @@
package main

import (
    v2 "github.com/containerd/containerd/runtime/v2/runc/v2"
    "context"

    "github.com/containerd/containerd/runtime/v2/runc/manager"
    _ "github.com/containerd/containerd/runtime/v2/runc/task/plugin"
    "github.com/containerd/containerd/runtime/v2/shim"
)

func main() {
    shim.Run("io.containerd.runc.v2", v2.New)
    shim.RunManager(context.Background(), manager.NewShimManager("io.containerd.runc.v2"))
}
@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

/*
@@ -26,7 +27,6 @@ import (
    "io"
    "net"
    "os"
    "os/exec"
    "os/signal"
    "runtime"
    "runtime/debug"
@@ -45,8 +45,8 @@ import (
    "github.com/containerd/ttrpc"
    "github.com/containerd/typeurl"
    ptypes "github.com/gogo/protobuf/types"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    exec "golang.org/x/sys/execabs"
    "golang.org/x/sys/unix"
)

@@ -153,7 +153,7 @@ func executeShim() error {
    }
    server, err := newServer()
    if err != nil {
        return errors.Wrap(err, "failed creating server")
        return fmt.Errorf("failed creating server: %w", err)
    }
    sv, err := shim.NewService(
        shim.Config{
@@ -211,7 +211,7 @@ func serve(ctx context.Context, server *ttrpc.Server, path string) error {
            p = abstractSocketPrefix + p
        }
        if len(p) > socketPathLimit {
            return errors.Errorf("%q: unix socket path too long (> %d)", p, socketPathLimit)
            return fmt.Errorf("%q: unix socket path too long (> %d)", p, socketPathLimit)
        }
        l, err = net.Listen("unix", p)
    }
@@ -307,12 +307,12 @@ func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event
    if err != nil {
        return err
    }
    status, err := reaper.Default.Wait(cmd, c)
    status, err := reaper.Default.WaitTimeout(cmd, c, 30*time.Second)
    if err != nil {
        return errors.Wrapf(err, "failed to publish event: %s", b.String())
        return fmt.Errorf("failed to publish event: %s: %w", b.String(), err)
    }
    if status != 0 {
        return errors.Errorf("failed to publish event: %s", b.String())
        return fmt.Errorf("failed to publish event: %s", b.String())
    }
    return nil
}
@@ -1,5 +1,3 @@
// +build darwin

/*
   Copyright The containerd Authors.

@@ -1,5 +1,3 @@
// +build freebsd

/*
   Copyright The containerd Authors.

@@ -21,7 +21,6 @@ import (
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "os/signal"
    "path/filepath"
@@ -53,6 +52,7 @@ var densityCommand = cli.Command{
            Duration:    cliContext.GlobalDuration("duration"),
            Concurrency: cliContext.GlobalInt("concurrent"),
            Exec:        cliContext.GlobalBool("exec"),
            Image:       cliContext.GlobalString("image"),
            JSON:        cliContext.GlobalBool("json"),
            Metrics:     cliContext.GlobalString("metrics"),
            Snapshotter: cliContext.GlobalString("snapshotter"),
@@ -66,8 +66,8 @@ var densityCommand = cli.Command{
        if err := cleanup(ctx, client); err != nil {
            return err
        }
        logrus.Infof("pulling %s", imageName)
        image, err := client.Pull(ctx, imageName, containerd.WithPullUnpack, containerd.WithPullSnapshotter(config.Snapshotter))
        logrus.Infof("pulling %s", config.Image)
        image, err := client.Pull(ctx, config.Image, containerd.WithPullUnpack, containerd.WithPullSnapshotter(config.Snapshotter))
        if err != nil {
            return err
        }
@@ -76,9 +76,6 @@ var densityCommand = cli.Command{
        s := make(chan os.Signal, 1)
        signal.Notify(s, syscall.SIGTERM, syscall.SIGINT)

        if err != nil {
            return err
        }
        var (
            pids  []uint32
            count = cliContext.Int("count")
@@ -172,7 +169,7 @@ func getMaps(pid int) (map[string]int, error) {
}

func getppid(pid int) (int, error) {
    bytes, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
    bytes, err := os.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
    if err != nil {
        return 0, err
    }
@@ -63,6 +63,12 @@ func (w *execWorker) exec(ctx, tctx context.Context) {
        logrus.WithError(err).Error("wait exec container's task")
        return
    }

    if err := task.Start(ctx); err != nil {
        logrus.WithError(err).Error("exec container start failure")
        return
    }

    spec, err := c.Spec(ctx)
    if err != nil {
        logrus.WithError(err).Error("failed to get spec")
@@ -36,8 +36,6 @@ import (
    "github.com/urfave/cli"
)

const imageName = "docker.io/library/alpine:latest"

var (
    ct        metrics.LabeledTimer
    execTimer metrics.LabeledTimer
@@ -136,6 +134,11 @@ func main() {
            Name:  "exec",
            Usage: "add execs to the stress tests",
        },
        cli.StringFlag{
            Name:  "image,i",
            Value: "docker.io/library/alpine:latest",
            Usage: "image to be utilized for testing",
        },
        cli.BoolFlag{
            Name:  "json,j",
            Usage: "output results in json format",
@@ -173,6 +176,7 @@ func main() {
            Duration:    context.GlobalDuration("duration"),
            Concurrency: context.GlobalInt("concurrent"),
            Exec:        context.GlobalBool("exec"),
            Image:       context.GlobalString("image"),
            JSON:        context.GlobalBool("json"),
            Metrics:     context.GlobalString("metrics"),
            Runtime:     context.GlobalString("runtime"),
@@ -194,6 +198,7 @@ type config struct {
    Duration    time.Duration
    Address     string
    Exec        bool
    Image       string
    JSON        bool
    Metrics     string
    Runtime     string
@@ -206,7 +211,12 @@ func (c config) newClient() (*containerd.Client, error) {

func serve(c config) error {
    go func() {
        if err := http.ListenAndServe(c.Metrics, metrics.Handler()); err != nil {
        srv := &http.Server{
            Addr:              c.Metrics,
            Handler:           metrics.Handler(),
            ReadHeaderTimeout: 5 * time.Minute, // "G112: Potential Slowloris Attack (gosec)"; not a real concern for our use, so setting a long timeout.
        }
        if err := srv.ListenAndServe(); err != nil {
            logrus.WithError(err).Error("listen and serve")
        }
    }()
@@ -228,8 +238,8 @@ func test(c config) error {
    if err := cleanup(ctx, client); err != nil {
        return err
    }
    logrus.Infof("pulling %s", imageName)
    image, err := client.Pull(ctx, imageName, containerd.WithPullUnpack, containerd.WithPullSnapshotter(c.Snapshotter))
    logrus.Infof("pulling %s", c.Image)
    image, err := client.Pull(ctx, c.Image, containerd.WithPullUnpack, containerd.WithPullSnapshotter(c.Snapshotter))
    if err != nil {
        return err
    }
@@ -1,5 +1,3 @@
// +build freebsd

/*
   Copyright The containerd Authors.

@@ -1,3 +1,4 @@
//go:build !windows && !freebsd
// +build !windows,!freebsd

/*
@@ -1,5 +1,3 @@
// +build windows

/*
   Copyright The containerd Authors.

@@ -19,8 +19,10 @@ package main
// register containerd builtins here
import (
    _ "github.com/containerd/containerd/diff/walking/plugin"
    _ "github.com/containerd/containerd/events/plugin"
    _ "github.com/containerd/containerd/gc/scheduler"
    _ "github.com/containerd/containerd/runtime/restart/monitor"
    _ "github.com/containerd/containerd/runtime/v2"
    _ "github.com/containerd/containerd/services/containers"
    _ "github.com/containerd/containerd/services/content"
    _ "github.com/containerd/containerd/services/diff"
@@ -34,4 +36,5 @@ import (
    _ "github.com/containerd/containerd/services/snapshots"
    _ "github.com/containerd/containerd/services/tasks"
    _ "github.com/containerd/containerd/services/version"
    _ "github.com/containerd/containerd/tracing/plugin"
)
@@ -1,3 +1,4 @@
//go:build !no_aufs
// +build !no_aufs

/*
@@ -1,3 +1,4 @@
//go:build !no_btrfs && cgo
// +build !no_btrfs,cgo

/*
@@ -1,3 +1,4 @@
//go:build (linux && !no_cri) || (windows && !no_cri)
// +build linux,!no_cri windows,!no_cri

/*
@@ -1,3 +1,4 @@
//go:build !no_devmapper
// +build !no_devmapper

/*
@@ -20,7 +20,6 @@ import (
    _ "github.com/containerd/containerd/metrics/cgroups"
    _ "github.com/containerd/containerd/metrics/cgroups/v2"
    _ "github.com/containerd/containerd/runtime/v1/linux"
    _ "github.com/containerd/containerd/runtime/v2"
    _ "github.com/containerd/containerd/runtime/v2/runc/options"
    _ "github.com/containerd/containerd/snapshots/native/plugin"
    _ "github.com/containerd/containerd/snapshots/overlay/plugin"
@@ -1,3 +1,4 @@
//go:build darwin || freebsd || solaris
// +build darwin freebsd solaris

/*
@@ -1,5 +1,3 @@
// +build windows

/*
   Copyright The containerd Authors.

@@ -21,7 +19,6 @@ package main
import (
    _ "github.com/containerd/containerd/diff/lcow"
    _ "github.com/containerd/containerd/diff/windows"
    _ "github.com/containerd/containerd/runtime/v2"
    _ "github.com/containerd/containerd/snapshots/lcow"
    _ "github.com/containerd/containerd/snapshots/windows"
)
@@ -1,3 +1,4 @@
//go:build !no_zfs
// +build !no_zfs

/*
@@ -1,3 +1,4 @@
//go:build !linux && !windows && !solaris
// +build !linux,!windows,!solaris

/*
@@ -19,7 +19,7 @@ package command
import (
    gocontext "context"
    "fmt"
    "io/ioutil"
    "io"
    "net"
    "os"
    "os/signal"
@@ -30,12 +30,13 @@ import (
    "github.com/containerd/containerd/defaults"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/log"
    _ "github.com/containerd/containerd/metrics" // import containerd build info
    "github.com/containerd/containerd/mount"
    "github.com/containerd/containerd/services/server"
    srvconfig "github.com/containerd/containerd/services/server/config"
    "github.com/containerd/containerd/sys"
    "github.com/containerd/containerd/tracing"
    "github.com/containerd/containerd/version"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/urfave/cli"
    "google.golang.org/grpc/grpclog"
@@ -53,7 +54,7 @@ high performance container runtime

func init() {
    // Discard grpc logs so that they don't mess with our stdio
    grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
    grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, io.Discard))

    cli.VersionPrinter = func(c *cli.Context) {
        fmt.Println(c.App.Name, version.Package, c.App.Version, version.Revision)
@@ -108,13 +109,15 @@ can be used and modified as necessary as a custom configuration.`
    }
    app.Action = func(context *cli.Context) error {
        var (
            start   = time.Now()
            signals = make(chan os.Signal, 2048)
            serverC = make(chan *server.Server, 1)
            ctx     = gocontext.Background()
            config  = defaultConfig()
            start       = time.Now()
            signals     = make(chan os.Signal, 2048)
            serverC     = make(chan *server.Server, 1)
            ctx, cancel = gocontext.WithCancel(gocontext.Background())
            config      = defaultConfig()
        )

        defer cancel()

        // Only try to load the config if it either exists, or the user explicitly
        // told us to load this path.
        configPath := context.GlobalString("config")
@@ -138,20 +141,20 @@ can be used and modified as necessary as a custom configuration.`
        // Stop if we are registering or unregistering against Windows SCM.
        stop, err := registerUnregisterService(config.Root)
        if err != nil {
            logrus.Fatal(err)
            log.L.Fatal(err)
        }
        if stop {
            return nil
        }

        done := handleSignals(ctx, signals, serverC)
        done := handleSignals(ctx, signals, serverC, cancel)
        // start the signal handler as soon as we can to make sure that
        // we don't miss any signals during boot
        signal.Notify(signals, handledSignals...)

        // cleanup temp mounts
        if err := mount.SetTempMountLocation(filepath.Join(config.Root, "tmpmounts")); err != nil {
            return errors.Wrap(err, "creating temp mount location")
            return fmt.Errorf("creating temp mount location: %w", err)
        }
        // unmount all temp mounts on boot for the server
        warnings, err := mount.CleanupTempMounts(0)
@@ -163,7 +166,7 @@ can be used and modified as necessary as a custom configuration.`
        }

        if config.GRPC.Address == "" {
            return errors.Wrap(errdefs.ErrInvalidArgument, "grpc address cannot be empty")
            return fmt.Errorf("grpc address cannot be empty: %w", errdefs.ErrInvalidArgument)
        }
        if config.TTRPC.Address == "" {
            // If TTRPC was not explicitly configured, use defaults based on GRPC.
@@ -176,27 +179,66 @@ can be used and modified as necessary as a custom configuration.`
            "revision": version.Revision,
        }).Info("starting containerd")

        server, err := server.New(ctx, config)
        if err != nil {
            return err
        type srvResp struct {
            s   *server.Server
            err error
        }

        // Launch as a Windows Service if necessary
        if err := launchService(server, done); err != nil {
            logrus.Fatal(err)
        // run server initialization in a goroutine so we don't end up blocking important things like SIGTERM handling
        // while the server is initializing.
        // As an example opening the bolt database will block forever if another containerd is already running and containerd
        // will have to be `kill -9`'ed to recover.
        chsrv := make(chan srvResp)
        go func() {
            defer close(chsrv)

            server, err := server.New(ctx, config)
            if err != nil {
                select {
                case chsrv <- srvResp{err: err}:
                case <-ctx.Done():
                }
                return
            }

            // Launch as a Windows Service if necessary
            if err := launchService(server, done); err != nil {
                log.L.Fatal(err)
            }
            select {
            case <-ctx.Done():
                server.Stop()
            case chsrv <- srvResp{s: server}:
            }
        }()

        var server *server.Server
        select {
        case <-ctx.Done():
            return ctx.Err()
        case r := <-chsrv:
            if r.err != nil {
                return r.err
            }
            server = r.s
        }

        serverC <- server
        // We don't send the server down serverC directly in the goroutine above because we need it lower down.
        select {
        case <-ctx.Done():
            return ctx.Err()
        case serverC <- server:
        }

        if config.Debug.Address != "" {
            var l net.Listener
            if isLocalAddress(config.Debug.Address) {
                if l, err = sys.GetLocalListener(config.Debug.Address, config.Debug.UID, config.Debug.GID); err != nil {
                    return errors.Wrapf(err, "failed to get listener for debug endpoint")
                    return fmt.Errorf("failed to get listener for debug endpoint: %w", err)
                }
            } else {
                if l, err = net.Listen("tcp", config.Debug.Address); err != nil {
                    return errors.Wrapf(err, "failed to get listener for debug endpoint")
                    return fmt.Errorf("failed to get listener for debug endpoint: %w", err)
                }
            }
            serve(ctx, l, server.ServeDebug)
@@ -204,37 +246,46 @@ can be used and modified as necessary as a custom configuration.`
        if config.Metrics.Address != "" {
            l, err := net.Listen("tcp", config.Metrics.Address)
            if err != nil {
                return errors.Wrapf(err, "failed to get listener for metrics endpoint")
                return fmt.Errorf("failed to get listener for metrics endpoint: %w", err)
            }
            serve(ctx, l, server.ServeMetrics)
        }
        // setup the ttrpc endpoint
        tl, err := sys.GetLocalListener(config.TTRPC.Address, config.TTRPC.UID, config.TTRPC.GID)
        if err != nil {
            return errors.Wrapf(err, "failed to get listener for main ttrpc endpoint")
            return fmt.Errorf("failed to get listener for main ttrpc endpoint: %w", err)
        }
        serve(ctx, tl, server.ServeTTRPC)

        if config.GRPC.TCPAddress != "" {
            l, err := net.Listen("tcp", config.GRPC.TCPAddress)
            if err != nil {
                return errors.Wrapf(err, "failed to get listener for TCP grpc endpoint")
                return fmt.Errorf("failed to get listener for TCP grpc endpoint: %w", err)
            }
            serve(ctx, l, server.ServeTCP)
        }
        // setup the main grpc endpoint
        l, err := sys.GetLocalListener(config.GRPC.Address, config.GRPC.UID, config.GRPC.GID)
        if err != nil {
            return errors.Wrapf(err, "failed to get listener for main endpoint")
            return fmt.Errorf("failed to get listener for main endpoint: %w", err)
        }
        serve(ctx, l, server.ServeGRPC)

        if err := notifyReady(ctx); err != nil {
            log.G(ctx).WithError(err).Warn("notify ready failed")
        }
        readyC := make(chan struct{})
        go func() {
            server.Wait()
            close(readyC)
        }()

        log.G(ctx).Infof("containerd successfully booted in %fs", time.Since(start).Seconds())
        <-done
        select {
        case <-readyC:
            if err := notifyReady(ctx); err != nil {
                log.G(ctx).WithError(err).Warn("notify ready failed")
            }
            log.G(ctx).Infof("containerd successfully booted in %fs", time.Since(start).Seconds())
            <-done
        case <-done:
        }
        return nil
    }
    return app
@@ -260,6 +311,8 @@ func applyFlags(context *cli.Context, config *srvconfig.Config) error {
    if err := setLogFormat(config); err != nil {
        return err
    }
    setLogHooks()

    for _, v := range []struct {
        name string
        d    *string
@@ -293,36 +346,22 @@ func setLogLevel(context *cli.Context, config *srvconfig.Config) error {
        l = config.Debug.Level
    }
    if l != "" {
        lvl, err := logrus.ParseLevel(l)
        if err != nil {
            return err
        }
        logrus.SetLevel(lvl)
        return log.SetLevel(l)
    }
    return nil
}

func setLogFormat(config *srvconfig.Config) error {
    f := config.Debug.Format
    f := log.OutputFormat(config.Debug.Format)
    if f == "" {
        f = log.TextFormat
    }

    switch f {
    case log.TextFormat:
        logrus.SetFormatter(&logrus.TextFormatter{
            TimestampFormat: log.RFC3339NanoFixed,
            FullTimestamp:   true,
        })
    case log.JSONFormat:
        logrus.SetFormatter(&logrus.JSONFormatter{
            TimestampFormat: log.RFC3339NanoFixed,
        })
    default:
        return errors.Errorf("unknown log format: %s", f)
    }
    return log.SetFormat(f)
}

    return nil
func setLogHooks() {
    logrus.StandardLogger().AddHook(tracing.NewLogrusHook())
}

func dumpStacks(writeToFile bool) {
@@ -337,7 +376,7 @@ func dumpStacks(writeToFile bool) {
        bufferLen *= 2
    }
    buf = buf[:stackSize]
    logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
    log.L.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)

    if writeToFile {
        // Also write to file to aid gathering diagnostics
@@ -348,6 +387,6 @@ func dumpStacks(writeToFile bool) {
        }
        defer f.Close()
        f.WriteString(string(buf))
        logrus.Infof("goroutine stack dump written to %s", name)
        log.L.Infof("goroutine stack dump written to %s", name)
    }
}
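The new comments in the hunk above give the rationale: `server.New` can block indefinitely (for example on the bolt metadata lock), so initialization moves into a goroutine and the caller selects against `ctx.Done()`. A generic, self-contained sketch of that pattern, independent of the containerd types:

```go
package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

type result struct {
    value string
    err   error
}

// initInBackground runs a possibly long-blocking setup step in a goroutine so
// that cancellation (e.g. from a signal handler) is still honoured.
func initInBackground(ctx context.Context, setup func() (string, error)) (string, error) {
    ch := make(chan result, 1)
    go func() {
        v, err := setup()
        ch <- result{value: v, err: err}
    }()
    select {
    case <-ctx.Done():
        return "", ctx.Err()
    case r := <-ch:
        return r.value, r.err
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()
    _, err := initInBackground(ctx, func() (string, error) {
        time.Sleep(time.Second) // simulate a setup step that blocks
        return "ready", nil
    })
    fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true: startup was abandoned instead of hanging
}
```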
@@ -1,3 +1,4 @@
//go:build linux || darwin || freebsd || solaris
// +build linux darwin freebsd solaris

/*
@@ -35,7 +36,7 @@ var handledSignals = []os.Signal{
    unix.SIGPIPE,
}

func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *server.Server) chan struct{} {
func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *server.Server, cancel func()) chan struct{} {
    done := make(chan struct{}, 1)
    go func() {
        var server *server.Server
@@ -60,11 +61,10 @@ func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *se
                log.G(ctx).WithError(err).Error("notify stopping failed")
            }

            if server == nil {
                close(done)
                return
            cancel()
            if server != nil {
                server.Stop()
            }
            server.Stop()
            close(done)
            return
        }
@@ -39,7 +39,7 @@ var (
    }
)

func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *server.Server) chan struct{} {
func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *server.Server, cancel func()) chan struct{} {
    done := make(chan struct{})
    go func() {
        var server *server.Server
@@ -54,12 +54,12 @@ func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *se
            log.G(ctx).WithError(err).Error("notify stopping failed")
        }

        if server == nil {
            close(done)
            return
        cancel()
        if server != nil {
            server.Stop()
        }
        server.Stop()
        close(done)
        return
        }
    }()
@@ -1,5 +1,3 @@
// +build linux

/*
   Copyright The containerd Authors.

@@ -1,3 +1,4 @@
//go:build !linux
// +build !linux

/*
Some files were not shown because too many files have changed in this diff.