Image build history
# 2023-02-28 09:52:09 0.00B Set the default command to execute
CMD ["/sbin/init" "--log-target=journal"]
# 2023-02-28 09:52:09 0.00B Set environment variables NVIDIA_REQUIRE_CUDA and brand
ENV NVIDIA_REQUIRE_CUDA=cuda>=10.2 brand=tesla,driver>=418,driver<419
# 2023-02-28 09:52:09 0.00B Set environment variable NVIDIA_DRIVER_CAPABILITIES
ENV NVIDIA_DRIVER_CAPABILITIES=video,compute,utility
# 2023-02-28 09:52:09 0.00B Set environment variable NVIDIA_VISIBLE_DEVICES
ENV NVIDIA_VISIBLE_DEVICES=all
# 2023-02-28 09:52:09 0.00B Set environment variable LD_LIBRARY_PATH
ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/lib
# 2023-02-28 09:52:09 0.00B Set environment variable PATH
ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# 2023-02-28 09:52:09 0.00B Add metadata labels
LABEL com.nvidia.volumes.needed=nvidia_driver com.nvidia.cuda.verison=10.2.89
# 2023-02-28 09:52:09 0.00B Set the signal sent to stop the container
STOPSIGNAL SIGRTMIN+3
# 2023-02-28 09:52:09 0.00B Declare mount points for persisting or sharing data
VOLUME [/sys/fs/cgroup /tmp /run /run/lock]
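Because the image boots systemd as PID 1 (CMD /sbin/init), declares SIGRTMIN+3 as the stop signal, and defines volumes for /sys/fs/cgroup, /tmp, /run and /run/lock, it is normally started as a privileged, GPU-enabled container. A minimal launch sketch, assuming the NVIDIA Container Toolkit is installed on the host; the container name modelbox-dev is made up for illustration:
# Start the development container with systemd as init and all host GPUs visible
docker run -d --name modelbox-dev \
    --gpus all \
    --privileged \
    --tmpfs /tmp --tmpfs /run --tmpfs /run/lock \
    -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
    modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64:v1.6.1
# docker stop sends the declared STOPSIGNAL (SIGRTMIN+3), which systemd treats as a clean halt
docker stop modelbox-dev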
# 2023-02-28 09:52:09 149.45MB Run a command and create a new image layer
RUN |8 CUDA_VER=10-2 CUDA_VERSION=10.2 TF_VERSION= TRT_VERSION= TORCH_VERSION=1.9.1 CUDA_CUDART_VERSION=10.2.89-1 NVIDIA_CUDA_VERSION=10.2.89 NVIDIA_REQUIRE_CUDA=cuda>=10.2 brand=tesla,driver>=418,driver<419 /bin/sh -c python3 -m pip install --no-cache-dir /opt/release/python/modelbox-*.whl && dpkg -i /opt/release/*.deb && (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i = systemd-tmpfiles-setup.service ] || rm -f $i; done); rm -f /lib/systemd/system/multi-user.target.wants/*; rm -f /etc/systemd/system/*.wants/*; rm -f /lib/systemd/system/local-fs.target.wants/*; rm -f /lib/systemd/system/sockets.target.wants/*udev*; rm -f /lib/systemd/system/sockets.target.wants/*initctl*; rm -f /lib/systemd/system/basic.target.wants/*; rm -f /lib/systemd/system/anaconda.target.wants/*; sed -i 's/^SystemMaxUse=.*/SystemMaxUse=16M/g' /etc/systemd/journald.conf && sed -i '/include/i\/usr/local/lib' /etc/ld.so.conf && echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf && sed -i "32aPermitRootLogin yes" /etc/ssh/sshd_config && echo 'export TMOUT=0' >> /etc/bash.bashrc && echo 'export HISTSIZE=1000' >> /etc/bash.bashrc && echo '[ -n "${SSH_TTY}" ] && export $(cat /proc/1/environ|tr "\\0" "\\n"|xargs)' >> /etc/bash.bashrc && echo 'export PS1="\[\e[35;1m\][\u@\h \W]$ \[\e[0m\]"' >> ~/.bashrc && echo "ldconfig &>/dev/null" >> /etc/bash.bashrc && systemctl enable ssh # buildkit
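This layer installs the ModelBox Python wheel and the accompanying .deb packages, trims the systemd unit tree down to what the container needs, tightens journald limits, and enables sshd. A quick smoke test inside a running container (reusing the hypothetical modelbox-dev container from the launch sketch above; the modelbox binary name is an assumption about what the .deb ships):
docker exec -it modelbox-dev bash -c '
  python3 -c "import modelbox; print(modelbox.__file__)"   # Python package from the wheel
  systemctl is-enabled ssh                                  # sshd enabled by this layer
  which modelbox || true                                    # assumed CLI from the .deb packages; name may differ
'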
# 2023-02-28 09:52:05 4.51GB Run a command and create a new image layer
RUN |8 CUDA_VER=10-2 CUDA_VERSION=10.2 TF_VERSION= TRT_VERSION= TORCH_VERSION=1.9.1 CUDA_CUDART_VERSION=10.2.89-1 NVIDIA_CUDA_VERSION=10.2.89 NVIDIA_REQUIRE_CUDA=cuda>=10.2 brand=tesla,driver>=418,driver<419 /bin/sh -c apt update && if [ "${CUDA_VERSION}" = "10.2" ]; then dnn_ver="8=8.0.0.180-1+cuda10.2"; elif [ "${CUDA_VERSION}" = "11.2" ]; then dnn_ver="8=8.4.1.50-1+cuda11.6";fi && apt install -y --no-install-recommends libcudnn${dnn_ver} libcudnn${dnn_ver%=*}-dev=${dnn_ver#*=} && if [ -n "${TF_VERSION}" ]; then curl -LO https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-2.6.0.tar.gz && tar zxf libtensorflow-gpu-linux-x86_64-2.6.0.tar.gz && cp -af lib include /usr/local/ && python3 -m pip install --no-cache-dir tensorflow-gpu==2.6.0; elif [ -n "${TORCH_VERSION}" ]; then curl -LO https://download.pytorch.org/libtorch/cu102/libtorch-cxx11-abi-shared-with-deps-1.9.1%2Bcu102.zip && unzip libtorch-*.zip -d /root >/dev/null 2>&1 && cp -af libtorch/* /usr/local/; elif [ -n "${TRT_VERSION}" ]; then if [ "${TRT_VERSION}" = "7.1.3.4" ]; then trt_ver="7=7.1.3-1+cuda10.2"; elif [ "${TRT_VERSION}" = "8.4.2.4" ]; then trt_ver="8=8.4.2-1+cuda11.6";fi && apt install -y --no-install-recommends libnvinfer${trt_ver} libnvinfer-dev=${trt_ver#*=} libnvparsers${trt_ver} libnvparsers-dev=${trt_ver#*=} libnvonnxparsers${trt_ver} libnvonnxparsers-dev=${trt_ver#*=} libnvinfer-plugin${trt_ver} libnvinfer-plugin-dev=${trt_ver#*=} python3-libnvinfer=${trt_ver#*=} python3-libnvinfer-dev=${trt_ver#*=};fi && rm -rf /var/lib/apt/lists/* /root/* # buildkit
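With TORCH_VERSION=1.9.1 set and TF_VERSION/TRT_VERSION left empty, this layer takes the libtorch branch: it installs cuDNN 8.0.0 for CUDA 10.2 and unpacks the C++ libtorch 1.9.1+cu102 distribution into /usr/local; no Python torch package is installed in this variant. A hedged check of what actually landed in the image:
docker exec modelbox-dev bash -c '
  ldconfig -p | grep -E "libtorch|libcudnn"        # shared libraries registered by this layer
  ls /usr/local/include/torch 2>/dev/null | head   # libtorch headers copied into /usr/local
  python3 -c "import torch" 2>&1 | head -n 1       # expected to fail: only the C++ distribution is present
'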
# 2023-02-28 09:50:06 2.86GB Run a command and create a new image layer
RUN |8 CUDA_VER=10-2 CUDA_VERSION=10.2 TF_VERSION= TRT_VERSION= TORCH_VERSION=1.9.1 CUDA_CUDART_VERSION=10.2.89-1 NVIDIA_CUDA_VERSION=10.2.89 NVIDIA_REQUIRE_CUDA=cuda>=10.2 brand=tesla,driver>=418,driver<419 /bin/sh -c mkdir -p /root/.pip && echo "[global]" > /root/.pip/pip.conf && echo "index-url = https://pypi.python.org/simple" >>/root/.pip/pip.conf && echo "trusted-host = pypi.python.org" >>/root/.pip/pip.conf && echo "timeout = 120" >>/root/.pip/pip.conf && python3 -m pip install --upgrade pip && python3 -m pip install --no-cache-dir psutil pillow wheel numpy pyyaml requests opencv-python==4.5.5.64 && curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub | apt-key add - && curl -fsSL https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub | apt-key add - && echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/cuda.list && echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list && apt update && apt install -y --no-install-recommends cuda-cudart-${CUDA_VER}=${CUDA_CUDART_VERSION} cuda-minimal-build-${CUDA_VER} cuda-libraries-dev-${CUDA_VER} cuda-command-line-tools-${CUDA_VER} && ln -s cuda-${CUDA_VERSION} /usr/local/cuda && curl https://nodejs.org/dist/v16.13.2/node-v16.13.2-linux-x64.tar.xz|tar -xJ && cp -af node-v16.13.2-linux-x64/* /usr/local/ && npm install -g npm@latest && npm -v && node -v && npm install -g @angular/cli && npm cache clean --force && rm -rf /var/lib/apt/lists/* /root/* # buildkit
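This layer writes a pip configuration, installs common Python packages (numpy, opencv-python 4.5.5.64, and so on), pulls the CUDA 10.2 compiler and development libraries from NVIDIA's Ubuntu 18.04 repositories, and adds Node.js 16.13.2 plus the Angular CLI for the web UI toolchain. A version-check sketch against the running container:
docker exec modelbox-dev bash -c '
  nvcc --version | tail -n 1                  # from cuda-command-line-tools-10-2
  node -v && npm -v && ng version             # Node.js 16.13.2 and the Angular CLI
  python3 -c "import cv2, numpy; print(cv2.__version__, numpy.__version__)"
'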
# 2023-02-28 09:47:47 2.10GB Run a command and create a new image layer
RUN |8 CUDA_VER=10-2 CUDA_VERSION=10.2 TF_VERSION= TRT_VERSION= TORCH_VERSION=1.9.1 CUDA_CUDART_VERSION=10.2.89-1 NVIDIA_CUDA_VERSION=10.2.89 NVIDIA_REQUIRE_CUDA=cuda>=10.2 brand=tesla,driver>=418,driver<419 /bin/sh -c ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo "Asia/Shanghai" > /etc/timezone && echo "deb http://archive.ubuntu.com/ubuntu/ bionic-proposed main restricted universe multiverse" >>/etc/apt/sources.list && export DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=low TZ=Asia/Shanghai && apt update && apt install -y python3.7-dev python3-pip python3-apt python3-setuptools apt-utils && apt install -y dbus systemd systemd-cron iproute2 gnupg2 curl libcurl4-openssl-dev ca-certificates build-essential unzip ffmpeg sudo bash vim gdb git doxygen autoconf cmake gettext openssh-server pkg-config kmod net-tools pciutils libgtk-3-dev libprotobuf-c-dev protobuf-c-compiler duktape-dev libssl-dev libcpprest-dev libswscale-dev libavformat-dev graphviz libgraphviz-dev libfuse-dev netcat clang clang-tidy-10 ccache libgoogle-glog-dev libtbb-dev && update-ca-certificates && apt upgrade -y && ln -sf clang-tidy-10 /usr/bin/clang-tidy && ln -sf run-clang-tidy-10 /usr/bin/run-clang-tidy && ln -sf python3.7 /usr/bin/python3 && ln -sf opencv4/opencv2 /usr/local/include/opencv2 && rm -rf /var/lib/apt/lists/* # buildkit
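This base development layer sets the timezone to Asia/Shanghai, installs Python 3.7 together with a broad C/C++ toolchain (cmake, gdb, clang-tidy-10, ccache, ffmpeg, cpprest, and more), and points the python3 and clang-tidy symlinks at the installed versions. A quick sanity check:
docker exec modelbox-dev bash -c '
  cat /etc/timezone                           # Asia/Shanghai
  python3 --version                           # python3 -> python3.7
  cmake --version | head -n 1 && clang-tidy --version | head -n 1
'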
# 2023-02-28 09:43:37 0.00B Set the working directory to /root
WORKDIR /root
# 2023-02-28 09:43:37 0.00B Define a build argument
ARG NVIDIA_REQUIRE_CUDA
# 2023-02-28 09:43:37 0.00B Define a build argument
ARG NVIDIA_CUDA_VERSION
# 2023-02-28 09:43:37 0.00B Define a build argument
ARG CUDA_CUDART_VERSION
# 2023-02-28 09:43:37 0.00B Define a build argument
ARG TORCH_VERSION
# 2023-02-28 09:43:37 0.00B Define a build argument
ARG TRT_VERSION
# 2023-02-28 09:43:37 0.00B Define a build argument
ARG TF_VERSION
# 2023-02-28 09:43:37 0.00B Define a build argument
ARG CUDA_VERSION
# 2023-02-28 09:43:37 0.00B Define a build argument
ARG CUDA_VER
# 2023-02-28 09:43:37 172.62MB Add files or directories into the container
ADD *.tar.gz /usr/local/ # buildkit
# 2023-02-28 09:43:34 204.03MB Copy new files or directories into the container
COPY release /opt/release # buildkit
# 2023-01-26 18:03:05 0.00B
/bin/sh -c #(nop) CMD ["/bin/bash"]
# 2023-01-26 18:03:04 63.15MB
/bin/sh -c #(nop) ADD file:365c129e10f7ef1594e8086543b45f524313e36dd6a25b68f4da542a09491f04 in /
# 2023-01-26 18:03:03 0.00B
/bin/sh -c #(nop) LABEL org.opencontainers.image.version=18.04
# 2023-01-26 18:03:03 0.00B
/bin/sh -c #(nop) LABEL org.opencontainers.image.ref.name=ubuntu
# 2023-01-26 18:03:03 0.00B
/bin/sh -c #(nop) ARG LAUNCHPAD_BUILD_ARCH
# 2023-01-26 18:03:03 0.00B
/bin/sh -c #(nop) ARG RELEASE
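The listing above follows Docker's layer history, newest instruction first. A sketch of how a comparable listing can be regenerated from the pulled image, using the tag shown in the image information below:
docker history --no-trunc \
    --format "{{.CreatedAt}}\t{{.Size}}\t{{.CreatedBy}}" \
    modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64:v1.6.1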
Image information
{
    "Id": "sha256:4f2da63d9f5d46a7d53b7846f00de7b13ae57c71b6cac9b61a5b27fdce0774db",
    "RepoTags": [
        "modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64:v1.6.1",
        "swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64:v1.6.1"
    ],
    "RepoDigests": [
        "modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64@sha256:5857138164d95b55b3f218930fb543d8138c928a3fe819d900ff3b16e4a508dc",
        "swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64@sha256:5d6e10d8779c4e1f52d69afd206378acc627ebe10ab80dfe94a469821166c33f"
    ],
    "Parent": "",
    "Comment": "buildkit.dockerfile.v0",
    "Created": "2023-02-28T01:52:09.126366966Z",
    "Container": "",
    "ContainerConfig": null,
    "DockerVersion": "",
    "Author": "",
    "Config": {
        "Hostname": "",
        "Domainname": "",
        "User": "",
        "AttachStdin": false,
        "AttachStdout": false,
        "AttachStderr": false,
        "Tty": false,
        "OpenStdin": false,
        "StdinOnce": false,
        "Env": [
            "PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
            "LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/lib",
            "NVIDIA_VISIBLE_DEVICES=all",
            "NVIDIA_DRIVER_CAPABILITIES=video,compute,utility",
            "NVIDIA_REQUIRE_CUDA=cuda\u003e=10.2 brand=tesla,driver\u003e=418,driver\u003c419"
        ],
        "Cmd": [
            "/sbin/init",
            "--log-target=journal"
        ],
        "ArgsEscaped": true,
        "Image": "",
        "Volumes": {
            "/run": {},
            "/run/lock": {},
            "/sys/fs/cgroup": {},
            "/tmp": {}
        },
        "WorkingDir": "/root",
        "Entrypoint": null,
        "OnBuild": null,
        "Labels": {
            "com.nvidia.cuda.verison": "10.2.89",
            "com.nvidia.volumes.needed": "nvidia_driver",
            "org.opencontainers.image.ref.name": "ubuntu",
            "org.opencontainers.image.version": "18.04"
        },
        "StopSignal": "SIGRTMIN+3"
    },
    "Architecture": "amd64",
    "Os": "linux",
    "Size": 10059381469,
    "GraphDriver": {
        "Data": {
            "LowerDir": "/var/lib/docker/overlay2/4cd0729f1d5eb5e2148394d35c0a672172f7f2403b8b4358cbfd18fede53ba8a/diff:/var/lib/docker/overlay2/493a20db1d5e4c802803ed6e47fbb7771b5078ae71b445a79e7c2652589cc482/diff:/var/lib/docker/overlay2/9fac5ad4b3a59763d39ba6aa9fa7dcd7a4737612b717c80a4de77ff07c682389/diff:/var/lib/docker/overlay2/3e1608c75b6d6ab1a8eea17cc79bdbfc5f49101fd7c803dc20e3948e1cd1c9fc/diff:/var/lib/docker/overlay2/50ea4be3224e56f5e312a478407eec9f4a763dc2acfbea0fee52b39ec30b8905/diff:/var/lib/docker/overlay2/2d3d447963395b6885f27858eee6f216fb659245e989a3dc0d961d0e5e059540/diff:/var/lib/docker/overlay2/9af0c4463cdd27c150c237ae0b86b60cbe0f1f3de1316e0f3108197eea6ecaec/diff",
            "MergedDir": "/var/lib/docker/overlay2/8b0eef1cba1a2682413527ff6498f97403b89ee7c67a84996a6580933ae8172e/merged",
            "UpperDir": "/var/lib/docker/overlay2/8b0eef1cba1a2682413527ff6498f97403b89ee7c67a84996a6580933ae8172e/diff",
            "WorkDir": "/var/lib/docker/overlay2/8b0eef1cba1a2682413527ff6498f97403b89ee7c67a84996a6580933ae8172e/work"
        },
        "Name": "overlay2"
    },
    "RootFS": {
        "Type": "layers",
        "Layers": [
            "sha256:475a54c2a93de61ab1a000184b41b5c5370eef3842486f6c185cd9a001ff1a92",
            "sha256:4b8a7b0c54e3277561378428c6c31817b783a486537dedd67a77869bf91146d1",
            "sha256:cd06cb483672f0ada9bdbd2f21bf6537354269b794b2032cc21843b95318153d",
            "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
            "sha256:984a4ee79c21f9fcfe26718a53795538691193aef92d959c60880536e3dd16e0",
            "sha256:ff1c19316f0f54e101dd5e0996555c81d2887f641753076117146be084470438",
            "sha256:9185813cb580700011a9a4e3ca469065cfa528b401a49692858784a94c741ea5",
            "sha256:b5d5b3fdb57d874109090f10f1107772af32eac59d9431881a28b5377a703cac"
        ]
    },
    "Metadata": {
        "LastTagTime": "2025-07-30T23:03:28.761315152+08:00"
    }
}
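The JSON above is the raw inspect record for the image. Individual fields can be extracted with a Go template instead of reading the whole document; a minimal sketch against the same tag:
# Entry command and stop signal
docker inspect --format '{{.Config.Cmd}} {{.Config.StopSignal}}' \
    modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64:v1.6.1
# Labels as pretty-printed JSON
docker inspect --format '{{json .Config.Labels}}' \
    modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64:v1.6.1 | python3 -m json.tool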