From a7b7250706c4acf90e124b882e295158e7f84ab0 Mon Sep 17 00:00:00 2001
From: ygn-ndwd-official
Date: Fri, 8 Dec 2023 17:13:17 +0800
Subject: [PATCH 1/2] add cuda

---
 chatglm/README.md                         | 40 ++++++++++++++++
 .../Dockerfile                            | 29 ++++++++++++
 .../Dockerfile                            | 23 ++++++++++
 chatglm/meta.yml                          |  5 ++
 cuda/README.md                            | 30 ++++++++++++
 .../Dockerfile                            | 46 +++++++++++++++++++
 cuda/meta.yml                             |  2 +
 llama/README.md                           | 40 ++++++++++++++++
 .../Dockerfile                            | 21 +++++++++
 .../Dockerfile                            | 16 +++++++
 llama/meta.yml                            |  5 ++
 pytorch/README.md                         | 38 +++++++++++++++
 pytorch/meta.yml                          |  5 ++
 .../Dockerfile                            |  9 ++++
 pytorch/pytorch2.1.0-oe2203sp2/Dockerfile | 12 +++++
 tensorflow/README.md                      | 39 ++++++++++++++++
 tensorflow/meta.yml                       |  5 ++
 .../tensorflow2.15.0-oe2203sp2/Dockerfile | 11 +++++
 .../Dockerfile                            |  8 ++++
 tensorrt/README.md                        | 31 +++++++++++++
 tensorrt/meta.yml                         |  2 +
 .../Dockerfile                            | 26 +++++++++++
 22 files changed, 443 insertions(+)
 create mode 100644 chatglm/README.md
 create mode 100644 chatglm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
 create mode 100644 chatglm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
 create mode 100644 chatglm/meta.yml
 create mode 100644 cuda/README.md
 create mode 100644 cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
 create mode 100644 cuda/meta.yml
 create mode 100644 llama/README.md
 create mode 100644 llama/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
 create mode 100644 llama/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
 create mode 100644 llama/meta.yml
 create mode 100644 pytorch/README.md
 create mode 100644 pytorch/meta.yml
 create mode 100644 pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
 create mode 100644 pytorch/pytorch2.1.0-oe2203sp2/Dockerfile
 create mode 100644 tensorflow/README.md
 create mode 100644 tensorflow/meta.yml
 create mode 100644 tensorflow/tensorflow2.15.0-oe2203sp2/Dockerfile
 create mode 100644 tensorflow/tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
 create mode 100644 tensorrt/README.md
 create mode 100644 tensorrt/meta.yml
 create mode 100644 tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile

diff --git a/chatglm/README.md b/chatglm/README.md
new file mode 100644
index 00000000..6b3f0691
--- /dev/null
+++ b/chatglm/README.md
@@ -0,0 +1,40 @@
+# ChatGLM
+
+# Quick reference
+
+- ChatGLM devel image.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build chatglm int8 image and push:
+```shell
+cd pytorch/pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../chatglm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/chatglm:chatglm2-6b-int8-pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+2. Build chatglm image and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../chatglm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/chatglm:chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
+
+3. Run:
+```shell
+docker run -d openeuler/chatglm:chatglm2-6b-int8-pytorch2.1.0-oe2203sp2
+docker run -d openeuler/chatglm:chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+```
+
+# Supported tags and respective Dockerfile links
+
+- chatglm2-6b-int8-pytorch2.1.0-oe2203sp2: ChatGLM2-6B int8 quantization model, openEuler 22.03 LTS-SP2
+- chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: ChatGLM2-6B, openEuler 22.03 LTS-SP2
+
+## Operating System
+Linux/Unix, x86-64
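The `docker run -d` commands above only start a detached container; to actually exercise the quantized model you can invoke the chatglm.cpp CLI directly. A minimal smoke-test sketch, assuming chatglm.cpp v0.3.0's `build/bin/main` binary and the weight path produced by the int8 Dockerfile below (CLI flags vary between chatglm.cpp releases):

```shell
# The int8 Dockerfile below converts the weights to /root/chatglm2-6b-int8.bin
# and sets WORKDIR to /root/chatglm.cpp, hence the relative paths here.
docker run --rm -it openeuler/chatglm:chatglm2-6b-int8-pytorch2.1.0-oe2203sp2 \
  ./build/bin/main -m ../chatglm2-6b-int8.bin -p "你好"
```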
diff --git a/chatglm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile b/chatglm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
new file mode 100644
index 00000000..5c24476d
--- /dev/null
+++ b/chatglm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
@@ -0,0 +1,29 @@
+FROM openeuler/pytorch:pytorch2.1.0-oe2203sp2
+
+RUN dnf install -y git python3 python3-pip cmake make gcc gcc-c++ && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN pip install --no-cache-dir torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cpu
+
+RUN cd ~/ && \
+    git clone https://github.com/li-plus/chatglm.cpp.git && \
+    cd chatglm.cpp && \
+    git checkout v0.3.0 && \
+    cd ~/chatglm.cpp && \
+    git submodule update --init --recursive && \
+    cmake -B build && \
+    cmake --build build -j --config Release && \
+    pip install --no-cache-dir torch tabulate tqdm transformers accelerate sentencepiece
+
+RUN cd ~/ && \
+    pip install huggingface_hub && \
+    export HF_ENDPOINT=https://hf-mirror.com && \
+    huggingface-cli download --resume-download THUDM/chatglm2-6b --local-dir chatglm2-6b && \
+    python3 chatglm.cpp/chatglm_cpp/convert.py -i chatglm2-6b -t q8_0 -o chatglm2-6b-int8.bin && \
+    rm -rf chatglm2-6b && \
+    rm -rf /root/.cache/huggingface
+
+WORKDIR /root/chatglm.cpp
diff --git a/chatglm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/chatglm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 00000000..2f44eba0
--- /dev/null
+++ b/chatglm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,23 @@
+FROM openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+
+RUN dnf install -y git && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN cd ~/ && \
+    git clone https://github.com/THUDM/ChatGLM2-6B.git && \
+    cd ChatGLM2-6B && \
+    pip install --no-cache-dir -r requirements.txt && \
+    sed -i 's/THUDM/\/root/g' web_demo.py && \
+    sed -i 's/THUDM/\/root/g' web_demo2.py && \
+    sed -i 's/THUDM/\/root/g' cli_demo.py && \
+    pip install --no-cache-dir gradio==3.50.0
+
+RUN cd ~/ && \
+    pip install --no-cache-dir huggingface_hub && \
+    export HF_ENDPOINT=https://hf-mirror.com && \
+    huggingface-cli download --resume-download THUDM/chatglm2-6b --local-dir chatglm2-6b
+
+WORKDIR /root/ChatGLM2-6B
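Because the `sed` calls above point the demo scripts at `/root/chatglm2-6b`, the CLI demo can be launched as-is. A sketch, assuming the host has a working NVIDIA driver and the NVIDIA Container Toolkit configured for Docker:

```shell
# Interactive ChatGLM2-6B session on GPU; cli_demo.py ships with the
# ChatGLM2-6B checkout cloned in the Dockerfile above.
docker run --rm -it --gpus all \
  openeuler/chatglm:chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
  python3 cli_demo.py
```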
diff --git a/chatglm/meta.yml b/chatglm/meta.yml
new file mode 100644
index 00000000..6735b7aa
--- /dev/null
+++ b/chatglm/meta.yml
@@ -0,0 +1,5 @@
+chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - chatglm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
+
+chatglm2-6b-int8-pytorch2.1.0-oe2203sp2:
+  - chatglm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
diff --git a/cuda/README.md b/cuda/README.md
new file mode 100644
index 00000000..521f00b8
--- /dev/null
+++ b/cuda/README.md
@@ -0,0 +1,30 @@
+# cuda
+
+# Quick reference
+
+- cuda devel image.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build images and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
+
+2. Run:
+```shell
+docker run -d openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+```
+
+# Supported tags and respective Dockerfile links
+
+- cuda12.2.0-cudnn8.9.5.30-oe2203sp2: cuda v12.2.0, openEuler 22.03 LTS-SP2
+
+## Operating System
+Linux/Unix, x86-64
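A quick way to check that the image works end to end (a sketch; `nvcc` is on `PATH` via the Dockerfile's `ENV`, while `nvidia-smi` is injected from the host driver by the NVIDIA Container Toolkit):

```shell
# Toolkit side: the compiler should report CUDA 12.2.
docker run --rm openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2 nvcc --version
# Driver side: needs --gpus all and an NVIDIA driver on the host.
docker run --rm --gpus all openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2 nvidia-smi
```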
diff --git a/cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 00000000..b0129e67
--- /dev/null
+++ b/cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,46 @@
+FROM openeuler/openeuler:22.03-lts-sp2
+
+ARG CUDA_URL=https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run
+ARG LIBNCCL_URL=https://developer.download.nvidia.cn/compute/cuda/repos/rhel8/x86_64/libnccl-2.18.5-1+cuda12.2.x86_64.rpm
+ARG CUDA_CUPTI_URL=https://developer.download.nvidia.cn/compute/cuda/repos/rhel8/x86_64/cuda-cupti-12-2-12.2.142-1.x86_64.rpm
+ARG CUDNN_LINUX=cudnn-linux-x86_64-8.9.5.30_cuda12-archive
+ARG CUDNN_URL=https://repo.oepkgs.net/openeuler/rpm/openEuler-22.03-LTS-SP2/contrib/Artificial_Intelligence/source/Packages/${CUDNN_LINUX}.tar.xz
+ARG NCCL=nccl_2.19.3-1+cuda12.2_x86_64
+ARG NCCL_URL=https://repo.oepkgs.net/openeuler/rpm/openEuler-22.03-LTS-SP2/contrib/Artificial_Intelligence/source/Packages/nccl_2.19.3-1%2Bcuda12.2_x86_64.txz
+
+ENV PATH="$PATH:/usr/local/cuda/bin"
+ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64"
+
+RUN dnf install wget kmod gcc tar make dkms xz python3 python3-pip -y && \
+    rm -rf /var/cache/dnf
+
+RUN wget ${CUDA_URL} -O ~/cuda-toolkit.run && \
+    sh ~/cuda-toolkit.run --silent --toolkit && \
+    rm -rf ~/cuda-toolkit.run && \
+    find /usr/local/cuda/ -name "*.a" | xargs rm -f
+
+RUN wget ${LIBNCCL_URL} -O ~/libnccl.rpm && \
+    dnf install -y ~/libnccl.rpm && \
+    rm -rf ~/libnccl.rpm && \
+    find /usr/local/cuda/ -name "*.a" | xargs rm -f
+
+RUN wget ${CUDA_CUPTI_URL} -O ~/cuda-cupti.rpm && \
+    dnf install -y ~/cuda-cupti.rpm && \
+    rm -rf ~/cuda-cupti.rpm && \
+    find /usr/local/cuda/ -name "*.a" | xargs rm -f
+
+RUN cd ~/ && wget ${CUDNN_URL} && \
+    tar -xf ${CUDNN_LINUX}.tar.xz && \
+    cp ~/${CUDNN_LINUX}/include/* /usr/local/cuda/include/ && \
+    cp ~/${CUDNN_LINUX}/lib/* /usr/local/cuda/lib64/ && \
+    rm -rf ~/${CUDNN_LINUX}* && \
+    find /usr/local/cuda/ -name "*.a" | xargs rm -f
+
+RUN cd ~/ && wget ${NCCL_URL} && \
+    tar -xf ${NCCL}.txz && \
+    cp ~/${NCCL}/include/* /usr/local/cuda/include/ && \
+    cp -r ~/${NCCL}/lib/* /usr/local/cuda/lib64/ && \
+    rm -rf ~/${NCCL}* && \
+    find /usr/local/cuda/ -name "*.a" | xargs rm -f
+
+WORKDIR /root
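The cuDNN and NCCL archives are unpacked straight into `/usr/local/cuda`, which the `ENV` lines put on `LD_LIBRARY_PATH`. One way to confirm the shared objects actually landed there (sketch):

```shell
docker run --rm openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
  sh -c 'ls /usr/local/cuda/lib64 | grep -E "libcudnn|libnccl"'
```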
diff --git a/cuda/meta.yml b/cuda/meta.yml
new file mode 100644
index 00000000..6939e059
--- /dev/null
+++ b/cuda/meta.yml
@@ -0,0 +1,2 @@
+cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
diff --git a/llama/README.md b/llama/README.md
new file mode 100644
index 00000000..021938b0
--- /dev/null
+++ b/llama/README.md
@@ -0,0 +1,40 @@
+# Llama
+
+# Quick reference
+
+- Llama devel image.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build llama int8 images and push:
+```shell
+cd pytorch/pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../llama/llama2-int8-pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/llama:llama2-int8-pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+2. Build llama int8 images and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2 && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../llama/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/llama:llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
+
+3. Run:
+```shell
+docker run -d openeuler/llama:llama2-int8-pytorch2.1.0-oe2203sp2
+docker run -d openeuler/llama:llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+```
+
+# Supported tags and respective Dockerfile links
+
+- llama2-int8-pytorch2.1.0-oe2203sp2: llama2 int8 quantization model, openEuler 22.03 LTS-SP2
+- llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: llama2 model, openEuler 22.03 LTS-SP2
+
+## Operating System
+Linux/Unix, x86-64
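As with the chatglm image, the int8 tag bundles a llama.cpp build together with a pre-quantized GGUF file, so a prompt can be fed to it directly. A sketch, assuming llama.cpp b1555's `build/bin/main` interface and the `llama-2-7b-q8_0.gguf` file that the Dockerfile below leaves in `/root/llama.cpp`:

```shell
docker run --rm -it openeuler/llama:llama2-int8-pytorch2.1.0-oe2203sp2 \
  ./build/bin/main -m ./llama-2-7b-q8_0.gguf -p "Hello" -n 64
```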
diff --git a/llama/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile b/llama/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
new file mode 100644
index 00000000..5db4f6af
--- /dev/null
+++ b/llama/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
@@ -0,0 +1,21 @@
+FROM openeuler/pytorch:pytorch2.1.0-oe2203sp2
+
+RUN dnf install -y wget git python3 python3-pip cmake make gcc gcc-c++ tar && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN cd ~/ && git clone https://github.com/ggerganov/llama.cpp.git && cd llama.cpp && git checkout b1555
+
+RUN cd ~/llama.cpp && \
+    cmake -B build && cmake --build build --config Release && \
+    pip install --no-cache-dir -r requirements.txt && \
+    wget https://repo.oepkgs.net/openeuler/rpm/openEuler-22.03-LTS-SP2/contrib/Artificial_Intelligence/model/llama/llama.tar.gz && \
+    tar -xf llama.tar.gz && \
+    sed -i '924d' convert.py && \
+    python3 convert.py ./llama/llama-2-7b && \
+    ./build/bin/quantize ./llama/llama-2-7b/ggml-model-f16.gguf ./llama-2-7b-q8_0.gguf q8_0 && \
+    rm -rf llama llama.tar.gz
+
+WORKDIR /root/llama.cpp
diff --git a/llama/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/llama/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 00000000..993b7a6e
--- /dev/null
+++ b/llama/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,16 @@
+FROM openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+
+RUN dnf install -y git python3 python3-pip wget && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+
+RUN cd ~/ && \
+    wget https://repo.oepkgs.net/openeuler/rpm/openEuler-22.03-LTS-SP2/contrib/Artificial_Intelligence/model/llama/llama.tar.gz && \
+    tar -xf llama.tar.gz && rm -rf llama.tar.gz && \
+    cd llama && \
+    pip install --no-cache-dir -e .
+
+WORKDIR /root/llama
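The GPU tag installs Meta's `llama` reference code with `pip install -e .`, which is normally driven through `torchrun`. The sketch below is an assumption-heavy outline: `example_text_completion.py` is the upstream entry point, but the checkpoint and tokenizer locations inside `llama.tar.gz` may differ and will need adjusting:

```shell
docker run --rm -it --gpus all \
  openeuler/llama:llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
  torchrun --nproc_per_node 1 example_text_completion.py \
    --ckpt_dir llama-2-7b/ --tokenizer_path tokenizer.model
```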
diff --git a/llama/meta.yml b/llama/meta.yml
new file mode 100644
index 00000000..dbb0841d
--- /dev/null
+++ b/llama/meta.yml
@@ -0,0 +1,5 @@
+llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - llama/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
+
+llama2-int8-pytorch2.1.0-oe2203sp2:
+  - llama/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
diff --git a/pytorch/README.md b/pytorch/README.md
new file mode 100644
index 00000000..dd26b791
--- /dev/null
+++ b/pytorch/README.md
@@ -0,0 +1,38 @@
+# pytorch
+
+# Quick reference
+
+- pytorch devel image.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build images for cpu and push:
+```shell
+cd pytorch/pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+2. Build images for gpu and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
+
+3. Run:
+```shell
+docker run -d openeuler/pytorch:pytorch2.1.0-oe2203sp2
+docker run -d openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+```
+
+# Supported tags and respective Dockerfile links
+
+- pytorch2.1.0-oe2203sp2: pytorch 2.1.0-cpu, openEuler 22.03 LTS-SP2
+- pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: pytorch 2.1.0-gpu, openEuler 22.03 LTS-SP2
+
+## Operating System
+Linux/Unix, x86-64
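A one-liner per tag verifies that the wheels import cleanly and that the CUDA variant actually sees a GPU (sketch; `--gpus all` needs the NVIDIA Container Toolkit on the host):

```shell
docker run --rm openeuler/pytorch:pytorch2.1.0-oe2203sp2 \
  python3 -c 'import torch; print(torch.__version__)'
docker run --rm --gpus all openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
  python3 -c 'import torch; print(torch.cuda.is_available())'
```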
diff --git a/pytorch/meta.yml b/pytorch/meta.yml
new file mode 100644
index 00000000..884390cd
--- /dev/null
+++ b/pytorch/meta.yml
@@ -0,0 +1,5 @@
+pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
+
+pytorch2.1.0-oe2203sp2:
+  - pytorch/pytorch2.1.0-oe2203sp2/Dockerfile
diff --git a/pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 00000000..fed90f9e
--- /dev/null
+++ b/pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,9 @@
+FROM openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+
+RUN pip install --no-cache-dir torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 numpy
+
+WORKDIR /root
diff --git a/pytorch/pytorch2.1.0-oe2203sp2/Dockerfile b/pytorch/pytorch2.1.0-oe2203sp2/Dockerfile
new file mode 100644
index 00000000..2175fb16
--- /dev/null
+++ b/pytorch/pytorch2.1.0-oe2203sp2/Dockerfile
@@ -0,0 +1,12 @@
+FROM openeuler/openeuler:22.03-lts-sp2
+
+RUN dnf install python3 python3-pip -y && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN pip install --no-cache-dir torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cpu
+RUN pip install --no-cache-dir numpy
+
+WORKDIR /root
diff --git a/tensorflow/README.md b/tensorflow/README.md
new file mode 100644
index 00000000..0a4bd72c
--- /dev/null
+++ b/tensorflow/README.md
@@ -0,0 +1,39 @@
+# tensorflow
+
+# Quick reference
+
+- tensorflow devel image.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build images for cpu and push:
+```shell
+cd tensorflow/tensorflow2.15.0-oe2203sp2/ && docker buildx build -t "openeuler/tensorflow:tensorflow2.15.0-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+2. Build images for gpu and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../tensorflow/tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/tensorflow:tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
+
+3. Run:
+```shell
+docker run -d openeuler/tensorflow:tensorflow2.15.0-oe2203sp2
+docker run -d openeuler/tensorflow:tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+```
+
+# Supported tags and respective Dockerfile links
+
+- tensorflow2.15.0-oe2203sp2: tensorflow 2.15.0, openEuler 22.03 LTS-SP2
+- tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: tensorflow 2.15.0 cuda image, openEuler 22.03 LTS-SP2
+
+## Operating System
+Linux/Unix, x86-64
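The same style of sanity check works for the TensorFlow tags (sketch):

```shell
docker run --rm openeuler/tensorflow:tensorflow2.15.0-oe2203sp2 \
  python3 -c 'import tensorflow as tf; print(tf.__version__)'
docker run --rm --gpus all openeuler/tensorflow:tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
  python3 -c 'import tensorflow as tf; print(tf.config.list_physical_devices("GPU"))'
```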
diff --git a/tensorflow/meta.yml b/tensorflow/meta.yml
new file mode 100644
index 00000000..1c37779d
--- /dev/null
+++ b/tensorflow/meta.yml
@@ -0,0 +1,5 @@
+tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - tensorflow/tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
+
+tensorflow2.15.0-oe2203sp2:
+  - tensorflow/tensorflow2.15.0-oe2203sp2/Dockerfile
diff --git a/tensorflow/tensorflow2.15.0-oe2203sp2/Dockerfile b/tensorflow/tensorflow2.15.0-oe2203sp2/Dockerfile
new file mode 100644
index 00000000..1310fd18
--- /dev/null
+++ b/tensorflow/tensorflow2.15.0-oe2203sp2/Dockerfile
@@ -0,0 +1,11 @@
+FROM openeuler/openeuler:22.03-lts-sp2
+
+RUN dnf install wget tar xz python3-pip -y && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN pip install --no-cache-dir tensorflow==2.15.0
+
+WORKDIR /root
diff --git a/tensorflow/tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/tensorflow/tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 00000000..7750ed71
--- /dev/null
+++ b/tensorflow/tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,8 @@
+FROM openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN pip install --no-cache-dir tensorflow==2.15.0 numpy
+
+WORKDIR /root
diff --git a/tensorrt/README.md b/tensorrt/README.md
new file mode 100644
index 00000000..54ed2978
--- /dev/null
+++ b/tensorrt/README.md
@@ -0,0 +1,31 @@
+# tensorrt
+
+# Quick reference
+
+- tensorrt devel image.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build images and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
+
+2. Run:
+```shell
+docker run -d openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+```
+
+# Supported tags and respective Dockerfile links
+
+- trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: tensorrt 8.6.1.6, openEuler 22.03 LTS-SP2
+
+## Operating System
+Linux/Unix, x86-64
diff --git a/tensorrt/meta.yml b/tensorrt/meta.yml
new file mode 100644
index 00000000..0bb3efc0
--- /dev/null
+++ b/tensorrt/meta.yml
@@ -0,0 +1,2 @@
+trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
diff --git a/tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 00000000..05e67574
--- /dev/null
+++ b/tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,26 @@
+FROM openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+
+ARG TENSORRT=TensorRT-8.6.1.6
+ARG TENSORRT_TAR_GZ=${TENSORRT}.Linux.x86_64-gnu.cuda-12.0.tar.gz
+ARG TENSORRT_URL=https://repo.oepkgs.net/openeuler/rpm/openEuler-22.03-LTS-SP2/contrib/Artificial_Intelligence/source/Packages/${TENSORRT_TAR_GZ}
+
+ENV LOCAL_DIR=/usr/local
+
+ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${LOCAL_DIR}/${TENSORRT}/lib:${LOCAL_DIR}/cuda/lib64
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN cd ${LOCAL_DIR} && \
+    wget ${TENSORRT_URL} && \
+    tar -xf ${TENSORRT_TAR_GZ} && \
+    cd ${TENSORRT} && \
+    pip install --no-cache-dir python/tensorrt-8.6.1-cp39-none-linux_x86_64.whl && \
+    pip install --no-cache-dir python/tensorrt_lean-8.6.1-cp39-none-linux_x86_64.whl && \
+    pip install --no-cache-dir python/tensorrt_dispatch-8.6.1-cp39-none-linux_x86_64.whl && \
+    pip install --no-cache-dir uff/uff-0.6.9-py2.py3-none-any.whl && \
+    pip install --no-cache-dir graphsurgeon/graphsurgeon-0.4.6-py2.py3-none-any.whl && \
+    find ${LOCAL_DIR}/${TENSORRT}/ -name "*.a" | xargs rm -f && \
+    rm -r ${LOCAL_DIR}/${TENSORRT_TAR_GZ}
+
+WORKDIR /root
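The wheels above target CPython 3.9, the default interpreter on openEuler 22.03; a quick import test of the finished image (sketch):

```shell
docker run --rm --gpus all openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
  python3 -c 'import tensorrt; print(tensorrt.__version__)'
```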
--
Gitee

From b5ff25305a812b2116cd8f0f529751785748b0ba Mon Sep 17 00:00:00 2001
From: ygn-ndwd-official
Date: Wed, 13 Dec 2023 09:59:08 +0800
Subject: [PATCH 2/2] change llm

---
 chatglm/README.md        | 40 -------------------
 chatglm/meta.yml         |  5 ---
 cuda/README.md           |  2 +-
 llama/meta.yml           |  5 ---
 {llama => llm}/README.md | 33 +++++++++++-----
 .../Dockerfile           |  0
 .../Dockerfile           |  0
 .../Dockerfile           |  0
 .../Dockerfile           |  0
 llm/meta.yml             | 11 +++++
 pytorch/README.md        |  4 +-
 tensorflow/README.md     |  4 +-
 tensorrt/README.md       |  2 +-
 13 files changed, 42 insertions(+), 64 deletions(-)
 delete mode 100644 chatglm/README.md
 delete mode 100644 chatglm/meta.yml
 delete mode 100644 llama/meta.yml
 rename {llama => llm}/README.md (39%)
 rename {chatglm => llm}/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile (100%)
 rename {chatglm => llm}/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile (100%)
 rename {llama => llm}/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile (100%)
 rename {llama => llm}/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile (100%)
 create mode 100644 llm/meta.yml

diff --git a/chatglm/README.md b/chatglm/README.md
deleted file mode 100644
index 6b3f0691..00000000
--- a/chatglm/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# ChatGLM
-
-# Quick reference
-
-- ChatGLM devel image.
-
-- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
-
-- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
-
-# Build reference
-
-1. Build chatglm int8 image and push:
-```shell
-cd pytorch/pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
-cd ../../chatglm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/chatglm:chatglm2-6b-int8-pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
-```
-
-2. Build chatglm image and push:
-```shell
-cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
-cd ../../pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
-cd ../../chatglm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/chatglm:chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
-```
-
-We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
-
-3. Run:
-```shell
-docker run -d openeuler/chatglm:chatglm2-6b-int8-pytorch2.1.0-oe2203sp2
-docker run -d openeuler/chatglm:chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
-```
-
-# Supported tags and respective Dockerfile links
-
-- chatglm2-6b-int8-pytorch2.1.0-oe2203sp2: ChatGLM2-6B int8 quantization model, openEuler 22.03 LTS-SP2
-- chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: ChatGLM2-6B, openEuler 22.03 LTS-SP2
-
-## Operating System
-Linux/Unix, x86-64
diff --git a/chatglm/meta.yml b/chatglm/meta.yml
deleted file mode 100644
index 6735b7aa..00000000
--- a/chatglm/meta.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
-  - chatglm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
-
-chatglm2-6b-int8-pytorch2.1.0-oe2203sp2:
-  - chatglm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
diff --git a/cuda/README.md b/cuda/README.md
index 521f00b8..c4f6b76f 100644
--- a/cuda/README.md
+++ b/cuda/README.md
@@ -19,7 +19,7 @@ We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
 
 2. Run:
 ```shell
-docker run -d openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+docker run -d -it --gpus all openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2
 ```
 
 # Supported tags and respective Dockerfile links
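The switch to `--gpus all` assumes the Docker host can hand GPUs to containers at all. A rough host-side setup sketch; the repository configuration varies by distribution, so treat this as an outline only:

```shell
# Assumes NVIDIA's container-toolkit package repository is already configured.
dnf install -y nvidia-container-toolkit
nvidia-ctk runtime configure --runtime=docker
systemctl restart docker
```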
diff --git a/llama/meta.yml b/llama/meta.yml
deleted file mode 100644
index dbb0841d..00000000
--- a/llama/meta.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
-  - llama/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
-
-llama2-int8-pytorch2.1.0-oe2203sp2:
-  - llama/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
diff --git a/llama/README.md b/llm/README.md
similarity index 39%
rename from llama/README.md
rename to llm/README.md
index 021938b0..8ef2197b 100644
--- a/llama/README.md
+++ b/llm/README.md
@@ -1,8 +1,8 @@
-# Llama
+# llm
 
 # Quick reference
 
-- Llama devel image.
+- llm devel image.
 
 - Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
 
@@ -13,28 +13,45 @@
 1. Build llama int8 images and push:
 ```shell
 cd pytorch/pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
-cd ../../llama/llama2-int8-pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/llama:llama2-int8-pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../llm/llama2-int8-pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/llm:llama2-int8-pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
 ```
 
-2. Build llama int8 images and push:
+2. Build llama images and push:
 ```shell
 cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2 && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
 cd ../../pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
-cd ../../llama/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/llama:llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../llm/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/llm:llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
 ```
 
+3. Build chatglm int8 image and push:
+```shell
+cd pytorch/pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../llm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/llm:chatglm2-6b-int8-pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+4. Build chatglm image and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../llm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/llm:chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
 We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
 
-3. Run:
+5. Run:
 ```shell
-docker run -d openeuler/llama:llama2-int8-pytorch2.1.0-oe2203sp2
-docker run -d openeuler/llama:llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+docker run -d -it openeuler/llm:llama2-int8-pytorch2.1.0-oe2203sp2
+docker run -d -it --gpus all openeuler/llm:llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+docker run -d -it openeuler/llm:chatglm2-6b-int8-pytorch2.1.0-oe2203sp2
+docker run -d -it --gpus all openeuler/llm:chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
 ```
 
 # Supported tags and respective Dockerfile links
 
 - llama2-int8-pytorch2.1.0-oe2203sp2: llama2 int8 quantization model, openEuler 22.03 LTS-SP2
 - llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: llama2 model, openEuler 22.03 LTS-SP2
+- chatglm2-6b-int8-pytorch2.1.0-oe2203sp2: ChatGLM2-6B int8 quantization model, openEuler 22.03 LTS-SP2
+- chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: ChatGLM2-6B, openEuler 22.03 LTS-SP2
 
 ## Operating System
 Linux/Unix, x86-64
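Beyond `cli_demo.py`, the ChatGLM2-6B checkout baked into these images also ships Gradio web demos. A sketch for exposing one from the merged `llm` image: 7860 is Gradio's usual default port, and `web_demo.py` may need `server_name="0.0.0.0"` in its `launch()` call before it accepts traffic from outside the container:

```shell
docker run -it --gpus all -p 7860:7860 \
  openeuler/llm:chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
  python3 web_demo.py
```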
diff --git a/chatglm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile b/llm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
similarity index 100%
rename from chatglm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
rename to llm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
diff --git a/chatglm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/llm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
similarity index 100%
rename from chatglm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
rename to llm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
diff --git a/llama/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile b/llm/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
similarity index 100%
rename from llama/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
rename to llm/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
diff --git a/llama/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/llm/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
similarity index 100%
rename from llama/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
rename to llm/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
diff --git a/llm/meta.yml b/llm/meta.yml
new file mode 100644
index 00000000..2fc139c8
--- /dev/null
+++ b/llm/meta.yml
@@ -0,0 +1,11 @@
+chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - llm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
+
+chatglm2-6b-int8-pytorch2.1.0-oe2203sp2:
+  - llm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
+
+llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - llm/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
+
+llama2-int8-pytorch2.1.0-oe2203sp2:
+  - llm/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
diff --git a/pytorch/README.md b/pytorch/README.md
index dd26b791..979e602b 100644
--- a/pytorch/README.md
+++ b/pytorch/README.md
@@ -25,8 +25,8 @@ We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
 
 3. Run:
 ```shell
-docker run -d openeuler/pytorch:pytorch2.1.0-oe2203sp2
-docker run -d openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+docker run -d -it openeuler/pytorch:pytorch2.1.0-oe2203sp2
+docker run -d -it --gpus all openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
 ```
 
 # Supported tags and respective Dockerfile links
diff --git a/tensorflow/README.md b/tensorflow/README.md
index 0a4bd72c..7eeda33f 100644
--- a/tensorflow/README.md
+++ b/tensorflow/README.md
@@ -26,8 +26,8 @@ We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
 
 3. Run:
 ```shell
-docker run -d openeuler/tensorflow:tensorflow2.15.0-oe2203sp2
-docker run -d openeuler/tensorflow:tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+docker run -d -it openeuler/tensorflow:tensorflow2.15.0-oe2203sp2
+docker run -d -it --gpus all openeuler/tensorflow:tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
 ```
 
 # Supported tags and respective Dockerfile links
diff --git a/tensorrt/README.md b/tensorrt/README.md
index 54ed2978..9608b0a2 100644
--- a/tensorrt/README.md
+++ b/tensorrt/README.md
@@ -20,7 +20,7 @@ We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for more details.
 
 2. Run:
 ```shell
-docker run -d openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+docker run -d -it --gpus all openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
 ```
 
 # Supported tags and respective Dockerfile links
--
Gitee
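Each directory's `meta.yml` maps a published tag to the Dockerfile that produces it, so rebuilding everything under `llm/` can be scripted from that file. A sketch, assuming the repository root as working directory and a jq-style `yq` for YAML parsing:

```shell
for df in $(yq -r '.[][]' llm/meta.yml); do
  dir=$(dirname "$df")    # e.g. llm/llama2-int8-pytorch2.1.0-oe2203sp2
  tag=$(basename "$dir")  # tag name mirrors the directory name
  docker buildx build -t "openeuler/llm:${tag}" --platform linux/x86_64 "$dir" --push
done
```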