Skip to content

Commit b00959a

Browse files
committed
Add gputil to vllm env
1 parent 12a14a3 commit b00959a

File tree

2 files changed

+2
-4
lines changed

2 files changed

+2
-4
lines changed

docker_build_script_ubuntu.sh

+2 −1
Original file line number | Diff line number | Diff line change
@@ -93,6 +93,7 @@ print('Done!')
9393
"
9494

9595
# Install vllm
96+
# gputil is for rayWorker in vllm to run as non-root
9697
export VLLM_CACHE=/workspace/.vllm_cache
9798
cd /h2ogpt_conda && python -m venv vllm_env --system-site-packages
9899
sp=`python3.10 -c 'import site; print(site.getsitepackages()[0])'` && \
@@ -110,7 +111,7 @@ sp=`python3.10 -c 'import site; print(site.getsitepackages()[0])'` && \
110111
find openai_vllm -name '*.py' | xargs sed -i 's/OpenAI/vLLM/g' && \
111112
cd /h2ogpt_conda && \
112113
python -m venv vllm_env --system-site-packages && \
113-
/h2ogpt_conda/vllm_env/bin/python -m pip install vllm ray pandas --extra-index-url https://download.pytorch.org/whl/cu118 && \
114+
/h2ogpt_conda/vllm_env/bin/python -m pip install vllm ray pandas gputil==1.4.0 --extra-index-url https://download.pytorch.org/whl/cu118 && \
114115
mkdir $VLLM_CACHE
115116
chmod -R a+rwx /h2ogpt_conda
116117

requirements.txt

-3
Original file line number | Diff line number | Diff line change
@@ -66,9 +66,6 @@ text-generation==0.6.0
6666
# for tokenization when don't have HF tokenizer
6767
tiktoken==0.4.0
6868

69-
# for rayWorker in vllm to run as non-root
70-
gputil==1.4.0
71-
7269
requests>=2.31.0
7370
urllib3>=1.26.16
7471
filelock>=3.12.2

0 commit comments

Comments (0)