1 parent 0484f64 commit e499848
docker/Dockerfile-base
@@ -40,9 +40,11 @@ RUN git lfs install --skip-repo && \
 
 RUN if [ "$TORCH_CUDA_ARCH_LIST" = "9.0+PTX" ] ; then \
         git clone https://github.com/Dao-AILab/flash-attention.git; \
+        cd flash-attention; \
         git checkout v2.7.4.post1; \
-        cd flash-attention/hopper; \
+        cd hopper; \
         FLASH_ATTENTION_DISABLE_SM80=TRUE FLASH_ATTENTION_DISABLE_FP8=TRUE MAX_JOBS=128 python setup.py install; \
-        elif if [ "$PYTORCH_VERSION" = "2.7.0" ] ; then \
+        cd ../..; \
+        elif [ "$PYTORCH_VERSION" = "2.7.0" ] ; then \
         pip3 install flash-attn==2.7.4.post1; \
         fi
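The patch moves into the cloned flash-attention checkout before running git checkout, builds the Hopper kernels from the hopper/ subdirectory, returns to the original directory before the elif, and corrects the malformed "elif if" test. As a rough sketch of how the two branches of this RUN block would be selected at build time, the invocations below pass the relevant build args; the image tags and the non-Hopper arch value "8.0" are assumptions for illustration, not taken from this commit.

# Hopper path: TORCH_CUDA_ARCH_LIST=9.0+PTX compiles flash-attention
# v2.7.4.post1 from source inside flash-attention/hopper.
docker build -f docker/Dockerfile-base \
    --build-arg TORCH_CUDA_ARCH_LIST="9.0+PTX" \
    -t base-image:hopper .    # image tag is hypothetical

# Fallback path: any other arch list with PYTORCH_VERSION=2.7.0 takes the
# elif branch and installs the prebuilt flash-attn==2.7.4.post1 wheel.
docker build -f docker/Dockerfile-base \
    --build-arg TORCH_CUDA_ARCH_LIST="8.0" \
    --build-arg PYTORCH_VERSION="2.7.0" \
    -t base-image:ampere .    # image tag is hypothetical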