Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 2c3b46f

Browse files
committed Jun 29, 2023
changes to fix build
1 parent c9e1103 commit 2c3b46f

File tree

5 files changed

+26
-20
lines changed

5 files changed

+26
-20
lines changed
 

‎Makefile

+15-13
Original file line numberDiff line numberDiff line change
@@ -184,20 +184,22 @@ ifdef LLAMA_CUDA_KQUANTS_ITER
184184
else
185185
CXXFLAGS += -DK_QUANTS_PER_ITERATION=2
186186
endif
187-
ggml-cuda.o:
188-
CXXFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS)) \
189-
-DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X) \
190-
-DGGML_CUDA_DMMV_Y=$(LLAMA_CUDA_DMMV_Y)
187+
188+
ggml-cuda.o: CXXFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS)) \
189+
-DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X) \
190+
-DGGML_CUDA_DMMV_Y=$(LLAMA_CUDA_DMMV_Y)
191191
# DGGML_CUDA_DMMV_F16 does not currently work with AMD.
192192
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
193-
$(CXX) $(CXXFLAGS) -x hip -c -o $@ $<
193+
$(CXX) $(CXXFLAGS) -x hip -c -o $@ $<
194+
194195
ggml_v2-cuda.o: otherarch/ggml_v2-cuda.cu otherarch/ggml_v2-cuda.h
195-
$(CXX) $(CXXFLAGS) -x hip -c -o $@ $<
196-
ggml_v2-cuda-legacy.o: otherarch/ggml_v2-cuda-legacy.cu otherarch/ggml_v2-cuda-legacy.h
197-
$(CXX) $(CXXFLAGS) -x hip -c -o $@ $<
196+
$(CXX) $(CXXFLAGS) -x hip -c -o $@ $<
198197

198+
ggml_v2-cuda-legacy.o: otherarch/ggml_v2-cuda-legacy.cu otherarch/ggml_v2-cuda-legacy.h
199+
$(CXX) $(CXXFLAGS) -x hip -c -o $@ $<
199200
endif # LLAMA_HIPBLAS
200201

202+
201203
ifdef LLAMA_METAL
202204
CFLAGS += -DGGML_USE_METAL -DGGML_METAL_NDEBUG
203205
CXXFLAGS += -DGGML_USE_METAL
@@ -253,11 +255,11 @@ else
253255
OPENBLAS_NOAVX2_BUILD = $(CXX) $(CXXFLAGS) $^ $(ARCH_ADD) -lopenblas -shared -o $@.so $(LDFLAGS)
254256
endif
255257
ifdef LLAMA_CLBLAST
256-
ifeq ($(UNAME_S),Darwin)
257-
CLBLAST_BUILD = $(CXX) $(CXXFLAGS) $^ -lclblast -framework OpenCL $(ARCH_ADD) -lopenblas -shared -o $@.so $(LDFLAGS)
258-
else
259-
CLBLAST_BUILD = $(CXX) $(CXXFLAGS) $^ -lclblast -lOpenCL $(ARCH_ADD) -lopenblas -shared -o $@.so $(LDFLAGS)
260-
endif
258+
ifeq ($(UNAME_S),Darwin)
259+
CLBLAST_BUILD = $(CXX) $(CXXFLAGS) $^ -lclblast -framework OpenCL $(ARCH_ADD) -lopenblas -shared -o $@.so $(LDFLAGS)
260+
else
261+
CLBLAST_BUILD = $(CXX) $(CXXFLAGS) $^ -lclblast -lOpenCL $(ARCH_ADD) -lopenblas -shared -o $@.so $(LDFLAGS)
262+
endif
261263
endif
262264

263265
ifdef LLAMA_CUBLAS

‎otherarch/gptj_v3.cpp

+3-2
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,8 @@
1818

1919
#ifdef GGML_USE_CUBLAS
2020
#include "ggml-cuda.h"
21-
#elif defined(GGML_USE_CLBLAST)
21+
#endif
22+
#if defined(GGML_USE_CLBLAST)
2223
#include "ggml-opencl.h"
2324
#endif
2425

@@ -639,4 +640,4 @@ bool gptj_eval(
639640
ggml_free(ctx0);
640641

641642
return true;
642-
}
643+
}

‎otherarch/llama_v2.cpp

+3-2
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,8 @@
1111
#include "ggml_v2.h"
1212
#ifdef GGML_USE_CUBLAS
1313
#include "ggml_v2-cuda.h"
14-
#elif defined(GGML_USE_CLBLAST)
14+
#endif
15+
#if defined(GGML_USE_CLBLAST)
1516
#include "ggml_v2-opencl.h"
1617
#endif
1718

@@ -3098,4 +3099,4 @@ std::vector<llama_token> llama_v2_tokenize(struct llama_v2_context * ctx, const
30983099
res.resize(n);
30993100

31003101
return res;
3101-
}
3102+
}

‎otherarch/mpt_v3.cpp

+2-1
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,8 @@
1818

1919
#ifdef GGML_USE_CUBLAS
2020
#include "ggml-cuda.h"
21-
#elif defined(GGML_USE_CLBLAST)
21+
#endif
22+
#if defined(GGML_USE_CLBLAST)
2223
#include "ggml-opencl.h"
2324
#endif
2425

‎otherarch/neox_v3.cpp

+3-2
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,8 @@
1616

1717
#ifdef GGML_USE_CUBLAS
1818
#include "ggml-cuda.h"
19-
#elif defined(GGML_USE_CLBLAST)
19+
#endif
20+
#if defined(GGML_USE_CLBLAST)
2021
#include "ggml-opencl.h"
2122
#endif
2223

@@ -658,4 +659,4 @@ bool gpt_neox_eval(
658659
ggml_free(ctx0);
659660

660661
return true;
661-
}
662+
}

0 commit comments

Comments (0)
Please sign in to comment.