Skip to content

Commit

Permalink
Make it use metal again by buildVariant.
Browse files Browse the repository at this point in the history
  • Loading branch information
manyoso authored and apage43 committed Jun 9, 2023
1 parent d345a24 commit 4b4056d
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 3 deletions.
4 changes: 4 additions & 0 deletions gpt4all-backend/llmodel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -123,11 +123,15 @@ LLModel *LLModel::construct(const std::string &modelPath, std::string buildVaria

//TODO: Auto-detect CUDA/OpenCL
if (buildVariant == "auto") {
#if defined(__APPLE__) && defined(__arm64__) // FIXME: See if metal works for intel macs
buildVariant = "metal";
#else
if (requires_avxonly()) {
buildVariant = "avxonly";
} else {
buildVariant = "default";
}
#endif
}
// Read magic
std::ifstream f(modelPath, std::ios::binary);
Expand Down
6 changes: 3 additions & 3 deletions gpt4all-chat/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -59,10 +59,10 @@ set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
add_subdirectory(../gpt4all-backend llmodel)

set(METAL_SHADER_FILE)

-if(LLAMA_METAL)
-    set(METAL_SHADER_FILE ../gpt4all-backend/llama.cpp-mainline/ggml-metal.metal)
+if(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
+    set(METAL_SHADER_FILE ../gpt4all-backend/llama.cpp-mainline/ggml-metal.metal)
 endif()

qt_add_executable(chat
main.cpp
chat.h chat.cpp
Expand Down

0 comments on commit 4b4056d

Please sign in to comment.