gnu: llama-cpp: Produce a portable binary unless tuned.

* gnu/packages/machine-learning.scm (llama-cpp)[arguments]:
Augment #:configure-flags.
[properties]: New field.

Co-authored-by: John Fremlin <john@fremlin.org>
Change-Id: I9b3d72849107a6988fec94dc4a22614443338cb2
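
The flags below drop '-march=native', '-mfma', and the AVX2/AVX-512 code paths so that the binary built by default runs on any x86_64 machine.  The 'tunable?' property lets users opt back into CPU-specific code through Guix's '--tune' package transformation option; a usage sketch (without an argument, '--tune' targets the micro-architecture of the machine it runs on):

    guix build llama-cpp --tune
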
Author: Ludovic Courtès
Date:   2024-04-05 17:48:57 +02:00
Parent: a568ac8bcf
Commit: 560d5c6692
GPG key ID: 090B11993D9AEBB5

@@ -541,8 +541,16 @@ (define-public llama-cpp
     (build-system cmake-build-system)
     (arguments
      (list
-      #:configure-flags
-      '(list "-DLLAMA_BLAS=ON" "-DLLAMA_BLAS_VENDOR=OpenBLAS")
+      #:configure-flags #~'("-DLLAMA_BLAS=ON"
+                            "-DLLAMA_BLAS_VENDOR=OpenBLAS"
+                            "-DLLAMA_NATIVE=OFF" ;no '-march=native'
+                            "-DLLAMA_FMA=OFF"    ;and no '-mfma', etc.
+                            "-DLLAMA_AVX2=OFF"
+                            "-DLLAMA_AVX512=OFF"
+                            "-DLLAMA_AVX512_VBMI=OFF"
+                            "-DLLAMA_AVX512_VNNI=OFF")
       #:modules '((ice-9 textual-ports)
                   (guix build utils)
                   ((guix build python-build-system) #:prefix python:)
@@ -580,6 +588,7 @@ (define (make-script script)
     (native-inputs (list pkg-config))
     (propagated-inputs
      (list python-numpy python-pytorch python-sentencepiece openblas))
+    (properties '((tunable? . #true)))  ;use AVX512, FMA, etc. when available
     (home-page "https://github.com/ggerganov/llama.cpp")
     (synopsis "Port of Facebook's LLaMA model in C/C++")
     (description "This package provides a port to Facebook's LLaMA collection