--- /srv/rebuilderd/tmp/rebuilderdyqnARE/inputs/llama.cpp-examples_8681+dfsg-1_arm64.deb
+++ /srv/rebuilderd/tmp/rebuilderdyqnARE/out/llama.cpp-examples_8681+dfsg-1_arm64.deb
├── file list
│ @@ -1,3 +1,3 @@
│ -rw-r--r-- 0 0 0 4 2026-04-07 16:34:28.000000 debian-binary
│ -rw-r--r-- 0 0 0 2412 2026-04-07 16:34:28.000000 control.tar.xz
│ --rw-r--r-- 0 0 0 6346016 2026-04-07 16:34:28.000000 data.tar.xz
│ +-rw-r--r-- 0 0 0 6350780 2026-04-07 16:34:28.000000 data.tar.xz
├── control.tar.xz
│ ├── control.tar
│ │ ├── ./md5sums
│ │ │ ├── ./md5sums
│ │ │ │┄ Files differ
├── data.tar.xz
│ ├── data.tar
│ │ ├── file list
│ │ │ @@ -31,28 +31,28 @@
│ │ │ -rw-r--r-- 0 root (0) root (0) 13098 2026-04-07 16:34:28.000000 ./usr/share/doc/llama.cpp-examples/copyright
│ │ │ drwxr-xr-x 0 root (0) root (0) 0 2026-04-07 16:34:28.000000 ./usr/share/lintian/
│ │ │ drwxr-xr-x 0 root (0) root (0) 0 2026-04-07 16:34:28.000000 ./usr/share/lintian/overrides/
│ │ │ -rw-r--r-- 0 root (0) root (0) 177 2026-04-07 16:34:28.000000 ./usr/share/lintian/overrides/llama.cpp-examples
│ │ │ drwxr-xr-x 0 root (0) root (0) 0 2026-04-07 16:34:28.000000 ./usr/share/man/
│ │ │ drwxr-xr-x 0 root (0) root (0) 0 2026-04-07 16:34:28.000000 ./usr/share/man/man1/
│ │ │ -rw-r--r-- 0 root (0) root (0) 6181 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-batched.1.gz
│ │ │ --rw-r--r-- 0 root (0) root (0) 6513 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-debug.1.gz
│ │ │ --rw-r--r-- 0 root (0) root (0) 6394 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-diffusion-cli.1.gz
│ │ │ +-rw-r--r-- 0 root (0) root (0) 6514 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-debug.1.gz
│ │ │ +-rw-r--r-- 0 root (0) root (0) 6396 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-diffusion-cli.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 6497 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-embedding.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 6008 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-eval-callback.1.gz
│ │ │ --rw-r--r-- 0 root (0) root (0) 6392 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-finetune.1.gz
│ │ │ +-rw-r--r-- 0 root (0) root (0) 6388 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-finetune.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 398 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-gguf-hash.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 251 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-gguf.1.gz
│ │ │ --rw-r--r-- 0 root (0) root (0) 6046 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-idle.1.gz
│ │ │ --rw-r--r-- 0 root (0) root (0) 6004 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-lookahead.1.gz
│ │ │ +-rw-r--r-- 0 root (0) root (0) 6043 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-idle.1.gz
│ │ │ +-rw-r--r-- 0 root (0) root (0) 6001 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-lookahead.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 6255 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-lookup-create.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 274 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-lookup-merge.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 6253 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-lookup-stats.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 6243 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-lookup.1.gz
│ │ │ --rw-r--r-- 0 root (0) root (0) 6203 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-parallel.1.gz
│ │ │ +-rw-r--r-- 0 root (0) root (0) 6206 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-parallel.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 6199 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-passkey.1.gz
│ │ │ --rw-r--r-- 0 root (0) root (0) 6356 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-retrieval.1.gz
│ │ │ +-rw-r--r-- 0 root (0) root (0) 6357 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-retrieval.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 6015 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-save-load-state.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 268 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-simple-chat.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 259 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-simple.1.gz
│ │ │ --rw-r--r-- 0 root (0) root (0) 6693 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-speculative-simple.1.gz
│ │ │ --rw-r--r-- 0 root (0) root (0) 6681 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-speculative.1.gz
│ │ │ +-rw-r--r-- 0 root (0) root (0) 6699 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-speculative-simple.1.gz
│ │ │ +-rw-r--r-- 0 root (0) root (0) 6686 2026-04-07 16:34:28.000000 ./usr/share/man/man1/llama-speculative.1.gz
│ │ ├── ./usr/share/man/man1/llama-batched.1.gz
│ │ │ ├── llama-batched.1
│ │ │ │ @@ -1,13 +1,12 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-BATCHED "1" "April 2026" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-batched \- llama-batched
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR, \fB\-\-usage\fR print usage and exit
│ │ │ │ \fB\-\-version\fR show version and build info
│ │ │ │ \fB\-\-license\fR show source code license and dependencies
│ │ │ │ \fB\-cl\fR, \fB\-\-cache\-list\fR show list of models in cache
│ │ │ │ \fB\-\-completion\-bash\fR print source\-able bash completion script for llama.cpp
│ │ │ │ @@ -413,16 +412,19 @@
│ │ │ │ .PP
│ │ │ │ \fB\-\-dry\-sequence\-breaker\fR STRING add sequence breaker for DRY sampling, clearing out default breakers
│ │ │ │ .TP
│ │ │ │ ('\en', ':', '"', '*') in the process; use "none" to not use any
│ │ │ │ sequence breakers
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-target\fR N adaptive\-p: select tokens near this probability (valid range 0.0 to
│ │ │ │ -.TP
│ │ │ │ -1.0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ +1.load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ +.PP
│ │ │ │ +0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ [(more info)](https://github.com/ggml\-org/llama.cpp/pull/17927)
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-decay\fR N adaptive\-p: decay rate for target adaptation over time. lower values
│ │ │ │ .TP
│ │ │ │ are more reactive, higher values are more stable.
│ │ │ │ (valid range 0.0 to 0.99) (default: 0.90)
│ │ │ │ .PP
│ │ ├── ./usr/share/man/man1/llama-debug.1.gz
│ │ │ ├── llama-debug.1
│ │ │ │ @@ -1,13 +1,12 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-DEBUG "1" "April 2026" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-debug \- llama-debug
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR, \fB\-\-usage\fR print usage and exit
│ │ │ │ \fB\-\-version\fR show version and build info
│ │ │ │ \fB\-\-license\fR show source code license and dependencies
│ │ │ │ \fB\-cl\fR, \fB\-\-cache\-list\fR show list of models in cache
│ │ │ │ \fB\-\-completion\-bash\fR print source\-able bash completion script for llama.cpp
│ │ │ │ @@ -413,16 +412,19 @@
│ │ │ │ .PP
│ │ │ │ \fB\-\-dry\-sequence\-breaker\fR STRING add sequence breaker for DRY sampling, clearing out default breakers
│ │ │ │ .TP
│ │ │ │ ('\en', ':', '"', '*') in the process; use "none" to not use any
│ │ │ │ sequence breakers
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-target\fR N adaptive\-p: select tokens near this probability (valid range 0.0 to
│ │ │ │ -.TP
│ │ │ │ -1.0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ +1.load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ +.PP
│ │ │ │ +0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ [(more info)](https://github.com/ggml\-org/llama.cpp/pull/17927)
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-decay\fR N adaptive\-p: decay rate for target adaptation over time. lower values
│ │ │ │ .TP
│ │ │ │ are more reactive, higher values are more stable.
│ │ │ │ (valid range 0.0 to 0.99) (default: 0.90)
│ │ │ │ .PP
│ │ ├── ./usr/share/man/man1/llama-diffusion-cli.1.gz
│ │ │ ├── llama-diffusion-cli.1
│ │ │ │ @@ -1,13 +1,12 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-DIFFUSION-CLI "1" "April 2026" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-diffusion-cli \- llama-diffusion-cli
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR, \fB\-\-usage\fR print usage and exit
│ │ │ │ \fB\-\-version\fR show version and build info
│ │ │ │ \fB\-\-license\fR show source code license and dependencies
│ │ │ │ \fB\-cl\fR, \fB\-\-cache\-list\fR show list of models in cache
│ │ │ │ \fB\-\-completion\-bash\fR print source\-able bash completion script for llama.cpp
│ │ │ │ @@ -413,16 +412,19 @@
│ │ │ │ .PP
│ │ │ │ \fB\-\-dry\-sequence\-breaker\fR STRING add sequence breaker for DRY sampling, clearing out default breakers
│ │ │ │ .TP
│ │ │ │ ('\en', ':', '"', '*') in the process; use "none" to not use any
│ │ │ │ sequence breakers
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-target\fR N adaptive\-p: select tokens near this probability (valid range 0.0 to
│ │ │ │ -.TP
│ │ │ │ -1.0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ +1.load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ +.PP
│ │ │ │ +0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ [(more info)](https://github.com/ggml\-org/llama.cpp/pull/17927)
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-decay\fR N adaptive\-p: decay rate for target adaptation over time. lower values
│ │ │ │ .TP
│ │ │ │ are more reactive, higher values are more stable.
│ │ │ │ (valid range 0.0 to 0.99) (default: 0.90)
│ │ │ │ .PP
│ │ ├── ./usr/share/man/man1/llama-finetune.1.gz
│ │ │ ├── llama-finetune.1
│ │ │ │ @@ -1,13 +1,12 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-FINETUNE "1" "April 2026" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-finetune \- llama-finetune
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR, \fB\-\-usage\fR print usage and exit
│ │ │ │ \fB\-\-version\fR show version and build info
│ │ │ │ \fB\-\-license\fR show source code license and dependencies
│ │ │ │ \fB\-cl\fR, \fB\-\-cache\-list\fR show list of models in cache
│ │ │ │ \fB\-\-completion\-bash\fR print source\-able bash completion script for llama.cpp
│ │ │ │ @@ -81,14 +80,16 @@
│ │ │ │ \fB\-e\fR, \fB\-\-escape\fR, \fB\-\-no\-escape\fR whether to process escapes sequences (\en, \er, \et, \e', \e", \e\e)
│ │ │ │ .IP
│ │ │ │ (default: false)
│ │ │ │ .PP
│ │ │ │ \fB\-\-rope\-scaling\fR {none,linear,yarn} RoPE frequency scaling method, defaults to linear unless specified by
│ │ │ │ .TP
│ │ │ │ the model
│ │ │ │ +load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ +.IP
│ │ │ │ (env: LLAMA_ARG_ROPE_SCALING_TYPE)
│ │ │ │ .PP
│ │ │ │ \fB\-\-rope\-scale\fR N RoPE context scaling factor, expands context by a factor of N
│ │ │ │ .IP
│ │ │ │ (env: LLAMA_ARG_ROPE_SCALE)
│ │ │ │ .PP
│ │ │ │ \fB\-\-rope\-freq\-base\fR N RoPE base frequency, used by NTK\-aware scaling (default: loaded from
│ │ ├── ./usr/share/man/man1/llama-idle.1.gz
│ │ │ ├── llama-idle.1
│ │ │ │ @@ -1,13 +1,12 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-IDLE "1" "April 2026" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-idle \- llama-idle
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-\-\-\-\-\fR sampling params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-\-samplers\fR SAMPLERS samplers that will be used for generation in the order, separated by
│ │ │ │ .TP
│ │ │ │ \&';'
│ │ │ │ @@ -401,14 +400,16 @@
│ │ │ │ \fB\-hfv\fR, \fB\-hfrv\fR, \fB\-\-hf\-repo\-v\fR /[:quant]
│ │ │ │ .TP
│ │ │ │ Hugging Face model repository for the vocoder model (default: unused)
│ │ │ │ (env: LLAMA_ARG_HF_REPO_V)
│ │ │ │ .PP
│ │ │ │ \fB\-hffv\fR, \fB\-\-hf\-file\-v\fR FILE Hugging Face model file for the vocoder model (default: unused)
│ │ │ │ .IP
│ │ │ │ +load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ +.IP
│ │ │ │ (env: LLAMA_ARG_HF_FILE_V)
│ │ │ │ .PP
│ │ │ │ \fB\-hft\fR, \fB\-\-hf\-token\fR TOKEN Hugging Face access token (default: value from HF_TOKEN environment
│ │ │ │ .TP
│ │ │ │ variable)
│ │ │ │ (env: HF_TOKEN)
│ │ │ │ .PP
│ │ ├── ./usr/share/man/man1/llama-lookahead.1.gz
│ │ │ ├── llama-lookahead.1
│ │ │ │ @@ -1,13 +1,12 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-LOOKAHEAD "1" "April 2026" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-lookahead \- llama-lookahead
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-\-\-\-\-\fR sampling params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-\-samplers\fR SAMPLERS samplers that will be used for generation in the order, separated by
│ │ │ │ .TP
│ │ │ │ \&';'
│ │ │ │ @@ -401,14 +400,16 @@
│ │ │ │ \fB\-hfv\fR, \fB\-hfrv\fR, \fB\-\-hf\-repo\-v\fR /[:quant]
│ │ │ │ .TP
│ │ │ │ Hugging Face model repository for the vocoder model (default: unused)
│ │ │ │ (env: LLAMA_ARG_HF_REPO_V)
│ │ │ │ .PP
│ │ │ │ \fB\-hffv\fR, \fB\-\-hf\-file\-v\fR FILE Hugging Face model file for the vocoder model (default: unused)
│ │ │ │ .IP
│ │ │ │ +load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ +.IP
│ │ │ │ (env: LLAMA_ARG_HF_FILE_V)
│ │ │ │ .PP
│ │ │ │ \fB\-hft\fR, \fB\-\-hf\-token\fR TOKEN Hugging Face access token (default: value from HF_TOKEN environment
│ │ │ │ .TP
│ │ │ │ variable)
│ │ │ │ (env: HF_TOKEN)
│ │ │ │ .PP
│ │ ├── ./usr/share/man/man1/llama-lookup.1.gz
│ │ │ ├── llama-lookup.1
│ │ │ │ @@ -1,13 +1,12 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-LOOKUP "1" "April 2026" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-lookup \- llama-lookup
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR, \fB\-\-usage\fR print usage and exit
│ │ │ │ \fB\-\-version\fR show version and build info
│ │ │ │ \fB\-\-license\fR show source code license and dependencies
│ │ │ │ \fB\-cl\fR, \fB\-\-cache\-list\fR show list of models in cache
│ │ │ │ \fB\-\-completion\-bash\fR print source\-able bash completion script for llama.cpp
│ │ │ │ @@ -413,16 +412,19 @@
│ │ │ │ .PP
│ │ │ │ \fB\-\-dry\-sequence\-breaker\fR STRING add sequence breaker for DRY sampling, clearing out default breakers
│ │ │ │ .TP
│ │ │ │ ('\en', ':', '"', '*') in the process; use "none" to not use any
│ │ │ │ sequence breakers
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-target\fR N adaptive\-p: select tokens near this probability (valid range 0.0 to
│ │ │ │ -.TP
│ │ │ │ -1.0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ +1.load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ +.PP
│ │ │ │ +0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ [(more info)](https://github.com/ggml\-org/llama.cpp/pull/17927)
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-decay\fR N adaptive\-p: decay rate for target adaptation over time. lower values
│ │ │ │ .TP
│ │ │ │ are more reactive, higher values are more stable.
│ │ │ │ (valid range 0.0 to 0.99) (default: 0.90)
│ │ │ │ .PP
│ │ ├── ./usr/share/man/man1/llama-parallel.1.gz
│ │ │ ├── llama-parallel.1
│ │ │ │ @@ -1,13 +1,12 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-PARALLEL "1" "April 2026" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-parallel \- llama-parallel
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR, \fB\-\-usage\fR print usage and exit
│ │ │ │ \fB\-\-version\fR show version and build info
│ │ │ │ \fB\-\-license\fR show source code license and dependencies
│ │ │ │ \fB\-cl\fR, \fB\-\-cache\-list\fR show list of models in cache
│ │ │ │ \fB\-\-completion\-bash\fR print source\-able bash completion script for llama.cpp
│ │ │ │ @@ -413,16 +412,19 @@
│ │ │ │ .PP
│ │ │ │ \fB\-\-dry\-sequence\-breaker\fR STRING add sequence breaker for DRY sampling, clearing out default breakers
│ │ │ │ .TP
│ │ │ │ ('\en', ':', '"', '*') in the process; use "none" to not use any
│ │ │ │ sequence breakers
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-target\fR N adaptive\-p: select tokens near this probability (valid range 0.0 to
│ │ │ │ -.TP
│ │ │ │ -1.0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ +1load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ +.PP
│ │ │ │ +\&.0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ [(more info)](https://github.com/ggml\-org/llama.cpp/pull/17927)
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-decay\fR N adaptive\-p: decay rate for target adaptation over time. lower values
│ │ │ │ .TP
│ │ │ │ are more reactive, higher values are more stable.
│ │ │ │ (valid range 0.0 to 0.99) (default: 0.90)
│ │ │ │ .PP
│ │ ├── ./usr/share/man/man1/llama-retrieval.1.gz
│ │ │ ├── llama-retrieval.1
│ │ │ │ @@ -1,13 +1,12 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-RETRIEVAL "1" "April 2026" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-retrieval \- llama-retrieval
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR, \fB\-\-usage\fR print usage and exit
│ │ │ │ \fB\-\-version\fR show version and build info
│ │ │ │ \fB\-\-license\fR show source code license and dependencies
│ │ │ │ \fB\-cl\fR, \fB\-\-cache\-list\fR show list of models in cache
│ │ │ │ \fB\-\-completion\-bash\fR print source\-able bash completion script for llama.cpp
│ │ │ │ @@ -413,16 +412,19 @@
│ │ │ │ .PP
│ │ │ │ \fB\-\-dry\-sequence\-breaker\fR STRING add sequence breaker for DRY sampling, clearing out default breakers
│ │ │ │ .TP
│ │ │ │ ('\en', ':', '"', '*') in the process; use "none" to not use any
│ │ │ │ sequence breakers
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-target\fR N adaptive\-p: select tokens near this probability (valid range 0.0 to
│ │ │ │ -.TP
│ │ │ │ -1.0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ +1.load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ +.PP
│ │ │ │ +0; negative = disabled) (default: \fB\-1\fR.00)
│ │ │ │ +.IP
│ │ │ │ [(more info)](https://github.com/ggml\-org/llama.cpp/pull/17927)
│ │ │ │ .PP
│ │ │ │ \fB\-\-adaptive\-decay\fR N adaptive\-p: decay rate for target adaptation over time. lower values
│ │ │ │ .TP
│ │ │ │ are more reactive, higher values are more stable.
│ │ │ │ (valid range 0.0 to 0.99) (default: 0.90)
│ │ │ │ .PP
│ │ ├── ./usr/share/man/man1/llama-speculative-simple.1.gz
│ │ │ ├── llama-speculative-simple.1
│ │ │ │ @@ -1,13 +1,12 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-SPECULATIVE-SIMPLE "1" "April 2026" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-speculative-simple \- llama-speculative-simple
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR, \fB\-\-usage\fR print usage and exit
│ │ │ │ \fB\-\-version\fR show version and build info
│ │ │ │ \fB\-\-license\fR show source code license and dependencies
│ │ │ │ \fB\-cl\fR, \fB\-\-cache\-list\fR show list of models in cache
│ │ │ │ \fB\-\-completion\-bash\fR print source\-able bash completion script for llama.cpp
│ │ │ │ @@ -489,15 +488,16 @@
│ │ │ │ .IP
│ │ │ │ same as \fB\-\-threads\-draft\fR)
│ │ │ │ .SS "-Cd, --cpu-mask-draft M Draft model CPU affinity mask. Complements cpu-range-draft (default:"
│ │ │ │ .IP
│ │ │ │ same as \fB\-\-cpu\-mask\fR)
│ │ │ │ .PP
│ │ │ │ \fB\-Crd\fR, \fB\-\-cpu\-range\-draft\fR lo\-hi Ranges of CPUs for affinity. Complements \fB\-\-cpu\-mask\-draft\fR
│ │ │ │ -\fB\-\-cpu\-strict\-draft\fR <0|1> Use strict CPU placement for draft model (default: same as
│ │ │ │ +\fB\-\-cpu\-strict\-draft\fR <0|1> Use strict CPU placement for drload_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ +aft model (default: same as
│ │ │ │ .IP
│ │ │ │ \fB\-\-cpu\-strict\fR)
│ │ │ │ .PP
│ │ │ │ \fB\-\-prio\-draft\fR N set draft process/thread priority : 0\-normal, 1\-medium, 2\-high,
│ │ │ │ .IP
│ │ │ │ 3\-realtime (default: 0)
│ │ │ │ .PP
│ │ ├── ./usr/share/man/man1/llama-speculative.1.gz
│ │ │ ├── llama-speculative.1
│ │ │ │ @@ -1,13 +1,12 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-SPECULATIVE "1" "April 2026" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-speculative \- llama-speculative
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR, \fB\-\-usage\fR print usage and exit
│ │ │ │ \fB\-\-version\fR show version and build info
│ │ │ │ \fB\-\-license\fR show source code license and dependencies
│ │ │ │ \fB\-cl\fR, \fB\-\-cache\-list\fR show list of models in cache
│ │ │ │ \fB\-\-completion\-bash\fR print source\-able bash completion script for llama.cpp
│ │ │ │ @@ -489,15 +488,16 @@
│ │ │ │ .IP
│ │ │ │ same as \fB\-\-threads\-draft\fR)
│ │ │ │ .SS "-Cd, --cpu-mask-draft M Draft model CPU affinity mask. Complements cpu-range-draft (default:"
│ │ │ │ .IP
│ │ │ │ same as \fB\-\-cpu\-mask\fR)
│ │ │ │ .PP
│ │ │ │ \fB\-Crd\fR, \fB\-\-cpu\-range\-draft\fR lo\-hi Ranges of CPUs for affinity. Complements \fB\-\-cpu\-mask\-draft\fR
│ │ │ │ -\fB\-\-cpu\-strict\-draft\fR <0|1> Use strict CPU placement for draft model (default: same as
│ │ │ │ +\fB\-\-cpu\-strict\-draft\fR <0|1> Use strict CPU placement for drload_backend: loaded CPU backend from \fI\,/usr/lib/aarch64\-linux\-gnu/ggml/backends0/libggml\-cpu\-armv8.2_2.so\/\fP
│ │ │ │ +aft model (default: same as
│ │ │ │ .IP
│ │ │ │ \fB\-\-cpu\-strict\fR)
│ │ │ │ .PP
│ │ │ │ \fB\-\-prio\-draft\fR N set draft process/thread priority : 0\-normal, 1\-medium, 2\-high,
│ │ │ │ .IP
│ │ │ │ 3\-realtime (default: 0)
│ │ │ │ .PP