--- /srv/rebuilderd/tmp/rebuilderd8dHauj/inputs/llama.cpp-tools_6641+dfsg-1_ppc64el.deb
+++ /srv/rebuilderd/tmp/rebuilderd8dHauj/out/llama.cpp-tools_6641+dfsg-1_ppc64el.deb
├── control.tar.xz
│ ├── control.tar
│ │ ├── ./md5sums
│ │ │ ├── ./md5sums
│ │ │ │┄ Files differ
├── data.tar.xz
│ ├── data.tar
│ │ ├── file list
│ │ │ @@ -41,11 +41,11 @@
│ │ │ -rw-r--r-- 0 root (0) root (0) 4122 2025-09-30 07:36:33.000000 ./usr/share/llama.cpp-tools/llama-server/themes/wild/favicon.ico
│ │ │ -rw-r--r-- 0 root (0) root (0) 34367 2025-09-30 07:36:33.000000 ./usr/share/llama.cpp-tools/llama-server/themes/wild/index.html
│ │ │ -rw-r--r-- 0 root (0) root (0) 76484 2025-09-30 07:36:33.000000 ./usr/share/llama.cpp-tools/llama-server/themes/wild/llama_cpp.png
│ │ │ -rw-r--r-- 0 root (0) root (0) 259586 2025-09-30 07:36:33.000000 ./usr/share/llama.cpp-tools/llama-server/themes/wild/llamapattern.png
│ │ │ -rw-r--r-- 0 root (0) root (0) 496463 2025-09-30 07:36:33.000000 ./usr/share/llama.cpp-tools/llama-server/themes/wild/wild.png
│ │ │ drwxr-xr-x 0 root (0) root (0) 0 2025-10-05 20:09:48.000000 ./usr/share/man/
│ │ │ drwxr-xr-x 0 root (0) root (0) 0 2025-10-05 20:09:48.000000 ./usr/share/man/man1/
│ │ │ --rw-r--r-- 0 root (0) root (0) 1157 2025-10-05 20:09:48.000000 ./usr/share/man/man1/llama-bench.1.gz
│ │ │ +-rw-r--r-- 0 root (0) root (0) 1158 2025-10-05 20:09:48.000000 ./usr/share/man/man1/llama-bench.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 7189 2025-10-05 20:09:48.000000 ./usr/share/man/man1/llama-cli.1.gz
│ │ │ -rw-r--r-- 0 root (0) root (0) 1497 2025-10-05 20:09:48.000000 ./usr/share/man/man1/llama-quantize.1.gz
│ │ │ --rw-r--r-- 0 root (0) root (0) 8465 2025-10-05 20:09:48.000000 ./usr/share/man/man1/llama-server.1.gz
│ │ │ +-rw-r--r-- 0 root (0) root (0) 8466 2025-10-05 20:09:48.000000 ./usr/share/man/man1/llama-server.1.gz
│ │ ├── ./usr/share/man/man1/llama-bench.1.gz
│ │ │ ├── llama-bench.1
│ │ │ │ @@ -1,13 +1,13 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-BENCH "1" "October 2025" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-bench \- llama-bench
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/powerpc64le\-linux\-gnu/ggml/backends0/libggml\-cpu\-power9.so\/\fP
│ │ │ │ +load_backend: loaded CPU backend from \fI\,/usr/lib/powerpc64le\-linux\-gnu/ggml/backends0/libggml\-cpu\-power10.so\/\fP
│ │ │ │ usage: obj\-powerpc64le\-linux\-gnu/bin/llama\-bench [options]
│ │ │ │ .SS "options:"
│ │ │ │ .HP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR
│ │ │ │ .TP
│ │ │ │ \fB\-\-numa\fR
│ │ │ │ numa mode (default: disabled)
│ │ │ │ @@ -63,15 +63,15 @@
│ │ │ │ \fB\-ctk\fR, \fB\-\-cache\-type\-k\fR
│ │ │ │ (default: f16)
│ │ │ │ .TP
│ │ │ │ \fB\-ctv\fR, \fB\-\-cache\-type\-v\fR
│ │ │ │ (default: f16)
│ │ │ │ .TP
│ │ │ │ \fB\-t\fR, \fB\-\-threads\fR
│ │ │ │ -(default: 8)
│ │ │ │ +(default: 4)
│ │ │ │ .TP
│ │ │ │ \fB\-C\fR, \fB\-\-cpu\-mask\fR
│ │ │ │ (default: 0x0)
│ │ │ │ .TP
│ │ │ │ \fB\-\-cpu\-strict\fR <0|1>
│ │ │ │ (default: 0)
│ │ │ │ .TP
│ │ ├── ./usr/share/man/man1/llama-cli.1.gz
│ │ │ ├── llama-cli.1
│ │ │ │ @@ -1,13 +1,13 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-CLI "1" "October 2025" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-cli \- llama-cli
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/powerpc64le\-linux\-gnu/ggml/backends0/libggml\-cpu\-power9.so\/\fP
│ │ │ │ +load_backend: loaded CPU backend from \fI\,/usr/lib/powerpc64le\-linux\-gnu/ggml/backends0/libggml\-cpu\-power10.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR, \fB\-\-usage\fR print usage and exit
│ │ │ │ \fB\-\-version\fR show version and build info
│ │ │ │ \fB\-\-completion\-bash\fR print source\-able bash completion script for llama.cpp
│ │ │ │ \fB\-\-verbose\-prompt\fR print a verbose prompt before generation (default: false)
│ │ │ │ \fB\-t\fR, \fB\-\-threads\fR N number of threads to use during generation (default: \fB\-1\fR)
│ │ ├── ./usr/share/man/man1/llama-server.1.gz
│ │ │ ├── llama-server.1
│ │ │ │ @@ -1,13 +1,13 @@
│ │ │ │ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
│ │ │ │ .TH LLAMA-SERVER "1" "October 2025" "debian" "User Commands"
│ │ │ │ .SH NAME
│ │ │ │ llama-server \- llama-server
│ │ │ │ .SH DESCRIPTION
│ │ │ │ -load_backend: loaded CPU backend from \fI\,/usr/lib/powerpc64le\-linux\-gnu/ggml/backends0/libggml\-cpu\-power9.so\/\fP
│ │ │ │ +load_backend: loaded CPU backend from \fI\,/usr/lib/powerpc64le\-linux\-gnu/ggml/backends0/libggml\-cpu\-power10.so\/\fP
│ │ │ │ \fB\-\-\-\-\-\fR common params \fB\-\-\-\-\-\fR
│ │ │ │ .PP
│ │ │ │ \fB\-h\fR, \fB\-\-help\fR, \fB\-\-usage\fR print usage and exit
│ │ │ │ \fB\-\-version\fR show version and build info
│ │ │ │ \fB\-\-completion\-bash\fR print source\-able bash completion script for llama.cpp
│ │ │ │ \fB\-\-verbose\-prompt\fR print a verbose prompt before generation (default: false)
│ │ │ │ \fB\-t\fR, \fB\-\-threads\fR N number of threads to use during generation (default: \fB\-1\fR)
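All differences above sit in man pages that help2man produces by running the freshly built binaries: the captured "load_backend:" line names whichever ggml CPU backend the build host loaded (power9 vs. power10), and the llama-bench --threads default (8 vs. 4) appears to track the builder's core count. Those text changes in turn shift two .gz sizes in the file list and the md5sums. As a minimal sketch for regenerating this comparison, assuming diffoscope is installed and reusing the two package paths from the header (the rebuilderd temp directory is specific to this run, and the report filename is arbitrary):

    diffoscope --text llama.cpp-tools.diff \
        /srv/rebuilderd/tmp/rebuilderd8dHauj/inputs/llama.cpp-tools_6641+dfsg-1_ppc64el.deb \
        /srv/rebuilderd/tmp/rebuilderd8dHauj/out/llama.cpp-tools_6641+dfsg-1_ppc64el.deb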