From 5f7664b854660187c225f569868c8540de94a973 Mon Sep 17 00:00:00 2001 From: jimmychen Date: Sun, 12 Apr 2026 10:09:26 -0400 Subject: [PATCH] =?UTF-8?q?=E6=B7=BB=E5=8A=A0qwopus=203.5=2027B=E5=90=AF?= =?UTF-8?q?=E5=8A=A8=E6=8C=87=E4=BB=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- llama-cpp.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/llama-cpp.md b/llama-cpp.md index 2641dd0..c69790e 100644 --- a/llama-cpp.md +++ b/llama-cpp.md @@ -1,3 +1,5 @@ +## Qwen3.5 35B A3B + ./llama-server --model /home/jimmy/NVME/model/Qwen3.5-35B-A3B-UD-IQ4_XS.gguf --alias "Qwen3.5-35B-A3B" --temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 --host 0.0.0.0 --port 11434 --kv-unified --cache-type-k q8_0 --cache-type-v q8_0 --flash-attn on --fit on --ctx-size 262144 --jinja --no-mmap -t 24 -np 4 -ngl 999 context_size 128K = 131072 @@ -5,6 +7,9 @@ context_size 128K = 131072 No thinking: --chat-template-kwargs "{\"enable_thinking\": false}" +## Qwopus3.5 27B + +./llama-server --model /home/jimmy/NVME/model/Qwopus3.5-27B-v3-Q4_K_S.gguf --alias "Qwopus3.5-27B" --temp 0.6 --top-p 0.95 --top-k 20 --min-p 0.00 --host 0.0.0.0 --port 11434 --kv-unified --cache-type-k q8_0 --cache-type-v q8_0 --flash-attn on --fit on --ctx-size 262144 --jinja --no-mmap -t 24 -np 4 -ngl 999 ## gemma 4