From ae152ccc4eb1e21261cb7405a4eaa1bb941ddac6 Mon Sep 17 00:00:00 2001 From: jimmychen Date: Sat, 4 Apr 2026 22:27:11 -0400 Subject: [PATCH] =?UTF-8?q?=E6=B7=BB=E5=8A=A0deepwiki-rs=E5=88=86=E6=9E=90?= =?UTF-8?q?=E6=8C=87=E4=BB=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- llama-cpp.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/llama-cpp.md b/llama-cpp.md index ec7eac6..2641dd0 100644 --- a/llama-cpp.md +++ b/llama-cpp.md @@ -11,3 +11,10 @@ No thinking: --chat-template-kwargs "{\"enable_thinking\": false}" ./llama-server --model /home/jimmy/NVME/model/gemma-4-31B-it-UD-Q4_K_XL.gguf --mmproj /home/jimmy/NVME/model/gemma4-31b-mmproj-BF16.gguf --temp 1.0 --top-p 0.95 --top-k 64 --alias "gemma-4-31B" --host 0.0.0.0 --port 11434 --kv-unified --cache-type-k q8_0 --cache-type-v q8_0 --flash-attn on --fit on --ctx-size 100000 --jinja --no-mmap -t 24 -np 4 -ngl 999 --chat-template-kwargs '{"enable_thinking":true}' --n-cpu-moe 5 + + +## deepwiki-rs + +./deepwiki-rs -p ./src --llm-api-base-url http://192.168.2.105:11434 --model-efficient Qwen3.5-35B-A3B --target-language zh --max-parallels 4 --max-tokens 262144 --force-regenerate + +