File tree Expand file tree Collapse file tree 1 file changed +34
-0
lines changed
Expand file tree Collapse file tree 1 file changed +34
-0
lines changed Original file line number Diff line number Diff line change 11---
- name: "qwen3.5-35b-a3b-apex"
  url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
  urls:
  - https://huggingface.co/mudler/Qwen3.5-35B-A3B-APEX-GGUF
  # NOTE(review): description was the gallery template placeholder; replaced with
  # details grounded in this entry's own config. TODO: enrich from the upstream
  # Hugging Face model card once its contents are confirmed.
  description: |
    Qwen3.5-35B-A3B-APEX served as quantized GGUF weights
    (Qwen3.5-35B-A3B-APEX-Quality.gguf) via the llama-cpp backend.
    The entry ships an mmproj projector file (mmproj-F16.gguf), so the
    model is set up for multimodal (image) input in addition to chat.
    Recommended sampling defaults are preconfigured below
    (temperature 0.7, top_p 0.8, top_k 20, presence_penalty 1.5).
  overrides:
    backend: llama-cpp
    function:
      # Fall back to heuristic tool-call parsing; grammar-constrained
      # decoding is disabled for this model.
      automatic_tool_parsing_fallback: true
      grammar:
        disable: true
    known_usecases:
    - chat
    mmproj: llama-cpp/mmproj/Qwen3.5-35B-A3B-APEX-GGUF/mmproj-F16.gguf
    options:
    # Option strings are "key:value" pairs passed to the backend; quoted so
    # the colon-joined scalar cannot be misread as a YAML mapping.
    - "use_jinja:true"
    parameters:
      min_p: 0
      model: llama-cpp/models/Qwen3.5-35B-A3B-APEX-GGUF/Qwen3.5-35B-A3B-APEX-Quality.gguf
      presence_penalty: 1.5
      repeat_penalty: 1
      temperature: 0.7
      top_k: 20
      top_p: 0.8
    template:
      # Use the chat template embedded in the GGUF tokenizer metadata.
      use_tokenizer_template: true
  files:
  - filename: llama-cpp/mmproj/Qwen3.5-35B-A3B-APEX-GGUF/mmproj-F16.gguf
    sha256: a516ab92e8240da4734d68352bdfba84c16e830ee40010b8fac80d69c77272ff
    uri: https://huggingface.co/mudler/Qwen3.5-35B-A3B-APEX-GGUF/resolve/main/mmproj-F16.gguf
  - filename: llama-cpp/models/Qwen3.5-35B-A3B-APEX-GGUF/Qwen3.5-35B-A3B-APEX-Quality.gguf
    sha256: 50887b60c77ee5c95bc3657814ae993abcab7b2d71868b9af1e84d6badd09a57
    uri: https://huggingface.co/mudler/Qwen3.5-35B-A3B-APEX-GGUF/resolve/main/Qwen3.5-35B-A3B-APEX-Quality.gguf
236- name: "qwen_qwen3.5-35b-a3b"
337 url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
438 urls:
You can’t perform that action at this time.
0 commit comments