File tree Expand file tree Collapse file tree 4 files changed +25
-25
lines changed
docs/content/docs/getting-started Expand file tree Collapse file tree 4 files changed +25
-25
lines changed Original file line number Diff line number Diff line change @@ -2,10 +2,10 @@ context_size: 4096
22f16 : true
33backend : llama-cpp
44mmap : true
5- mmproj : minicpm-v-2_6-mmproj-f16.gguf
5+ mmproj : minicpm-v-4_5-mmproj-f16.gguf
66name : gpt-4o
77parameters :
8- model : minicpm-v-2_6-Q4_K_M.gguf
8+ model : minicpm-v-4_5-Q4_K_M.gguf
99stopwords :
1010- <|im_end|>
1111- <dummy32000>
@@ -42,9 +42,9 @@ template:
4242 <|im_start|>assistant
4343
4444download_files :
45- - filename : minicpm-v-2_6-Q4_K_M.gguf
46- sha256 : 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
47- uri : huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
48- - filename : minicpm-v-2_6-mmproj-f16.gguf
49- uri : huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
50- sha256 : 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
45+ - filename : minicpm-v-4_5-Q4_K_M.gguf
46+ sha256 : c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f
47+ uri : huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf
48+ - filename : minicpm-v-4_5-mmproj-f16.gguf
49+ uri : huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf
50+ sha256 : 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8
Original file line number Diff line number Diff line change @@ -2,10 +2,10 @@ context_size: 4096
22backend : llama-cpp
33f16 : true
44mmap : true
5- mmproj : minicpm-v-2_6-mmproj-f16.gguf
5+ mmproj : minicpm-v-4_5-mmproj-f16.gguf
66name : gpt-4o
77parameters :
8- model : minicpm-v-2_6-Q4_K_M.gguf
8+ model : minicpm-v-4_5-Q4_K_M.gguf
99stopwords :
1010- <|im_end|>
1111- <dummy32000>
@@ -42,9 +42,9 @@ template:
4242 <|im_start|>assistant
4343
4444download_files :
45- - filename : minicpm-v-2_6-Q4_K_M.gguf
46- sha256 : 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
47- uri : huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
48- - filename : minicpm-v-2_6-mmproj-f16.gguf
49- uri : huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
50- sha256 : 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
45+ - filename : minicpm-v-4_5-Q4_K_M.gguf
46+ sha256 : c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f
47+ uri : huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf
48+ - filename : minicpm-v-4_5-mmproj-f16.gguf
49+ uri : huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf
50+ sha256 : 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8
Original file line number Diff line number Diff line change @@ -2,10 +2,10 @@ context_size: 4096
22backend : llama-cpp
33f16 : true
44mmap : true
5- mmproj : minicpm-v-2_6-mmproj-f16.gguf
5+ mmproj : minicpm-v-4_5-mmproj-f16.gguf
66name : gpt-4o
77parameters :
8- model : minicpm-v-2_6-Q4_K_M.gguf
8+ model : minicpm-v-4_5-Q4_K_M.gguf
99stopwords :
1010- <|im_end|>
1111- <dummy32000>
@@ -43,9 +43,9 @@ template:
4343
4444
4545download_files :
46- - filename : minicpm-v-2_6-Q4_K_M.gguf
47- sha256 : 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
48- uri : huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
49- - filename : minicpm-v-2_6-mmproj-f16.gguf
50- uri : huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
51- sha256 : 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
46+ - filename : minicpm-v-4_5-Q4_K_M.gguf
47+ sha256 : c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f
48+ uri : huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf
49+ - filename : minicpm-v-4_5-mmproj-f16.gguf
50+ uri : huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf
51+ sha256 : 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8
Original file line number Diff line number Diff line change @@ -182,7 +182,7 @@ MODEL_NAME=gemma-3-12b-it docker compose up
182182
183183# NVIDIA GPU setup with custom multimodal and image models
184184MODEL_NAME=gemma-3-12b-it \
185- MULTIMODAL_MODEL=minicpm-v-2_6 \
185+ MULTIMODAL_MODEL=minicpm-v-4_5 \
186186IMAGE_MODEL=flux.1-dev-ggml \
187187docker compose -f docker-compose.nvidia.yaml up
188188```
You can’t perform that action at this time.
0 commit comments