From 8acb45e09ae8b0a4d882fc3f0a61944041fa2127 Mon Sep 17 00:00:00 2001
From: bachelor-dou <15529241576@163.com>
Date: Fri, 9 May 2025 16:16:27 +0800
Subject: [PATCH 1/3] ollama model download and test script

---
 download_test_models.sh | 132 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 132 insertions(+)
 create mode 100755 download_test_models.sh

diff --git a/download_test_models.sh b/download_test_models.sh
new file mode 100755
index 0000000..f4bfc72
--- /dev/null
+++ b/download_test_models.sh
@@ -0,0 +1,132 @@
+#!/bin/bash
+
+# Display help information
+usage() {
+    echo "Usage: $0 [Options] [Arguments...]"
+    echo ""
+    echo "Options:"
+    echo "  -h, --help              Show this help message"
+    echo "  -m, --models            Specify one or more models, separated by ','"
+    echo "  -f, --file FILE         Specify the input file (.txt), with model names separated by spaces or newlines (default: all models of the ollama library)"
+    echo "  -s, --maxsize           Specify the maximum size of the model that can be downloaded (default: 40GB)"
+    echo "  -d, --down_model_path   Specify the model download directory (default: current directory)"
+    echo "  -b, --llama-bench_path  Absolute path of llama-bench"
+    exit 0
+}
+
+# default value
+file_name=""
+models=""
+maxsize=40
+model_path="."
+llama_bench_path=""
+
+declare -a model_names
+
+# Parsing command line arguments (getopts handles the short forms only)
+while getopts "hm:f:s:d:b:" opt; do
+    case $opt in
+        h)
+            usage
+            exit 0
+            ;;
+        m)
+            models=$OPTARG
+            ;;
+        f)
+            file_name=$OPTARG
+            if [[ ! -f "$file_name" ]]; then
+                echo "Error: File '$file_name' not found!"
+                exit 1
+            fi
+            ;;
+        s)
+            maxsize=$OPTARG
+            ;;
+        d)
+            model_path=$OPTARG
+            ;;
+        b)
+            llama_bench_path=$OPTARG
+            ;;
+        *)
+            usage
+            exit 1
+            ;;
+    esac
+done
+
+# Check if omdd exists in the current directory (NOTE: arm64-only binary)
+if [ ! -f "./omdd" ]; then
+    wget -O omdd https://github.com/amirrezaDev1378/ollama-model-direct-download/releases/download/v2.1.1/omdd-linux-arm64
+    chmod +x omdd
+fi
+
+# acquire model name
+if [ -n "$models" ]; then
+    IFS=',' read -r -a model_names <<< "$models"
+
+elif [ -n "$file_name" ]; then
+    echo "Reading contents of $file_name"
+    model_names=($(cat "$file_name"))
+
+else
+    url="https://ollama.com/search"
+    model_names=($(curl -s "$url" | grep "x-test-search-response-title" | awk -F '>' '{print $2}' | awk -F '<' '{print $1}'))
+
+fi
+
+
+if [ -f "./model_urls.txt" ]; then
+    rm "./model_urls.txt"
+fi
+
+# Filter tags with the smallest number of parameters according to the model name.
+for model_name in "${model_names[@]}"; do
+    url="https://ollama.com/library/$model_name/tags"
+    echo "Processing: $url, model_name = $model_name"
+    model_tag=($(curl -s "$url" | grep "library/$model_name:" | grep -oP '(?<=/library/)[^"]+' | grep -E 'q4_0|q8_0|fp16' | sort -t ':' -k2,2 -n | perl -ne 'print if /q4_0/ && !$q4++; print if /q8_0/ && !$q8++; print if /fp16/ && !$fp16++; exit if $q4 && $q8 && $fp16'))
+    for tag in "${model_tag[@]}"; do
+        echo "$tag $(./omdd get $tag | grep '1 -' | awk '{print $3}')" >> model_urls.txt
+    done
+done
+
+
+all_model_args=""
+
+# Limiting the size of downloaded files
+max_size=$((maxsize * 1024 * 1024 * 1024))
+while read -r name url; do
+    echo "Checking size for $name..."
+
+    size=$(curl -sI "$url" | grep -i "Content-Length" | cut -d ' ' -f2 | tr -d '\r')
+
+    if [[ -z "$size" ]]; then
+        echo "Warning: Unable to determine size for $name. Skipping..."
+        continue
+    fi
+
+    if ((size > max_size)); then
+        echo "Skipping $name, file size exceeds $maxsize GB ($((size / 1024 / 1024 / 1024)) GB)"
+        continue
+    fi
+
+    all_model_args="$all_model_args -m $model_path/$name"
+    echo "Downloading $name from $url (Size: $((size / 1024 / 1024)) MB)..."
+    if [[ -f "$model_path/$name" ]]; then
+        local_size=$(stat -c %s "$model_path/$name")
+        echo "$local_size"
+        if ((local_size == size)); then
+            echo "$name is already fully downloaded. Skipping..."
+            continue
+        fi
+    fi
+
+    wget -c --progress=bar -O "$model_path/$name" "$url"
+
+done < model_urls.txt
+
+#echo $llama_bench_path $all_model_args -p \"what is a car?\" -ngl 99 -o csv
+llama_bench_res=$($llama_bench_path $all_model_args -p "what is a car?" -ngl 99 -o csv)
+
+echo "$llama_bench_res" >> llama_bench_data.csv

From 4be46e3497799de2a25cbd03346ee9d8e107f2e9 Mon Sep 17 00:00:00 2001
From: bachelor-dou <15529241576@163.com>
Date: Wed, 14 May 2025 17:14:37 +0800
Subject: [PATCH 2/3] replace unavailable HF models with GGUF repos; raise
 inference timeout to 60s

---
 hf-models.txt       | 32 +++++++++++++++++++++-----------
 hf_model_dowload.sh |  6 +++---
 2 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/hf-models.txt b/hf-models.txt
index b024238..205d593 100644
--- a/hf-models.txt
+++ b/hf-models.txt
@@ -6,9 +6,12 @@ microsoft/Phi-3.5-MoE-instruct
 
 aisingapore/Llama-SEA-LION-v3-8B-IT
 
-allenai/OLMoE-1B-7B-0924
+allenai/OLMoE-1B-7B-0924-Instruct-GGUF/olmoe-1b-7b-0924-instruct-f16.gguf
+allenai/OLMoE-1B-7B-0924-Instruct-GGUF/olmoe-1b-7b-0924-instruct-q4_0.gguf
+allenai/OLMoE-1B-7B-0924-Instruct-GGUF/olmoe-1b-7b-0924-instruct-q8_0.gguf
 
-abeja/gpt-neox-japanese-2.7b
+mmnga/stockmark-gpt-neox-japanese-1.4b-gguf/stockmark-gpt-neox-japanese-1.4b-q4_0.gguf
+mmnga/stockmark-gpt-neox-japanese-1.4b-gguf/stockmark-gpt-neox-japanese-1.4b-q8_0.gguf
 
 EleutherAI/pythia-6.9b
 abacusai/Llama-3-Smaug-8B
@@ -16,13 +19,16 @@ TheBloke/Poro-34B-GGUF/poro-34b.Q4_0.gguf
 TheBloke/Poro-34B-GGUF/poro-34b.Q8_0.gguf
 1bitLLM/bitnet_b1_58-xl
 google/flan-t5-base
-apple/OpenELM-1_1B-Instruct
+QuietImpostor/OpenELM-3B-Instruct-GGUFs/OpenELM-3B-Instruct-F16.gguf
+QuietImpostor/OpenELM-3B-Instruct-GGUFs/OpenELM-3B-Instruct-Q4_0.gguf
+QuietImpostor/OpenELM-3B-Instruct-GGUFs/OpenELM-3B-Instruct-Q8_0.gguf
 THUDM/glm-edge-4b-chat
 THUDM/GLM-4-9B-0414
 HuggingFaceTB/SmolLM-135M
 LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct
 tiiuae/falcon-mamba-7b
-erfanvaredi/jais-7b-chat
+mradermacher/jais-family-1p3b-chat-GGUF/jais-family-1p3b-chat.f16.gguf
+mradermacher/jais-family-1p3b-chat-GGUF/jais-family-1p3b-chat.Q8_0.gguf
 speakleash/Bielik-11B-v2.3-Instruct
 bartowski/rwkv-6-world-7b-GGUF/rwkv-6-world-7b-Q4_0.gguf
 recursal/QRWKV6-32B-Instruct-Preview-v0.1
@@ -33,16 +39,20 @@ inclusionAI/Ling-lite
 
 
-
-liuhaotian/llava-v1.6-mistral-7b
-SkunkworksAI/BakLLaVA-1
+llava-hf/llava-v1.6-34b-hf
+llava-hf/llava-1.5-7b-hf
+cjpais/llava-v1.6-vicuna-7b-gguf/llava-v1.6-vicuna-7b.Q8_0.gguf
+abetlen/BakLLaVA-1-GGUF/bakllava-1.Q4_0.gguf
+abetlen/BakLLaVA-1-GGUF/bakllava-1.Q8_0.gguf
+abetlen/BakLLaVA-1-GGUF/bakllava-1.f16.gguf
 NousResearch/Obsidian-3B-V0.5
-Lin-Chen/ShareGPT4V-7B
-mtgv/MobileVLM_V2-1.7B
-01-ai/Yi-VL-6B
+NousResearch/Obsidian-3B-V0.5-GGUF/obsidian-f16.gguf
+Galunid/ShareGPT4V-gguf/ShareGPT4V-f16.gguf
+ZiangWu/MobileVLM_V2-1.7B-GGUF/mmproj-model-f16.gguf
+cmp-nct/Yi-VL-34B-GGUF/mmproj-model-f16.gguf
 
 openbmb/MiniCPM-o-2_6-gguf/Model-7.6B-Q8_0.gguf
 openbmb/MiniCPM-o-2_6-gguf/Model-7.6B-F16.gguf
 vikhyatk/moondream2
-BAAI/Bunny-v1_0-3B
+BAAI/Bunny-v1_0-4B-gguf/ggml-model-f16.gguf
 THUDM/glm-edge-1.5b-chat
 Qwen/Qwen2-VL-2B-Instruct
\ No newline at end of file
diff --git a/hf_model_dowload.sh b/hf_model_dowload.sh
index 65bd94d..148d1ec 100755
--- a/hf_model_dowload.sh
+++ b/hf_model_dowload.sh
@@ -146,7 +146,7 @@ END
         fi
     fi
 
-    run_inference_with_expect "$gguf_path" "what is a car?" 30 "$llamaCpp_path/build/bin/llama-cli" > /dev/null 2>&1
+    run_inference_with_expect "$gguf_path" "what is a car?" 60 "$llamaCpp_path/build/bin/llama-cli" > /dev/null 2>&1
     ret=$?
     if [ $ret -eq 0 ]; then
         echo "$model Success inference"
@@ -160,7 +160,7 @@ END
 
     download_path="$model_path/hf-models/$model_name"
     echo "============Attempting to download model: $model (Attempt: $((attempt + 1)))============"
-    huggingface-cli download "$model" --local-dir "$download_path" > /de v/null 2>&1
+    huggingface-cli download "$model" --local-dir "$download_path"
     status=$?
 
     if [ $status -eq 0 ]; then
@@ -196,7 +196,7 @@ END
         fi
     fi
 
-    run_inference_with_expect "$quant_out" "what is a car?" 30 "$llamaCpp_path/build/bin/llama-cli" > /dev/null 2>&1
+    run_inference_with_expect "$quant_out" "what is a car?" 60 "$llamaCpp_path/build/bin/llama-cli" > /dev/null 2>&1
     ret=$?
     if [ $ret -eq 0 ]; then
        echo "$model Success inference ($quant_type)"

From d06d946990c497963bcd54854fdd6ccd5f6e6f77 Mon Sep 17 00:00:00 2001
From: bachelor-dou <15529241576@163.com>
Date: Thu, 15 May 2025 14:24:01 +0800
Subject: [PATCH 3/3] silence huggingface-cli download output

---
 hf_model_dowload.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hf_model_dowload.sh b/hf_model_dowload.sh
index 148d1ec..2d15b4b 100755
--- a/hf_model_dowload.sh
+++ b/hf_model_dowload.sh
@@ -160,7 +160,7 @@ END
 
     download_path="$model_path/hf-models/$model_name"
     echo "============Attempting to download model: $model (Attempt: $((attempt + 1)))============"
-    huggingface-cli download "$model" --local-dir "$download_path"
+    huggingface-cli download "$model" --local-dir "$download_path" > /dev/null 2>&1
     status=$?
 
     if [ $status -eq 0 ]; then