From defb149f3427c3811ffc36ae8dc7777d88e1d642 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 18 Sep 2024 06:47:14 +0000
Subject: [PATCH 1/2] chore(deps): update ollama/ollama docker tag to v0.3.11

---
 apps/ollama-amd/docker-compose.yml    | 2 +-
 apps/ollama-cpu/docker-compose.yml    | 2 +-
 apps/ollama-nvidia/docker-compose.yml | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/apps/ollama-amd/docker-compose.yml b/apps/ollama-amd/docker-compose.yml
index b2f3212b4f..6475eec6bc 100755
--- a/apps/ollama-amd/docker-compose.yml
+++ b/apps/ollama-amd/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 services:
   ollama-amd:
-    image: ollama/ollama:0.3.10-rocm
+    image: ollama/ollama:0.3.11-rocm
     restart: unless-stopped
     container_name: ollama-amd
     environment:
diff --git a/apps/ollama-cpu/docker-compose.yml b/apps/ollama-cpu/docker-compose.yml
index 33a56b0729..cf24b12d94 100755
--- a/apps/ollama-cpu/docker-compose.yml
+++ b/apps/ollama-cpu/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 services:
   ollama-cpu:
-    image: ollama/ollama:0.3.10
+    image: ollama/ollama:0.3.11
     restart: unless-stopped
     container_name: ollama-cpu
     ports:
diff --git a/apps/ollama-nvidia/docker-compose.yml b/apps/ollama-nvidia/docker-compose.yml
index e744c08c5c..5aab592e85 100755
--- a/apps/ollama-nvidia/docker-compose.yml
+++ b/apps/ollama-nvidia/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 services:
   ollama-nvidia:
-    image: ollama/ollama:0.3.10
+    image: ollama/ollama:0.3.11
     restart: unless-stopped
     container_name: ollama-nvidia
     ports:

From c98b32186294418e174a2914b04dd757dc4ec914 Mon Sep 17 00:00:00 2001
From: Tipi CI
Date: Wed, 18 Sep 2024 06:48:02 +0000
Subject: [PATCH 2/2] Update app version [ready]

---
 apps/ollama-amd/config.json    | 6 +++---
 apps/ollama-cpu/config.json    | 6 +++---
 apps/ollama-nvidia/config.json | 6 +++---
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/apps/ollama-amd/config.json b/apps/ollama-amd/config.json
index e57360368d..e0cc467e35 100644
--- a/apps/ollama-amd/config.json
+++ b/apps/ollama-amd/config.json
@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11434,
   "id": "ollama-amd",
-  "tipi_version": 32,
-  "version": "0.3.10-rocm",
+  "tipi_version": 33,
+  "version": "0.3.11-rocm",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
@@ -16,5 +16,5 @@
   "form_fields": [],
   "supported_architectures": ["arm64", "amd64"],
   "created_at": 1691943801422,
-  "updated_at": 1725788642000
+  "updated_at": 1726642067000
 }
diff --git a/apps/ollama-cpu/config.json b/apps/ollama-cpu/config.json
index 5831db26c0..04bcbf22bf 100644
--- a/apps/ollama-cpu/config.json
+++ b/apps/ollama-cpu/config.json
@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11436,
   "id": "ollama-cpu",
-  "tipi_version": 32,
-  "version": "0.3.10",
+  "tipi_version": 33,
+  "version": "0.3.11",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
@@ -16,5 +16,5 @@
   "form_fields": [],
   "supported_architectures": ["arm64", "amd64"],
   "created_at": 1691943801422,
-  "updated_at": 1725788644000
+  "updated_at": 1726642077000
 }
diff --git a/apps/ollama-nvidia/config.json b/apps/ollama-nvidia/config.json
index 68e579fbd6..b1b862f647 100644
--- a/apps/ollama-nvidia/config.json
+++ b/apps/ollama-nvidia/config.json
@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11435,
   "id": "ollama-nvidia",
-  "tipi_version": 32,
-  "version": "0.3.10",
+  "tipi_version": 33,
+  "version": "0.3.11",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
@@ -16,5 +16,5 @@
   "form_fields": [],
   "supported_architectures": ["arm64", "amd64"],
   "created_at": 1691943801422,
-  "updated_at": 1725788647000
+  "updated_at": 1726642079000
 }