change default model to gpt4o-mini (#1166)
Reasons
1. gpt-4o-mini is 2x cheaper than the current default model, gpt-3.5-turbo.
2. gpt-4o-mini has an extended context length of 128k tokens.
3. Public benchmarks indicate that gpt-4o-mini outperforms gpt-3.5-turbo across tasks.
[Source](https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/)
shahules786 committed Aug 5, 2024
1 parent dadc410 commit 1bcbf20
Showing 2 changed files with 3 additions and 3 deletions.
src/ragas/llms/base.py (2 changes: 1 addition & 1 deletion)

@@ -289,7 +289,7 @@ async def agenerate_text(


 def llm_factory(
-    model: str = "gpt-3.5-turbo", run_config: t.Optional[RunConfig] = None
+    model: str = "gpt-4o-mini", run_config: t.Optional[RunConfig] = None
 ) -> BaseRagasLLM:
     timeout = None
     if run_config is not None:
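For callers, the change is transparent: omitting the `model` argument of `llm_factory` now returns a gpt-4o-mini-backed LLM instead of gpt-3.5-turbo. A minimal sketch of both paths, assuming the factory is imported from the module touched above and an OpenAI API key is available in the environment:

```python
# Sketch only: the import path mirrors the file changed in this diff
# (src/ragas/llms/base.py); OPENAI_API_KEY is assumed to be set.
from ragas.llms.base import llm_factory

# With no arguments, the factory now wraps gpt-4o-mini.
default_llm = llm_factory()

# Pinning a model explicitly keeps the old behaviour.
legacy_llm = llm_factory(model="gpt-3.5-turbo")
```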
src/ragas/testset/generator.py (4 changes: 2 additions & 2 deletions)

@@ -146,8 +146,8 @@ def from_llama_index(
     @deprecated("0.1.4", removal="0.2.0", alternative="from_langchain")
     def with_openai(
         cls,
-        generator_llm: str = "gpt-3.5-turbo-16k",
-        critic_llm: str = "gpt-4",
+        generator_llm: str = "gpt-4o-mini",
+        critic_llm: str = "gpt-4o",
         embeddings: str = "text-embedding-ada-002",
         docstore: t.Optional[DocumentStore] = None,
         chunk_size: int = 1024,
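The (already deprecated) `with_openai` constructor picks up matching defaults for test set generation. A hedged sketch, assuming the hunk above belongs to the `TestsetGenerator` class in `src/ragas/testset/generator.py` and that an OpenAI API key is configured:

```python
# Sketch only: the class name TestsetGenerator is an assumption; the keyword
# arguments come from the diff above. with_openai is deprecated in favour of
# from_langchain, so new code should prefer that path.
from ragas.testset.generator import TestsetGenerator

# Defaults now resolve to gpt-4o-mini (generator) and gpt-4o (critic).
generator = TestsetGenerator.with_openai()

# The previous defaults can still be requested explicitly.
legacy_generator = TestsetGenerator.with_openai(
    generator_llm="gpt-3.5-turbo-16k",
    critic_llm="gpt-4",
)
```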
