diff --git a/Pipfile b/Pipfile
index 845f493f..9a65f88a 100644
--- a/Pipfile
+++ b/Pipfile
@@ -13,7 +13,7 @@ bothub_backend = {ref = "1.0.12",git = "https://github.com/Ilhasoft/bothub-backe
 celery = "==4.3.0"
 gunicorn = "==19.9.0"
 gevent = "==1.4.0"
-bothub_nlp_celery = {ref = "0.1.26",git = "https://github.com/Ilhasoft/bothub-nlp-celery"}
+bothub_nlp_celery = {ref = "0.1.28",git = "https://github.com/Ilhasoft/bothub-nlp-celery"}
 django-environ = "==0.4.5"
 redis = "==3.5.3"
 kombu = "==4.5.0"
diff --git a/Pipfile.lock b/Pipfile.lock
index 91430504..a05bbed4 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,7 +1,7 @@
 {
     "_meta": {
         "hash": {
-            "sha256": "407a3bfccf4c0bac7d7ff74edbd9bc95fa1b1eaea42ed72dca509018e9b95246"
+            "sha256": "1b4b24796f6fb3383810ba786d024a855dd0924c9d12d817fed3bdbfb9bda752"
         },
         "pipfile-spec": 6,
         "requires": {
@@ -89,7 +89,7 @@
         },
         "bothub-nlp-celery": {
            "git": "https://github.com/Ilhasoft/bothub-nlp-celery",
-            "ref": "f9bc6355ec2ef0853eb631e4da05564b3a3602d9"
+            "ref": "de8c5ff026fc1313b7840887cb5ddcb2de99422a"
         },
         "cachetools": {
             "hashes": [
@@ -217,11 +217,11 @@
         },
         "google-api-core": {
             "hashes": [
-                "sha256:67e33a852dcca7cb7eff49abc35c8cc2c0bb8ab11397dc8306d911505cae2990",
-                "sha256:779107f17e0fef8169c5239d56a8fbff03f9f72a3893c0c9e5842ec29dfedd54"
+                "sha256:1166371f6b1164a30cfed26092e3679450bb837a2b425779c8d1dd140c0732f4",
+                "sha256:6a95bfcf6f661ec0dffd96013b47100765f4f88ce54412562176bfcb11a997c1"
             ],
             "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
-            "version": "==1.22.2"
+            "version": "==1.22.3"
         },
         "google-api-python-client": {
             "hashes": [
@@ -839,11 +839,11 @@
         },
         "flake8": {
             "hashes": [
-                "sha256:15e351d19611c887e482fb960eae4d44845013cc142d42896e9862f775d8cf5c",
-                "sha256:f04b9fcbac03b0a3e58c0ab3a0ecc462e023a9faf046d57794184028123aa208"
+                "sha256:749dbbd6bfd0cf1318af27bf97a14e28e5ff548ef8e5b1566ccfb25a11e7c839",
+                "sha256:aadae8761ec651813c24be05c6f7b4680857ef6afaae4651a4eccaef97ce6c3b"
             ],
             "index": "pypi",
-            "version": "==3.8.3"
+            "version": "==3.8.4"
         },
         "importlib-metadata": {
             "hashes": [
@@ -885,11 +885,11 @@
         },
         "zipp": {
             "hashes": [
-                "sha256:43f4fa8d8bb313e65d8323a3952ef8756bf40f9a5c3ea7334be23ee4ec8278b6",
-                "sha256:b52f22895f4cfce194bc8172f3819ee8de7540aa6d873535a8668b730b8b411f"
+                "sha256:64ad89efee774d1897a58607895d80789c59778ea02185dd846ac38394a8642b",
+                "sha256:eed8ec0b8d1416b2ca33516a37a08892442f3954dee131e92cfd92d8fe3e7066"
             ],
             "markers": "python_version >= '3.6'",
-            "version": "==3.2.0"
+            "version": "==3.3.0"
         }
     }
 }
diff --git a/bothub_nlp_api/handlers/debug_parse.py b/bothub_nlp_api/handlers/debug_parse.py
index 3ee128fc..0ce9c21c 100644
--- a/bothub_nlp_api/handlers/debug_parse.py
+++ b/bothub_nlp_api/handlers/debug_parse.py
@@ -1,8 +1,7 @@
 from bothub_nlp_celery.actions import ACTION_DEBUG_PARSE, queue_name
 from bothub_nlp_celery.app import celery_app
 from bothub_nlp_celery.tasks import TASK_NLU_DEBUG_PARSE_TEXT
-from bothub_nlp_celery.utils import ALGORITHM_TO_LANGUAGE_MODEL
-from bothub_nlp_celery import settings as celery_settings
+from bothub_nlp_celery.utils import get_language_model

 from bothub_nlp_api import settings
 from bothub_nlp_api.utils import AuthorizationIsRequired
@@ -43,22 +42,7 @@
     if not update.get("version"):
         raise ValidationError("This repository has never been trained")

-    chosen_algorithm = update.get("algorithm")
-    # chosen_algorithm = choose_best_algorithm(update.get("language"))
-    model = ALGORITHM_TO_LANGUAGE_MODEL[chosen_algorithm]
-
-    if (model == "SPACY" and language not in celery_settings.SPACY_LANGUAGES) or (
-        model == "BERT" and language not in celery_settings.BERT_LANGUAGES
-    ):
-        model = None
-
-    # Send parse to SPACY worker to use name_entities (only if BERT not in use)
-    if (
-        (update.get("use_name_entities"))
-        and (model is None)
-        and (language in celery_settings.SPACY_LANGUAGES)
-    ):
-        model = "SPACY"
+    model = get_language_model(update)

     answer_task = celery_app.send_task(
         TASK_NLU_DEBUG_PARSE_TEXT,
diff --git a/bothub_nlp_api/handlers/evaluate.py b/bothub_nlp_api/handlers/evaluate.py
index 13419dc8..61d8ef99 100644
--- a/bothub_nlp_api/handlers/evaluate.py
+++ b/bothub_nlp_api/handlers/evaluate.py
@@ -1,20 +1,24 @@
 from bothub_nlp_celery.actions import ACTION_EVALUATE, queue_name
 from bothub_nlp_celery.app import celery_app
 from bothub_nlp_celery.tasks import TASK_NLU_EVALUATE_UPDATE
-from bothub_nlp_celery.utils import ALGORITHM_TO_LANGUAGE_MODEL
-from bothub_nlp_celery import settings as celery_settings
+from bothub_nlp_celery.utils import get_language_model

 from .. import settings
 from ..utils import AuthorizationIsRequired
 from ..utils import DEFAULT_LANGS_PRIORITY
 from ..utils import ValidationError, get_repository_authorization
 from ..utils import backend
+from ..utils import send_job_train_ai_platform
+import time

 EVALUATE_STATUS_EVALUATED = "evaluated"
+EVALUATE_STATUS_PROCESSING = "processing"
 EVALUATE_STATUS_FAILED = "failed"


-def evaluate_handler(authorization, language, repository_version=None):
+def evaluate_handler(
+    authorization, language, repository_version=None, cross_validation=False
+):
     if language and (
         language not in settings.SUPPORTED_LANGUAGES.keys()
         and language not in DEFAULT_LANGS_PRIORITY.keys()
@@ -35,40 +39,50 @@ def evaluate_handler(authorization, language, repository_version=None):
     if not update.get("update"):
         raise ValidationError("This repository has never been trained")

-    chosen_algorithm = update.get("algorithm")
-    # chosen_algorithm = choose_best_algorithm(update.get("language"))
-    model = ALGORITHM_TO_LANGUAGE_MODEL[chosen_algorithm]
-    if (model == "SPACY" and language not in celery_settings.SPACY_LANGUAGES) or (
-        model == "BERT" and language not in celery_settings.BERT_LANGUAGES
-    ):
-        model = None
-
-    # Send evaluate to SPACY worker to use name_entities (only if BERT not in use)
-    if (
-        (update.get("use_name_entities"))
-        and (model is None)
-        and (language in celery_settings.SPACY_LANGUAGES)
-    ):
-        model = "SPACY"
+    model = get_language_model(update)

     try:
-        evaluate_task = celery_app.send_task(
-            TASK_NLU_EVALUATE_UPDATE,
-            args=[
-                update.get("repository_version"),
-                update.get("user_id"),
-                repository_authorization,
-            ],
-            queue=queue_name(update.get("language"), ACTION_EVALUATE, model),
-        )
-        evaluate_task.wait()
-        evaluate = evaluate_task.result
+        evaluate = None
+        if cross_validation is False:
+            evaluate_task = celery_app.send_task(
+                TASK_NLU_EVALUATE_UPDATE,
+                args=[
+                    update.get("repository_version"),
+                    update.get("user_id"),
+                    repository_authorization,
+                    cross_validation,
+                ],
+                queue=queue_name(update.get("language"), ACTION_EVALUATE, model),
+            )
+            evaluate_task.wait()
+            evaluate = evaluate_task.result
+        else:
+            job_id = f'bothub_{settings.ENVIRONMENT}_evaluate_{str(update.get("repository_version"))}_{language}_{str(int(time.time()))}'
+            send_job_train_ai_platform(
+                jobId=job_id,
+                repository_version=str(update.get("repository_version")),
+                by_id=str(update.get("user_id")),
+                repository_authorization=str(repository_authorization),
+                language=language,
+                type_model=model,
+                operation="evaluate",
+            )
+            backend().request_backend_save_queue_id(
+                update_id=str(update.get("repository_version")),
+                repository_authorization=str(repository_authorization),
+                task_id=job_id,
+                from_queue=0,
+            )
+
         evaluate_report = {
             "language": language,
-            "status": EVALUATE_STATUS_EVALUATED,
+            "status": EVALUATE_STATUS_PROCESSING,
             "repository_version": update.get("repository_version"),
-            "evaluate_id": evaluate.get("id"),
-            "evaluate_version": evaluate.get("version"),
+            "evaluate_id": evaluate.get("id") if evaluate is not None else None,
+            "evaluate_version": evaluate.get("version")
+            if evaluate is not None
+            else None,
+            "cross_validation": cross_validation,
         }
     except Exception as e:
         evaluate_report = {"status": EVALUATE_STATUS_FAILED, "error": str(e)}
diff --git a/bothub_nlp_api/handlers/intent_sentence_suggestion.py b/bothub_nlp_api/handlers/intent_sentence_suggestion.py
new file mode 100644
index 00000000..fa006aeb
--- /dev/null
+++ b/bothub_nlp_api/handlers/intent_sentence_suggestion.py
@@ -0,0 +1,59 @@
+from bothub_nlp_celery.actions import ACTION_INTENT_SENTENCE_SUGGESTION, queue_name
+from bothub_nlp_celery.app import celery_app
+from bothub_nlp_celery.tasks import TASK_NLU_INTENT_SENTENCE_SUGGESTION_TEXT
+from bothub_nlp_api.utils import get_repository_authorization
+from bothub_nlp_api.utils import backend
+from bothub_nlp_api import settings
+from bothub_nlp_api.utils import ValidationError
+from bothub_nlp_api.utils import AuthorizationIsRequired
+
+
+def _intent_sentence_suggestion(
+    authorization,
+    language,
+    intent,
+    n_sentences_to_generate,
+    percentage_to_replace,
+    repository_version=None,
+):
+    print(authorization)
+    from ..utils import DEFAULT_LANGS_PRIORITY
+
+    if language and (
+        language not in settings.SUPPORTED_LANGUAGES.keys()
+        and language not in DEFAULT_LANGS_PRIORITY.keys()
+    ):
+        raise ValidationError("Language '{}' not supported by now.".format(language))
+
+    repository_authorization = get_repository_authorization(authorization)
+    if not repository_authorization:
+        raise AuthorizationIsRequired()
+
+    try:
+        update = backend().request_backend_parse(
+            repository_authorization, language, repository_version
+        )
+    except Exception:
+        update = {}
+    answer_task = celery_app.send_task(
+        TASK_NLU_INTENT_SENTENCE_SUGGESTION_TEXT,
+        args=[
+            update.get("repository_version"),
+            repository_authorization,
+            intent,
+            percentage_to_replace,
+            n_sentences_to_generate,
+        ],
+        queue=queue_name(language, ACTION_INTENT_SENTENCE_SUGGESTION, "SPACY"),
+    )
+    answer_task.wait()
+    answer = answer_task.result
+    answer.update(
+        {
+            "language": language,
+            "n_sentences_to_generate": n_sentences_to_generate,
+            "percentage_to_replace": percentage_to_replace,
+            "intent": intent,
+        }
+    )
+    return answer
diff --git a/bothub_nlp_api/handlers/parse.py b/bothub_nlp_api/handlers/parse.py
index 6c03ccd3..6cdc38d8 100644
--- a/bothub_nlp_api/handlers/parse.py
+++ b/bothub_nlp_api/handlers/parse.py
@@ -5,8 +5,7 @@
 from bothub_nlp_celery.actions import ACTION_PARSE, queue_name
 from bothub_nlp_celery.app import celery_app
 from bothub_nlp_celery.tasks import TASK_NLU_PARSE_TEXT
-from bothub_nlp_celery.utils import ALGORITHM_TO_LANGUAGE_MODEL
-from bothub_nlp_celery import settings as celery_settings
+from bothub_nlp_celery.utils import get_language_model

 from bothub_nlp_api import settings
 from bothub_nlp_api.utils import AuthorizationIsRequired
@@ -94,23 +93,7 @@
     if not update.get("version"):
         raise ValidationError("This repository has never been trained")

-    chosen_algorithm = update.get("algorithm")
-    # chosen_algorithm = choose_best_algorithm(update.get("language"))
-    model = ALGORITHM_TO_LANGUAGE_MODEL[chosen_algorithm]
-
-    language = update.get("language")
-    if (model == "SPACY" and language not in celery_settings.SPACY_LANGUAGES) or (
-        model == "BERT" and language not in celery_settings.BERT_LANGUAGES
-    ):
-        model = None
-
-    # Send parse to SPACY worker to use name_entities (only if BERT not in use)
-    if (
-        (update.get("use_name_entities"))
-        and (model is None)
-        and (language in celery_settings.SPACY_LANGUAGES)
-    ):
-        model = "SPACY"
+    model = get_language_model(update)

     answer_task = celery_app.send_task(
         TASK_NLU_PARSE_TEXT,
diff --git a/bothub_nlp_api/handlers/score_calculation.py b/bothub_nlp_api/handlers/score_calculation.py
new file mode 100644
index 00000000..f4cb8ada
--- /dev/null
+++ b/bothub_nlp_api/handlers/score_calculation.py
@@ -0,0 +1,32 @@
+from bothub_nlp_celery.actions import ACTION_SCORE_CALCULATION, queue_name
+
+from ..utils import backend, get_repository_authorization
+from ..utils import AuthorizationIsRequired
+
+
+from bothub_nlp_celery.app import celery_app
+from bothub_nlp_celery.tasks import TASK_NLU_SCORE_CALCULATION
+
+
+def score_handler(authorization, repository_version, language):
+
+    repository_authorization = get_repository_authorization(authorization)
+
+    if not repository_authorization:
+        raise AuthorizationIsRequired()
+
+    try:
+        update = backend().request_backend_train(
+            repository_authorization, language, repository_version
+        )
+    except Exception:
+        update = {}
+
+    answer_task = celery_app.send_task(
+        TASK_NLU_SCORE_CALCULATION,
+        args=[update.get("current_version_id"), repository_authorization],
+        queue=queue_name(language, ACTION_SCORE_CALCULATION),
+    )
+    answer_task.wait()
+
+    return answer_task.result
diff --git a/bothub_nlp_api/handlers/train.py b/bothub_nlp_api/handlers/train.py
index 5df92c4d..5dc6e300 100644
--- a/bothub_nlp_api/handlers/train.py
+++ b/bothub_nlp_api/handlers/train.py
@@ -2,12 +2,10 @@
 from bothub_nlp_celery.actions import ACTION_TRAIN, queue_name
 from bothub_nlp_celery.app import celery_app
 from bothub_nlp_celery.tasks import TASK_NLU_TRAIN_UPDATE
-from bothub_nlp_celery.utils import ALGORITHM_TO_LANGUAGE_MODEL
-from bothub_nlp_celery import settings as celery_settings
+from bothub_nlp_celery.utils import get_language_model

 from .. import settings, utils
-from ..utils import backend
-from ..utils import get_repository_authorization
+from ..utils import backend, get_repository_authorization

 TRAIN_STATUS_TRAINED = "trained"
 TRAIN_STATUS_PROCESSING = "processing"
@@ -22,52 +20,39 @@ def train_handler(authorization, repository_version=None):

     for language in settings.SUPPORTED_LANGUAGES.keys():
-        current_update = backend().request_backend_train(
+        update = backend().request_backend_train(
             repository_authorization, language, repository_version
         )

-        if not current_update.get("ready_for_train"):
+        if not update.get("ready_for_train"):
             continue

-        chosen_algorithm = current_update.get("algorithm")
-        model = ALGORITHM_TO_LANGUAGE_MODEL[chosen_algorithm]
-
-        if (model == "SPACY" and language not in celery_settings.SPACY_LANGUAGES) or (
-            model == "BERT" and language not in celery_settings.BERT_LANGUAGES
-        ):
-            model = None
-
-        # Send train to SPACY worker to use name_entities (only if BERT not in use)
-        if (
-            (current_update.get("use_name_entities"))
-            and (model is None)
-            and (language in celery_settings.SPACY_LANGUAGES)
-        ):
-            model = "SPACY"
+        model = get_language_model(update)

         if settings.BOTHUB_SERVICE_TRAIN == "celery":
             train_task = celery_app.send_task(
                 TASK_NLU_TRAIN_UPDATE,
                 args=[
-                    current_update.get("current_version_id"),
-                    current_update.get("repository_authorization_user_id"),
+                    update.get("current_version_id"),
+                    update.get("repository_authorization_user_id"),
                     repository_authorization,
                 ],
-                queue=queue_name(current_update.get("language"), ACTION_TRAIN, model),
+                queue=queue_name(update.get("language"), ACTION_TRAIN, model),
             )
             train_tasks.append({"task": train_task, "language": language})

         elif settings.BOTHUB_SERVICE_TRAIN == "ai-platform":
-            job_id = f'bothub_{settings.ENVIRONMENT}_train_{str(current_update.get("current_version_id"))}_{language}_{str(int(time.time()))}'
+            job_id = f'bothub_{settings.ENVIRONMENT}_train_{str(update.get("current_version_id"))}_{language}_{str(int(time.time()))}'
             utils.send_job_train_ai_platform(
                 jobId=job_id,
-                repository_version=str(current_update.get("current_version_id")),
-                by_id=str(current_update.get("repository_authorization_user_id")),
+                repository_version=str(update.get("current_version_id")),
+                by_id=str(update.get("repository_authorization_user_id")),
                 repository_authorization=str(repository_authorization),
                 language=language,
                 type_model=model,
+                operation="train",
             )
             backend().request_backend_save_queue_id(
-                update_id=str(current_update.get("current_version_id")),
+                update_id=str(update.get("current_version_id")),
                 repository_authorization=str(repository_authorization),
                 task_id=job_id,
                 from_queue=0,
diff --git a/bothub_nlp_api/handlers/word_suggestion.py b/bothub_nlp_api/handlers/word_suggestion.py
new file mode 100644
index 00000000..1e1834cd
--- /dev/null
+++ b/bothub_nlp_api/handlers/word_suggestion.py
@@ -0,0 +1,29 @@
+from bothub_nlp_celery.actions import ACTION_WORD_SUGGESTION, queue_name
+from bothub_nlp_celery.app import celery_app
+from bothub_nlp_celery.tasks import TASK_NLU_WORD_SUGGESTION_TEXT
+
+from bothub_nlp_api import settings
+from bothub_nlp_api.utils import ValidationError
+
+
+def _word_suggestion(text, language, n_words_to_generate):
+    from ..utils import DEFAULT_LANGS_PRIORITY
+
+    if language and (
+        language not in settings.SUPPORTED_LANGUAGES.keys()
+        and language not in DEFAULT_LANGS_PRIORITY.keys()
+    ):
+        raise ValidationError("Language '{}' not supported by now.".format(language))
+
+    print(queue_name(language, ACTION_WORD_SUGGESTION, "SPACY"))
+    answer_task = celery_app.send_task(
+        TASK_NLU_WORD_SUGGESTION_TEXT,
+        args=[text, n_words_to_generate],
+        queue=queue_name(language, ACTION_WORD_SUGGESTION, "SPACY"),
+    )
+    answer_task.wait()
+    answer = answer_task.result
+    answer.update(
+        {"text": text, "language": language, "n_words_to_generate": n_words_to_generate}
+    )
+    return answer
diff --git a/bothub_nlp_api/handlers/words_distribution.py b/bothub_nlp_api/handlers/words_distribution.py
index 19188581..b125d179 100644
--- a/bothub_nlp_api/handlers/words_distribution.py
+++ b/bothub_nlp_api/handlers/words_distribution.py
@@ -9,11 +9,11 @@


 def _words_distribution(authorization, language, repository_version=None):
-    from ..utils import NEXT_LANGS
+    from ..utils import DEFAULT_LANGS_PRIORITY

     if language and (
         language not in settings.SUPPORTED_LANGUAGES.keys()
-        and language not in NEXT_LANGS.keys()
+        and language not in DEFAULT_LANGS_PRIORITY.keys()
     ):
         raise ValidationError("Language '{}' not supported by now.".format(language))

@@ -30,8 +30,9 @@ def _words_distribution(authorization, language, repository_version=None):
             language,
             repository_authorization,
         ],
-        queue=queue_name(ACTION_WORDS_DISTIRBUTION, language),
+        queue=queue_name(language, ACTION_WORDS_DISTIRBUTION),
     )
+    answer_task.wait()
     answer = answer_task.result

     return answer
diff --git a/bothub_nlp_api/models.py b/bothub_nlp_api/models.py
index 7c1e2071..a69ba141 100644
--- a/bothub_nlp_api/models.py
+++ b/bothub_nlp_api/models.py
@@ -1,4 +1,4 @@
-from typing import List, Dict, Any
+from typing import List, Dict, Any, Tuple

 from pydantic import BaseModel

@@ -24,6 +24,20 @@ class SentenceSuggestionRequest(BaseModel):
     percentage_to_replace: float = 0.3


+class IntentSentenceSuggestionRequest(BaseModel):
+    language: str = None
+    intent: str = None
+    n_sentences_to_generate: int = 10
+    percentage_to_replace: float = 0.3
+    repository_version: int = None
+
+
+class WordSuggestionRequest(BaseModel):
+    text: str
+    language: str = None
+    n_words_to_generate: int = 10
+
+
 class WordsDistributionRequest(BaseModel):
     language: str = None
     repository_version: int = None
@@ -36,6 +50,7 @@ class TrainRequest(BaseModel):
 class EvaluateRequest(BaseModel):
     language: str = None
     repository_version: int = None
+    cross_validation: bool = False


 class IntentResponse(BaseModel):
@@ -73,6 +88,16 @@ class SentenceSuggestionResponse(BaseModel):
     suggested_sentences: List[str]


+class IntentSentenceSuggestionResponse(BaseModel):
+    intent: str
+    suggested_sentences: List[str]
+
+
+class WordSuggestionResponse(BaseModel):
+    text: str
+    similar_words: List[Tuple[str, str]]
+
+
 class WordsDistributionResponse(BaseModel):
     words: Dict[str, Dict[str, float]]

@@ -99,6 +124,13 @@ class OtherLabel(BaseModel):
     value: str


+class ScoreResponse(BaseModel):
+    intentions_balance: Dict[str, Any]
+    intentions_size: Dict[str, Any]
+    evaluate_size: Dict[str, Any]
+    average: float
+
+
 class InfoResponse(BaseModel):
     absolute_url: str
     algorithm: str
@@ -139,8 +171,9 @@ class EvaluateResponse(BaseModel):
     language: str
     status: str
     repository_version: int
-    evaluate_id: int
-    evaluate_version: int
+    evaluate_id: Any
+    evaluate_version: Any
+    cross_validation: bool


 class TaskQueueResponse(BaseModel):
diff --git a/bothub_nlp_api/routers/v2.py b/bothub_nlp_api/routers/v2.py
index 3f0e7bea..52d34449 100644
--- a/bothub_nlp_api/routers/v2.py
+++ b/bothub_nlp_api/routers/v2.py
@@ -1,10 +1,12 @@
 from fastapi import Depends, APIRouter, Header, HTTPException
 from starlette.requests import Request

-from bothub_nlp_api.handlers import evaluate, task_queue
+from bothub_nlp_api.handlers import evaluate, task_queue, score_calculation
 from bothub_nlp_api.handlers import parse
 from bothub_nlp_api.handlers import debug_parse
 from bothub_nlp_api.handlers import sentence_suggestion
+from bothub_nlp_api.handlers import intent_sentence_suggestion
+from bothub_nlp_api.handlers import word_suggestion
 from bothub_nlp_api.handlers import words_distribution
 from bothub_nlp_api.handlers import train
 from bothub_nlp_api.models import (
@@ -12,6 +14,8 @@
     DebugParseRequest,
     WordsDistributionRequest,
     SentenceSuggestionRequest,
+    IntentSentenceSuggestionRequest,
+    WordSuggestionRequest,
     WordsDistributionResponse,
     TrainRequest,
     EvaluateRequest,
@@ -20,8 +24,11 @@
 from bothub_nlp_api.models import ParseResponse
 from bothub_nlp_api.models import DebugParseResponse
 from bothub_nlp_api.models import SentenceSuggestionResponse
+from bothub_nlp_api.models import IntentSentenceSuggestionResponse
+from bothub_nlp_api.models import WordSuggestionResponse
 from bothub_nlp_api.models import TrainResponse
 from bothub_nlp_api.models import EvaluateResponse
+from bothub_nlp_api.models import ScoreResponse
 from bothub_nlp_api.utils import backend, AuthorizationRequired
 from bothub_nlp_api.utils import get_repository_authorization

@@ -77,6 +84,7 @@ async def sentence_suggestion_post_handler(item: SentenceSuggestionRequest,):
         item.language,
         item.n_sentences_to_generate,
         item.percentage_to_replace,
+        item.intent,
     )


@@ -85,6 +93,45 @@ async def sentence_suggestion_options():
     return {}  # pragma: no cover


+@router.post(
+    r"/intent_sentence_suggestion/?", response_model=IntentSentenceSuggestionResponse
+)
+async def intent_sentence_suggestion_post_handler(
+    item: IntentSentenceSuggestionRequest,
+    request: Request = Depends(AuthorizationRequired()),
+    Authorization: str = Header(..., description="Bearer your_key"),
+):
+
+    return intent_sentence_suggestion._intent_sentence_suggestion(
+        Authorization,
+        item.language,
+        item.intent,
+        item.n_sentences_to_generate,
+        item.percentage_to_replace,
+        item.repository_version,
+    )
+
+
+@router.options(
+    r"/intent_sentence_suggestion/?", status_code=204, include_in_schema=False
+)
+async def intent_sentence_suggestion_options():
+    return {}  # pragma: no cover
+
+
+@router.post(r"/word_suggestion/?", response_model=WordSuggestionResponse)
+async def word_suggestion_post_handler(item: WordSuggestionRequest,):
+
+    return word_suggestion._word_suggestion(
+        item.text, item.language, item.n_words_to_generate
+    )
+
+
+@router.options(r"/word_suggestion/?", status_code=204, include_in_schema=False)
+async def word_suggestion_options():
+    return {}  # pragma: no cover
+
+
 @router.post(r"/words_distribution/?", response_model=WordsDistributionResponse)
 async def words_distribution_post_handler(
     item: WordsDistributionRequest,
@@ -143,7 +190,7 @@ async def evaluate_handler(
     Authorization: str = Header(..., description="Bearer your_key"),
 ):
     result = evaluate.evaluate_handler(
-        Authorization, item.language, item.repository_version
+        Authorization, item.language, item.repository_version, item.cross_validation
     )
     if result.get("status") and result.get("error"):
         raise HTTPException(status_code=400, detail=result)
@@ -159,3 +206,22 @@ async def evaluate_options():

 async def task_queue_handler(id_task: str, from_queue: str):
     return task_queue.task_queue_handler(id_task, from_queue)
+
+
+@router.get(r"/score/?", response_model=ScoreResponse)
+async def score_handler(
+    repository_version: str,
+    language: str,
+    request: Request = Depends(AuthorizationRequired()),
+    authorization: str = Header(..., description="Bearer your_key"),
+):
+    result = score_calculation.score_handler(
+        authorization, repository_version, language
+    )
+
+    return result
+
+
+@router.options(r"/score/?")
+async def score_options():
+    return {}  # pragma: no cover
diff --git a/bothub_nlp_api/utils.py b/bothub_nlp_api/utils.py
index cd6eb39e..353443c7 100644
--- a/bothub_nlp_api/utils.py
+++ b/bothub_nlp_api/utils.py
@@ -89,10 +89,18 @@ def get_train_job_status(job_name):


 def send_job_train_ai_platform(
-    jobId, repository_version, by_id, repository_authorization, language, type_model
+    jobId,
+    repository_version,
+    by_id,
+    repository_authorization,
+    language,
+    type_model,
+    operation="train",
 ):
     image_sufix = f"-{language}-{type_model}" if type_model is not None else "-xx-SPACY"
     args = [
+        "--operation",
+        operation,
         "--repository-version",
         repository_version,
         "--by-id",
@@ -104,6 +112,7 @@ def send_job_train_ai_platform(
         "--AIPLATFORM_LANGUAGE_QUEUE",
         language,
     ]
+
     if type_model is not None:
         args.extend(["--AIPLATFORM_LANGUAGE_MODEL", type_model])
     training_inputs = {