diff --git a/doc/gui/examples/charts/error-bars-asymmetric.py b/doc/gui/examples/charts/error-bars-asymmetric.py
index 1ac8c35fab..316d1638ed 100644
--- a/doc/gui/examples/charts/error-bars-asymmetric.py
+++ b/doc/gui/examples/charts/error-bars-asymmetric.py
@@ -22,22 +22,14 @@
 # y values: [0..n_samples-1]
 y = range(0, n_samples)
 
-data = {
-    # The x series is made of random numbers between 1 and 10
-    "x": [random.uniform(1, 10) for i in y],
-    "y": y,
-}
+data = {"x": [random.uniform(1, 10) for _ in y], "y": y}
 
 options = {
     "error_x": {
         "type": "data",
-        # Allows for a 'plus' and a 'minus' error data
         "symmetric": False,
-        # The 'plus' error data is a series of random numbers
-        "array": [random.uniform(0, 5) for i in y],
-        # The 'minus' error data is a series of random numbers
-        "arrayminus": [random.uniform(0, 2) for i in y],
-        # Color of the error bar
+        "array": [random.uniform(0, 5) for _ in y],
+        "arrayminus": [random.uniform(0, 2) for _ in y],
         "color": "red",
     }
 }
diff --git a/doc/gui/examples/charts/histogram-cumulative.py b/doc/gui/examples/charts/histogram-cumulative.py
index 78bf096ef6..bd7703b8a2 100644
--- a/doc/gui/examples/charts/histogram-cumulative.py
+++ b/doc/gui/examples/charts/histogram-cumulative.py
@@ -18,7 +18,7 @@
 from taipy.gui import Gui
 
 # Random data set
-data = [random.random() for i in range(500)]
+data = [random.random() for _ in range(500)]
 
 options = {
     # Enable the cumulative histogram
diff --git a/doc/gui/examples/charts/histogram-horizontal.py b/doc/gui/examples/charts/histogram-horizontal.py
index 222cdc64dd..6de42b67ea 100644
--- a/doc/gui/examples/charts/histogram-horizontal.py
+++ b/doc/gui/examples/charts/histogram-horizontal.py
@@ -18,7 +18,7 @@
 from taipy.gui import Gui
 
 # Random data set
-data = {"Count": [random.random() for i in range(100)]}
+data = {"Count": [random.random() for _ in range(100)]}
 
 page = """
 # Histograms - Horizontal
diff --git a/doc/gui/examples/charts/histogram-nbins.py b/doc/gui/examples/charts/histogram-nbins.py
index 4dd91050ed..365e00f02c 100644
--- a/doc/gui/examples/charts/histogram-nbins.py
+++ b/doc/gui/examples/charts/histogram-nbins.py
@@ -18,7 +18,7 @@
 from taipy.gui import Gui
 
 # Random set of 100 samples
-samples = {"x": [random.gauss() for i in range(100)]}
+samples = {"x": [random.gauss() for _ in range(100)]}
 
 # Use the same data for both traces
 data = [samples, samples]
diff --git a/doc/gui/examples/charts/histogram-normalized.py b/doc/gui/examples/charts/histogram-normalized.py
index d9fae3bca9..188abcd4be 100644
--- a/doc/gui/examples/charts/histogram-normalized.py
+++ b/doc/gui/examples/charts/histogram-normalized.py
@@ -18,7 +18,7 @@
 from taipy.gui import Gui
 
 # Random data set
-data = [random.random() for i in range(100)]
+data = [random.random() for _ in range(100)]
 
 # Normalize to show bin probabilities
 options = {"histnorm": "probability"}
diff --git a/doc/gui/examples/charts/histogram-overlay.py b/doc/gui/examples/charts/histogram-overlay.py
index 1752557e39..e33ca434d7 100644
--- a/doc/gui/examples/charts/histogram-overlay.py
+++ b/doc/gui/examples/charts/histogram-overlay.py
@@ -18,7 +18,10 @@
 from taipy.gui import Gui
 
 # Data set made of two series of random numbers
-data = [{"x": [random.random() + 1 for i in range(100)]}, {"x": [random.random() + 1.1 for i in range(100)]}]
+data = [
+    {"x": [random.random() + 1 for _ in range(100)]},
+    {"x": [random.random() + 1.1 for _ in range(100)]},
+]
 
 options = [
     # First data set displayed as semi-transparent, green bars
diff --git a/doc/gui/examples/charts/histogram-simple.py b/doc/gui/examples/charts/histogram-simple.py
index f2da24265f..0f04eb7f72 100644
--- a/doc/gui/examples/charts/histogram-simple.py
+++ b/doc/gui/examples/charts/histogram-simple.py
@@ -18,7 +18,7 @@
 from taipy import Gui
 
 # Random data set
-data = [random.gauss(0, 5) for i in range(1000)]
+data = [random.gauss(0, 5) for _ in range(1000)]
 
 page = """
 # Histogram - Simple
diff --git a/doc/gui/examples/charts/histogram-stacked.py b/doc/gui/examples/charts/histogram-stacked.py
index 9244fc96d6..9d05c6c740 100644
--- a/doc/gui/examples/charts/histogram-stacked.py
+++ b/doc/gui/examples/charts/histogram-stacked.py
@@ -18,7 +18,10 @@
 from taipy.gui import Gui
 
 # Data set made of two series of random numbers
-data = {"A": [random.random() for i in range(200)], "B": [random.random() for i in range(200)]}
+data = {
+    "A": [random.random() for _ in range(200)],
+    "B": [random.random() for _ in range(200)],
+}
 
 # Names of the two traces
 names = ["A samples", "B samples"]
diff --git a/doc/gui/examples/charts/treemap-simple.py b/doc/gui/examples/charts/treemap-simple.py
index 1279844aeb..02438cb010 100644
--- a/doc/gui/examples/charts/treemap-simple.py
+++ b/doc/gui/examples/charts/treemap-simple.py
@@ -18,10 +18,10 @@
 # Data set: the first 10 elements of the Fibonacci sequence
 n_numbers = 10
 fibonacci = [0, 1]
-for i in range(2, n_numbers):
-    fibonacci.append(fibonacci[i - 1] + fibonacci[i - 2])
-
-data = {"index": [i for i in range(1, n_numbers + 1)], "fibonacci": fibonacci}
+fibonacci.extend(
+    fibonacci[i - 1] + fibonacci[i - 2] for i in range(2, n_numbers)
+)
+data = {"index": list(range(1, n_numbers + 1)), "fibonacci": fibonacci}
 
 page = """
 # TreeMap - Simple
diff --git a/taipy/config/__init__.py b/taipy/config/__init__.py
index 2812333ad7..0b34e13b28 100644
--- a/taipy/config/__init__.py
+++ b/taipy/config/__init__.py
@@ -43,8 +43,8 @@ def func_with_doc(section, attribute_name, default, configuration_methods, add_t
     for exposed_configuration_method, configuration_method in configuration_methods:
         annotation = "    @staticmethod\n"
-        sign = "    def " + exposed_configuration_method + str(signature(configuration_method)) + ":\n"
-        doc = '        """' + configuration_method.__doc__ + '"""\n'
+        sign = f"    def {exposed_configuration_method}{signature(configuration_method)}:\n"
+        doc = f'        """{configuration_method.__doc__}"""\n'
         content = "        pass\n\n"
         f.write(annotation + sign + doc + content)
     return func(section, attribute_name, default, configuration_methods, add_to_unconflicted_sections)
diff --git a/taipy/config/_config.py b/taipy/config/_config.py
index d11d0a88cf..4b77e27a14 100644
--- a/taipy/config/_config.py
+++ b/taipy/config/_config.py
@@ -68,11 +68,9 @@ def __update_sections(self, entity_config, other_entity_configs):
             entity_config[self.DEFAULT_KEY] = other_entity_configs[self.DEFAULT_KEY]
         for cfg_id, sub_config in other_entity_configs.items():
             if cfg_id != self.DEFAULT_KEY:
-                if cfg_id in entity_config:
-                    entity_config[cfg_id]._update(sub_config._to_dict(), entity_config.get(self.DEFAULT_KEY))
-                else:
+                if cfg_id not in entity_config:
                     entity_config[cfg_id] = copy(sub_config)
-                    entity_config[cfg_id]._update(sub_config._to_dict(), entity_config.get(self.DEFAULT_KEY))
+                entity_config[cfg_id]._update(sub_config._to_dict(), entity_config.get(self.DEFAULT_KEY))
                 self.__point_nested_section_to_self(sub_config)
 
     def __point_nested_section_to_self(self, section):
diff --git a/taipy/config/_serializer/_base_serializer.py b/taipy/config/_serializer/_base_serializer.py
index de231f3769..ff6a129115 100644
--- a/taipy/config/_serializer/_base_serializer.py
+++ b/taipy/config/_serializer/_base_serializer.py
@@ -56,25 +56,25 @@ def _stringify(cls, as_dict):
         if as_dict is None:
             return None
         if isinstance(as_dict, Section):
-            return as_dict.id + ":SECTION"
+            return f"{as_dict.id}:SECTION"
         if isinstance(as_dict, Scope):
-            return as_dict.name + ":SCOPE"
+            return f"{as_dict.name}:SCOPE"
         if isinstance(as_dict, Frequency):
-            return as_dict.name + ":FREQUENCY"
+            return f"{as_dict.name}:FREQUENCY"
         if isinstance(as_dict, bool):
-            return str(as_dict) + ":bool"
+            return f"{as_dict}:bool"
         if isinstance(as_dict, int):
-            return str(as_dict) + ":int"
+            return f"{as_dict}:int"
         if isinstance(as_dict, float):
-            return str(as_dict) + ":float"
+            return f"{as_dict}:float"
         if isinstance(as_dict, datetime):
-            return as_dict.isoformat() + ":datetime"
+            return f"{as_dict.isoformat()}:datetime"
         if isinstance(as_dict, timedelta):
-            return cls._timedelta_to_str(as_dict) + ":timedelta"
+            return f"{cls._timedelta_to_str(as_dict)}:timedelta"
         if inspect.isfunction(as_dict) or isinstance(as_dict, types.BuiltinFunctionType):
-            return as_dict.__module__ + "." + as_dict.__name__ + ":function"
+            return f"{as_dict.__module__}.{as_dict.__name__}:function"
         if inspect.isclass(as_dict):
-            return as_dict.__module__ + "." + as_dict.__qualname__ + ":class"
+            return f"{as_dict.__module__}.{as_dict.__qualname__}:class"
         if isinstance(as_dict, dict):
             return {str(key): cls._stringify(val) for key, val in as_dict.items()}
         if isinstance(as_dict, list):
@@ -115,8 +115,7 @@ def _pythonify(cls, val):
             r"^(.+):(\bbool\b|\bstr\b|\bint\b|\bfloat\b|\bdatetime\b||\btimedelta\b|"
             r"\bfunction\b|\bclass\b|\bSCOPE\b|\bFREQUENCY\b|\bSECTION\b)?$"
         )
-        match = re.fullmatch(TYPE_PATTERN, str(val))
-        if match:
+        if match := re.fullmatch(TYPE_PATTERN, str(val)):
            actual_val = match.group(1)
            dynamic_type = match.group(2)
            if dynamic_type == "SECTION":
diff --git a/taipy/config/checker/_checkers/_config_checker.py b/taipy/config/checker/_checkers/_config_checker.py
index 9887424cc8..4d52a29c89 100644
--- a/taipy/config/checker/_checkers/_config_checker.py
+++ b/taipy/config/checker/_checkers/_config_checker.py
@@ -51,17 +51,16 @@ def _check_children(
                 config_value,
                 f"{config_key} field of {parent_config_class.__name__} `{config_id}` is empty.",
             )
-        else:
-            if not (
-                (isinstance(config_value, List) or isinstance(config_value, Set))
-                and all(map(lambda x: isinstance(x, child_config_class), config_value))
-            ):
-                self._error(
-                    config_key,
-                    config_value,
-                    f"{config_key} field of {parent_config_class.__name__} `{config_id}` must be populated with a list "
-                    f"of {child_config_class.__name__} objects.",
-                )
+        elif not (
+            isinstance(config_value, (List, Set))
+            and all(map(lambda x: isinstance(x, child_config_class), config_value))
+        ):
+            self._error(
+                config_key,
+                config_value,
+                f"{config_key} field of {parent_config_class.__name__} `{config_id}` must be populated with a list "
+                f"of {child_config_class.__name__} objects.",
+            )
 
     def _check_existing_config_id(self, config):
         if not config.id:
diff --git a/taipy/config/common/_config_blocker.py b/taipy/config/common/_config_blocker.py
index bf9ae4b9d0..ac9cb93d27 100644
--- a/taipy/config/common/_config_blocker.py
+++ b/taipy/config/common/_config_blocker.py
@@ -40,7 +40,7 @@ def _check_if_is_blocking(*args, **kwargs):
                     " modifying the Configuration. For more information, please refer to:"
                     " https://docs.taipy.io/en/latest/manuals/running_services/#running-core."
                 )
-                cls.__logger.error("ConfigurationUpdateBlocked: " + error_message)
+                cls.__logger.error(f"ConfigurationUpdateBlocked: {error_message}")
                 raise ConfigurationUpdateBlocked(error_message)
             return f(*args, **kwargs)
diff --git a/taipy/config/common/_template_handler.py b/taipy/config/common/_template_handler.py
index 12273f6880..c404053342 100644
--- a/taipy/config/common/_template_handler.py
+++ b/taipy/config/common/_template_handler.py
@@ -43,8 +43,7 @@ def _replace_templates(cls, template, type=str, required=True, default=None):
     def _replace_template(cls, template, type, required, default):
         if "ENV" not in str(template):
             return template
-        match = re.fullmatch(cls._PATTERN, str(template))
-        if match:
+        if match := re.fullmatch(cls._PATTERN, str(template)):
             var = match.group(1)
             dynamic_type = match.group(3)
             val = os.environ.get(var)
@@ -77,7 +76,7 @@ def _to_bool(val: str) -> bool:
         possible_values = ["true", "false"]
         if str.lower(val) not in possible_values:
             raise InconsistentEnvVariableError("{val} is not a Boolean.")
-        return str.lower(val) == "true" or not (str.lower(val) == "false")
+        return str.lower(val) == "true"
 
     @staticmethod
     def _to_int(val: str) -> int:
diff --git a/taipy/config/config.py b/taipy/config/config.py
index 4b86aaa339..e6ede7c4f0 100644
--- a/taipy/config/config.py
+++ b/taipy/config/config.py
@@ -178,11 +178,10 @@ def _register_default(cls, default_section: Section):
             cls._default_config._unique_sections[default_section.name]._update(default_section._to_dict())
         else:
             cls._default_config._unique_sections[default_section.name] = default_section
+        elif def_sections := cls._default_config._sections.get(default_section.name, None):
+            def_sections[default_section.id] = default_section
         else:
-            if def_sections := cls._default_config._sections.get(default_section.name, None):
-                def_sections[default_section.id] = default_section
-            else:
-                cls._default_config._sections[default_section.name] = {default_section.id: default_section}
+            cls._default_config._sections[default_section.name] = {default_section.id: default_section}
         cls._serializer._section_class[default_section.name] = default_section.__class__  # type: ignore
         cls.__json_serializer._section_class[default_section.name] = default_section.__class__  # type: ignore
         cls._compile_configs()
@@ -195,14 +194,13 @@ def _register(cls, section):
             cls._python_config._unique_sections[section.name]._update(section._to_dict())
         else:
             cls._python_config._unique_sections[section.name] = section
-        else:
-            if sections := cls._python_config._sections.get(section.name, None):
-                if sections.get(section.id, None):
-                    sections[section.id]._update(section._to_dict())
-                else:
-                    sections[section.id] = section
+        elif sections := cls._python_config._sections.get(section.name, None):
+            if sections.get(section.id, None):
+                sections[section.id]._update(section._to_dict())
             else:
-                cls._python_config._sections[section.name] = {section.id: section}
+                sections[section.id] = section
+        else:
+            cls._python_config._sections[section.name] = {section.id: section}
         cls._serializer._section_class[section.name] = section.__class__
         cls.__json_serializer._section_class[section.name] = section.__class__
         cls._compile_configs()
diff --git a/taipy/core/_backup/_backup.py b/taipy/core/_backup/_backup.py
index 825dd9d0cb..6bd4d34897 100644
--- a/taipy/core/_backup/_backup.py
+++ b/taipy/core/_backup/_backup.py
@@ -32,27 +32,32 @@ def _append_to_backup_file(new_file_path: str):
 
 
 def _remove_from_backup_file(to_remove_file_path: str):
-    if preserve_file_path := os.getenv(__BACKUP_FILE_PATH_ENVIRONMENT_VARIABLE_NAME, None):
-        storage_folder = os.path.abspath(Config.core.storage_folder) + os.sep
-        if not os.path.abspath(to_remove_file_path).startswith(storage_folder):
-            try:
-                with open(preserve_file_path, "r+") as f:
-                    old_backup = f.read()
-                    to_remove_file_path = to_remove_file_path + "\n"
-
-                    # To avoid removing the file path of different data nodes that are pointing
-                    # to the same file. We will only replace the file path only once.
-                    if old_backup.startswith(to_remove_file_path):
-                        new_backup = old_backup.replace(to_remove_file_path, "", 1)
-                    else:
-                        new_backup = old_backup.replace("\n" + to_remove_file_path, "\n", 1)
-
-                    if new_backup is not old_backup:
-                        f.seek(0)
-                        f.write(new_backup)
-                        f.truncate()
-            except Exception:
-                pass
+    if not (
+        preserve_file_path := os.getenv(
+            __BACKUP_FILE_PATH_ENVIRONMENT_VARIABLE_NAME, None
+        )
+    ):
+        return
+    storage_folder = os.path.abspath(Config.core.storage_folder) + os.sep
+    if not os.path.abspath(to_remove_file_path).startswith(storage_folder):
+        try:
+            with open(preserve_file_path, "r+") as f:
+                old_backup = f.read()
+                to_remove_file_path += "\n"
+
+                # Several data nodes may point to the same file; only replace
+                # the file path once so the other entries are preserved.
+                if old_backup.startswith(to_remove_file_path):
+                    new_backup = old_backup.replace(to_remove_file_path, "", 1)
+                else:
+                    new_backup = old_backup.replace("\n" + to_remove_file_path, "\n", 1)
+
+                if new_backup is not old_backup:
+                    f.seek(0)
+                    f.write(new_backup)
+                    f.truncate()
+        except Exception:
+            pass
 
 
 def _replace_in_backup_file(old_file_path: str, new_file_path: str):
diff --git a/taipy/core/_core_cli.py b/taipy/core/_core_cli.py
index a828211934..10fa2f4409 100644
--- a/taipy/core/_core_cli.py
+++ b/taipy/core/_core_cli.py
@@ -113,6 +113,6 @@ def parse_arguments(cls):
     @classmethod
     def __add_taipy_prefix(cls, key: str):
         if key.startswith("--no-"):
-            return key[:5] + "taipy-" + key[5:]
+            return f"{key[:5]}taipy-{key[5:]}"
 
-        return key[:2] + "taipy-" + key[2:]
+        return f"{key[:2]}taipy-{key[2:]}"
diff --git a/taipy/core/_entity/_dag.py b/taipy/core/_entity/_dag.py
index cf27c2fa24..92ead2d85a 100644
--- a/taipy/core/_entity/_dag.py
+++ b/taipy/core/_entity/_dag.py
@@ -31,7 +31,7 @@ def __init__(self, src: _Node, dest: _Node):
 
 class _DAG:
     def __init__(self, dag: nx.DiGraph):
-        self._sorted_nodes = list(nodes for nodes in nx.topological_generations(dag))
+        self._sorted_nodes = list(nx.topological_generations(dag))
         self._length, self._width = self.__compute_size()
         self._grid_length, self._grid_width = self.__compute_grid_size()
         self._nodes = self.__compute_nodes()
@@ -54,7 +54,7 @@ def edges(self) -> List[_Edge]:
         return self._edges
 
     def __compute_size(self) -> Tuple[int, int]:
-        return len(self._sorted_nodes), max([len(i) for i in self._sorted_nodes])
+        return len(self._sorted_nodes), max(len(i) for i in self._sorted_nodes)
 
     def __compute_grid_size(self) -> Tuple[int, int]:
         if self._width == 1:
@@ -65,8 +65,7 @@ def __compute_grid_size(self) -> Tuple[int, int]:
 
     def __compute_nodes(self) -> Dict[str, _Node]:
         nodes = {}
-        x = 0
-        for same_lvl_nodes in self._sorted_nodes:
+        for x, same_lvl_nodes in enumerate(self._sorted_nodes):
             lcl_wdt = len(same_lvl_nodes)
             is_max = lcl_wdt != self.width
             if self.width != 1:
@@ -77,14 +76,13 @@ def __compute_nodes(self) -> Dict[str, _Node]:
             for node in same_lvl_nodes:
                 y += y_incr
                 nodes[node.id] = _Node(node, x, y)
-            x += 1
         return nodes
 
     def __compute_edges(self, dag) -> List[_Edge]:
-        edges = []
-        for edge in dag.edges():
-            edges.append(_Edge(self.nodes[edge[0].id], self.nodes[edge[1].id]))
-        return edges
+        return [
+            _Edge(self.nodes[edge[0].id], self.nodes[edge[1].id])
+            for edge in dag.edges()
+        ]
 
     @staticmethod
     def __lcm(*integers) -> int:
diff --git a/taipy/core/_entity/_entity.py b/taipy/core/_entity/_entity.py
index d1d0b3f43b..ccaec4ab20 100644
--- a/taipy/core/_entity/_entity.py
+++ b/taipy/core/_entity/_entity.py
@@ -22,7 +22,7 @@ class _Entity:
 
     def __enter__(self):
         self._is_in_context = True
-        self._in_context_attributes_changed_collector = list()
+        self._in_context_attributes_changed_collector = []
         return self
 
     def __exit__(self, exc_type, exc_value, exc_traceback):
diff --git a/taipy/core/_entity/_labeled.py b/taipy/core/_entity/_labeled.py
index ff950f6a3b..c8fb4b05ca 100644
--- a/taipy/core/_entity/_labeled.py
+++ b/taipy/core/_entity/_labeled.py
@@ -57,9 +57,7 @@ def _get_explicit_label(self) -> Optional[str]:
         return None
 
     def _get_owner_id(self) -> Optional[str]:
-        if hasattr(self, "owner_id"):
-            return getattr(self, "owner_id")
-        return None
+        return getattr(self, "owner_id", None)
 
     def _get_name(self) -> Optional[str]:
         if hasattr(self, "name"):
@@ -69,9 +67,7 @@ def _get_name(self) -> Optional[str]:
         return None
 
     def _get_config_id(self) -> Optional[str]:
-        if hasattr(self, "config_id"):
-            return getattr(self, "config_id")
-        return None
+        return getattr(self, "config_id", None)
 
     def _generate_entity_label(self) -> str:
         if name := self._get_name():
diff --git a/taipy/core/_entity/_migrate/_utils.py b/taipy/core/_entity/_migrate/_utils.py
index b53165a9aa..447e4cb752 100644
--- a/taipy/core/_entity/_migrate/_utils.py
+++ b/taipy/core/_entity/_migrate/_utils.py
@@ -48,8 +48,8 @@ def __search_parent_ids(entity_id: str, data: Dict) -> List:
         if entity_id in entity_data["input_ids"] or entity_id in entity_data["output_ids"]:
            parents.append(_id)
 
-        if entity_type == "TASK" and "SCENARIO" in _id:
-            if entity_id in entity_data["tasks"]:
+        if entity_id in entity_data.get("tasks", []):
+            if entity_type == "TASK" and "SCENARIO" in _id:
                 parents.append(_id)
     parents.sort()
     return parents
@@ -60,8 +60,8 @@ def __search_parent_config(entity_id: str, config: Dict, entity_type: str) -> Li
     possible_parents = "TASK" if entity_type == "DATA_NODE" else "SCENARIO"
     data = config[possible_parents]
 
+    section_id = f"{entity_id}:SECTION"
     for _id, entity_data in data.items():
-        section_id = f"{entity_id}:SECTION"
         if entity_type == "DATANODE" and possible_parents == "TASK":
             if section_id in entity_data["input_ids"] or section_id in entity_data["output_ids"]:
                 parents.append(section_id)
@@ -281,7 +281,7 @@ def __migrate_entities(entity_type: str, data: Dict) -> Dict:
     _entities = {k: data[k] for k in data if entity_type in k}
 
     for k, v in _entities.items():
-        if entity_type in ["JOB", "VERSION"]:
+        if entity_type in {"JOB", "VERSION"}:
             v["data"] = migration_fct(v["data"])  # type: ignore
         else:
             v["data"] = migration_fct(v["data"], data)  # type: ignore
diff --git a/taipy/core/_entity/_migrate_cli.py b/taipy/core/_entity/_migrate_cli.py
index 994f870cff..fb60d7862b 100644
--- a/taipy/core/_entity/_migrate_cli.py
+++ b/taipy/core/_entity/_migrate_cli.py
@@ -77,7 +77,7 @@ def parse_arguments(cls):
         if args.remove_backup:
             cls.__handle_remove_backup(repository_type, repository_args)
 
-        do_backup = False if args.skip_backup else True
+        do_backup = not args.skip_backup
         cls.__migrate_entities(repository_type, repository_args, do_backup)
         sys.exit(0)
@@ -124,14 +124,14 @@ def __migrate_entities(cls, repository_type: str, repository_args: List, do_back
             if not _migrate_fs_entities(path, do_backup):
                 sys.exit(1)
 
-        elif repository_type == "sql":
-            if not _migrate_sql_entities(repository_args[0], do_backup):
-                sys.exit(1)
-
         elif repository_type == "mongo":
             mongo_args = repository_args[1:5] if repository_args[0] else []
             _migrate_mongo_entities(*mongo_args, backup=do_backup)  # type: ignore
 
+        elif repository_type == "sql":
+            if not _migrate_sql_entities(repository_args[0], do_backup):
+                sys.exit(1)
+
         else:
             cls.__logger.error(f"Unknown repository type {repository_type}")
             sys.exit(1)
diff --git a/taipy/core/_entity/_reload.py b/taipy/core/_entity/_reload.py
index 542bf4546a..bbec207d4e 100644
--- a/taipy/core/_entity/_reload.py
+++ b/taipy/core/_entity/_reload.py
@@ -21,10 +21,10 @@ class _Reloader:
 
     _no_reload_context = False
 
-    def __new__(class_, *args, **kwargs):
-        if not isinstance(class_._instance, class_):
-            class_._instance = object.__new__(class_, *args, **kwargs)
-        return class_._instance
+    def __new__(cls, *args, **kwargs):
+        if not isinstance(cls._instance, cls):
+            cls._instance = object.__new__(cls, *args, **kwargs)
+        return cls._instance
 
     def _reload(self, manager: str, obj):
         if self._no_reload_context:
@@ -65,10 +65,7 @@ def __set_entity(fct):
     def _do_set_entity(self, *args, **kwargs):
         fct(self, *args, **kwargs)
         entity_manager = _get_manager(manager)
-        if len(args) == 1:
-            value = args[0]
-        else:
-            value = args
+        value = args[0] if len(args) == 1 else args
         event = _make_event(
             self,
             EventOperation.UPDATE,
diff --git a/taipy/core/_entity/submittable.py b/taipy/core/_entity/submittable.py
index 64841f1664..95c62d8701 100644
--- a/taipy/core/_entity/submittable.py
+++ b/taipy/core/_entity/submittable.py
@@ -33,7 +33,7 @@ class Submittable:
     """
 
     def __init__(self, subscribers: Optional[List[_Subscriber]] = None):
-        self._subscribers = _ListAttributes(self, subscribers or list())
+        self._subscribers = _ListAttributes(self, subscribers or [])
 
     @abc.abstractmethod
     def submit(
@@ -129,7 +129,11 @@ def _get_sorted_tasks(self) -> List[List[Task]]:
         dag = self._build_dag()
         remove = [node for node, degree in dict(dag.in_degree).items() if degree == 0 and isinstance(node, DataNode)]
         dag.remove_nodes_from(remove)
-        return list(nodes for nodes in nx.topological_generations(dag) if (Task in (type(node) for node in nodes)))
+        return [
+            nodes
+            for nodes in nx.topological_generations(dag)
+            if (Task in (type(node) for node in nodes))
+        ]
 
     def _add_subscriber(self, callback: Callable, params: Optional[List[Any]] = None):
         params = [] if params is None else params
@@ -138,8 +142,7 @@ def _add_subscriber(self, callback: Callable, params: Optional[List[Any]] = None
     def _remove_subscriber(self, callback: Callable, params: Optional[List[Any]] = None):
         if params is not None:
             self._subscribers.remove(_Subscriber(callback, params))
-        else:
-            elem = [x for x in self._subscribers if x.callback == callback]
-            if not elem:
-                raise ValueError
+        elif elem := [x for x in self._subscribers if x.callback == callback]:
             self._subscribers.remove(elem[0])
+        else:
+            raise ValueError
diff --git a/taipy/core/_orchestrator/_dispatcher/_task_function_wrapper.py b/taipy/core/_orchestrator/_dispatcher/_task_function_wrapper.py
index 8972dc35f7..8b5aa5236c 100644
--- a/taipy/core/_orchestrator/_dispatcher/_task_function_wrapper.py
+++ b/taipy/core/_orchestrator/_dispatcher/_task_function_wrapper.py
@@ -38,8 +38,7 @@ def __call__(self, **kwargs):
     def execute(self, **kwargs):
         """Execute the wrapped function. If `config_as_string` is given, then it will be reapplied to the config."""
         try:
-            config_as_string = kwargs.pop("config_as_string", None)
-            if config_as_string:
+            if config_as_string := kwargs.pop("config_as_string", None):
                 logger.info("Updating with given config.")
                 Config._applied_config._update(_TomlSerializer()._deserialize(config_as_string))
                 Config.block_update()
diff --git a/taipy/core/_orchestrator/_orchestrator.py b/taipy/core/_orchestrator/_orchestrator.py
index aa810118ea..5369a7963c 100644
--- a/taipy/core/_orchestrator/_orchestrator.py
+++ b/taipy/core/_orchestrator/_orchestrator.py
@@ -77,26 +77,26 @@ def submit(
         tasks = submittable._get_sorted_tasks()
         with cls.lock:
             for ts in tasks:
-                for task in ts:
-                    jobs.append(
-                        cls._lock_dn_output_and_create_job(
-                            task,
-                            submission.id,
-                            submission.entity_id,
-                            callbacks=itertools.chain([submission._update_submission_status], callbacks or []),
-                            force=force,  # type: ignore
-                        )
+                jobs.extend(
+                    cls._lock_dn_output_and_create_job(
+                        task,
+                        submission.id,
+                        submission.entity_id,
+                        callbacks=itertools.chain(
+                            [submission._update_submission_status], callbacks or []
+                        ),
+                        force=force,  # type: ignore
                     )
-
+                    for task in ts
+                )
         submission.jobs = jobs  # type: ignore
         cls._orchestrate_job_to_run_or_block(jobs)
 
         if Config.job_config.is_development:
             cls._check_and_execute_jobs_if_development_mode()
-        else:
-            if wait:
-                cls.__wait_until_job_finished(jobs, timeout=timeout)
+        elif wait:
+            cls.__wait_until_job_finished(jobs, timeout=timeout)
 
         return jobs
@@ -141,9 +141,8 @@ def submit_task(
 
         if Config.job_config.is_development:
             cls._check_and_execute_jobs_if_development_mode()
-        else:
-            if wait:
-                cls.__wait_until_job_finished(job, timeout=timeout)
+        elif wait:
+            cls.__wait_until_job_finished(job, timeout=timeout)
 
         return job
@@ -158,12 +157,14 @@ def _lock_dn_output_and_create_job(
     ) -> Job:
         for dn in task.output.values():
             dn.lock_edit()
-        job = _JobManagerFactory._build_manager()._create(
-            task, itertools.chain([cls._on_status_change], callbacks or []), submit_id, submit_entity_id, force=force
+        return _JobManagerFactory._build_manager()._create(
+            task,
+            itertools.chain([cls._on_status_change], callbacks or []),
+            submit_id,
+            submit_entity_id,
+            force=force,
         )
 
-        return job
-
     @classmethod
     def _orchestrate_job_to_run_or_block(cls, jobs: List[Job]):
         blocked_jobs = []
@@ -184,9 +185,7 @@ def _orchestrate_job_to_run_or_block(cls, jobs: List[Job]):
     @classmethod
     def __wait_until_job_finished(cls, jobs: Union[List[Job], Job], timeout: Optional[Union[float, int]] = None):
         def __check_if_timeout(start, timeout):
-            if timeout:
-                return (datetime.now() - start).seconds < timeout
-            return True
+            return (datetime.now() - start).seconds < timeout if timeout else True
 
         start = datetime.now()
         jobs = jobs if isinstance(jobs, Iterable) else [jobs]
@@ -195,7 +194,7 @@ def __check_if_timeout(start, timeout):
         while __check_if_timeout(start, timeout) and index < len(jobs):
             try:
                 if jobs[index]._is_finished():
-                    index = index + 1
+                    index += 1
                 else:
                     sleep(0.5)  # Limit CPU usage
@@ -255,7 +254,7 @@ def cancel_job(cls, job: Job):
             cls.__logger.info(f"{job.id} has already failed and cannot be canceled.")
         else:
             with cls.lock:
-                to_cancel_or_abandon_jobs = set([job])
+                to_cancel_or_abandon_jobs = {job}
                to_cancel_or_abandon_jobs.update(cls.__find_subsequent_jobs(job.submit_id, set(job.task.output.keys())))
                cls.__remove_blocked_jobs(to_cancel_or_abandon_jobs)
                cls.__remove_jobs_to_run(to_cancel_or_abandon_jobs)
@@ -271,7 +270,7 @@ def __find_subsequent_jobs(cls, submit_id, output_dn_config_ids: Set) -> Set[Job
             if job.submit_id == submit_id and len(output_dn_config_ids.intersection(job_input_dn_config_ids)) > 0:
                 next_output_dn_config_ids.update(job.task.output.keys())
                 subsequent_jobs.update([job])
-        if len(next_output_dn_config_ids) > 0:
+        if next_output_dn_config_ids:
             subsequent_jobs.update(
                 cls.__find_subsequent_jobs(submit_id, output_dn_config_ids=next_output_dn_config_ids)
             )
@@ -316,11 +315,10 @@ def _cancel_jobs(cls, job_id_to_cancel: JobId, jobs: Set[Job]):
                 cls.__logger.info(f"{job.id} has already been completed and cannot be canceled.")
             elif job.is_skipped():
                 cls.__logger.info(f"{job.id} has already been skipped and cannot be canceled.")
+            elif job_id_to_cancel == job.id:
+                job.canceled()
             else:
-                if job_id_to_cancel == job.id:
-                    job.canceled()
-                else:
-                    job.abandoned()
+                job.abandoned()
 
     @staticmethod
     def _check_and_execute_jobs_if_development_mode():
diff --git a/taipy/core/_repository/_base_taipy_model.py b/taipy/core/_repository/_base_taipy_model.py
index 68fff281e3..c7254fbadc 100644
--- a/taipy/core/_repository/_base_taipy_model.py
+++ b/taipy/core/_repository/_base_taipy_model.py
@@ -24,8 +24,7 @@ class _BaseModel:
     __table__: Table
 
     def __iter__(self):
-        for attr, value in self.__dict__.items():
-            yield attr, value
+        yield from self.__dict__.items()
 
     def to_dict(self) -> Dict[str, Any]:
         model_dict = {**dataclasses.asdict(self)}
diff --git a/taipy/core/_repository/_encoder.py b/taipy/core/_repository/_encoder.py
index ab48870bfe..513ec73d97 100644
--- a/taipy/core/_repository/_encoder.py
+++ b/taipy/core/_repository/_encoder.py
@@ -27,14 +27,13 @@ def _timedelta_to_str(self, obj: timedelta) -> str:
 
     def default(self, o: Any):
         if isinstance(o, Enum):
-            result = o.value
+            return o.value
         elif isinstance(o, datetime):
-            result = {"__type__": "Datetime", "__value__": o.isoformat()}
+            return {"__type__": "Datetime", "__value__": o.isoformat()}
         elif isinstance(o, timedelta):
-            result = {"__type__": "Timedelta", "__value__": self._timedelta_to_str(o)}
+            return {"__type__": "Timedelta", "__value__": self._timedelta_to_str(o)}
         else:
-            result = json.JSONEncoder.default(self, o)
-        return result
+            return json.JSONEncoder.default(self, o)
 
 
 def dumps(d):
diff --git a/taipy/core/_repository/_filesystem_repository.py b/taipy/core/_repository/_filesystem_repository.py
index d352e9316b..1cbe4ae52c 100644
--- a/taipy/core/_repository/_filesystem_repository.py
+++ b/taipy/core/_repository/_filesystem_repository.py
@@ -159,7 +159,7 @@ def _get_by_configs_and_owner_ids(self, configs_and_owner_ids, filters: Optional
                         res[key] = entity
                        configs_and_owner_ids.remove(key)
 
-                if len(configs_and_owner_ids) == 0:
+                if not configs_and_owner_ids:
                     return res
        except FileNotFoundError:
            # Folder with data was not created yet.
@@ -170,11 +170,7 @@ def _get_by_configs_and_owner_ids(self, configs_and_owner_ids, filters: Optional def _get_by_config_and_owner_id( self, config_id: str, owner_id: Optional[str], filters: Optional[List[Dict]] = None ) -> Optional[Entity]: - if not filters: - filters = [{}] - else: - filters = copy.deepcopy(filters) - + filters = [{}] if not filters else copy.deepcopy(filters) if owner_id is not None: for fil in filters: fil.update({"owner_id": owner_id}) @@ -228,8 +224,7 @@ def __file_content_to_entity(self, file_content): if isinstance(file_content, str): file_content = json.loads(file_content, cls=_Decoder) model = self.model_type.from_dict(file_content) - entity = self.converter._model_to_entity(model) - return entity + return self.converter._model_to_entity(model) def __filter_by(self, filepath: pathlib.Path, filters: Optional[List[Dict]]) -> Optional[Json]: if not filters: diff --git a/taipy/core/_repository/_sql_repository.py b/taipy/core/_repository/_sql_repository.py index e0d8e79d97..3317f0133b 100644 --- a/taipy/core/_repository/_sql_repository.py +++ b/taipy/core/_repository/_sql_repository.py @@ -130,11 +130,14 @@ def _export(self, entity_id: str, folder_path: Union[str, pathlib.Path]): query = self.table.select().filter_by(id=entity_id) - if entry := self.db.execute(str(query.compile(dialect=sqlite.dialect())), [entity_id]).fetchone(): - with open(export_path, "w", encoding="utf-8") as export_file: - export_file.write(json.dumps(entry)) - else: + if not ( + entry := self.db.execute( + str(query.compile(dialect=sqlite.dialect())), [entity_id] + ).fetchone() + ): raise ModelNotFound(self.model_type, entity_id) # type: ignore + with open(export_path, "w", encoding="utf-8") as export_file: + export_file.write(json.dumps(entry)) ########################################### # ## Specific or optimized methods ## # @@ -165,8 +168,9 @@ def _get_by_configs_and_owner_ids(self, configs_and_owner_ids, filters: Optional configs_and_owner_ids = set(configs_and_owner_ids) for config, owner in configs_and_owner_ids: - entry = self.__get_entities_by_config_and_owner(config.id, owner, filters) - if entry: + if entry := self.__get_entities_by_config_and_owner( + config.id, owner, filters + ): entity = self.converter._model_to_entity(entry) key = config, owner res[key] = entity @@ -190,7 +194,7 @@ def __get_entities_by_config_and_owner( if versions: table_name = self.table.name - query = query + f" AND {table_name}.version IN ({','.join(['?']*len(versions))})" + query += f" AND {table_name}.version IN ({','.join(['?'] * len(versions))})" parameters.extend(versions) if entry := self.db.execute(query, parameters).fetchone(): diff --git a/taipy/core/_repository/db/_sql_connection.py b/taipy/core/_repository/db/_sql_connection.py index 1127cc4e3f..9d7442cbe7 100644 --- a/taipy/core/_repository/db/_sql_connection.py +++ b/taipy/core/_repository/db/_sql_connection.py @@ -22,10 +22,7 @@ def dict_factory(cursor, row): - d = {} - for idx, col in enumerate(cursor.description): - d[col[0]] = row[idx] - return d + return {col[0]: row[idx] for idx, col in enumerate(cursor.description)} class _SQLConnection: diff --git a/taipy/core/_version/_utils.py b/taipy/core/_version/_utils.py index 369ddd9be2..a11f548895 100644 --- a/taipy/core/_version/_utils.py +++ b/taipy/core/_version/_utils.py @@ -43,8 +43,11 @@ def __get_migration_fcts_to_latest(source_version: str, config_id: str) -> List[ versions_to_migrate = production_versions[start_index:] for version in versions_to_migrate: - migration_fct = 
Config.unique_sections[MigrationConfig.name].migration_fcts.get(version, {}).get(config_id) - if migration_fct: + if ( + migration_fct := Config.unique_sections[MigrationConfig.name] + .migration_fcts.get(version, {}) + .get(config_id) + ): migration_fcts_to_latest.append(migration_fct) return migration_fcts_to_latest diff --git a/taipy/core/_version/_version_manager.py b/taipy/core/_version/_version_manager.py index 99cf0ee04c..32647260a3 100644 --- a/taipy/core/_version/_version_manager.py +++ b/taipy/core/_version/_version_manager.py @@ -54,14 +54,13 @@ def _get_or_create(cls, id: str, force: bool) -> _Version: if version := cls._get(id): comparator_result = Config._comparator._find_conflict_config(version.config, Config._applied_config, id) if comparator_result.get(_ComparatorResult.CONFLICTED_SECTION_KEY): - if force: - cls.__logger.warning( - f"Option --force is detected, overriding the configuration of version {id} ..." - ) - version.config = Config._applied_config - else: + if not force: raise ConflictedConfigurationError() + cls.__logger.warning( + f"Option --force is detected, overriding the configuration of version {id} ..." + ) + version.config = Config._applied_config else: version = _Version(id=id, config=Config._applied_config) @@ -212,16 +211,16 @@ def _manage_version(cls): raise SystemExit(f"Undefined execution mode: {Config.core.mode}.") @classmethod - def __check_production_migration_config(self): + def __check_production_migration_config(cls): from ..config.checkers._migration_config_checker import _MigrationConfigChecker collector = _MigrationConfigChecker(Config._applied_config, IssueCollector())._check() for issue in collector._warnings: - self.__logger.warning(str(issue)) + cls.__logger.warning(str(issue)) for issue in collector._infos: - self.__logger.info(str(issue)) + cls.__logger.info(str(issue)) for issue in collector._errors: - self.__logger.error(str(issue)) + cls.__logger.error(str(issue)) if len(collector._errors) != 0: raise SystemExit("Configuration errors found. 
Please check the error log for more information.") diff --git a/taipy/core/_version/_version_manager_factory.py b/taipy/core/_version/_version_manager_factory.py index b417a04e0c..ea91ea62b0 100644 --- a/taipy/core/_version/_version_manager_factory.py +++ b/taipy/core/_version/_version_manager_factory.py @@ -24,11 +24,13 @@ class _VersionManagerFactory(_ManagerFactory): def _build_manager(cls) -> _VersionManager: # type: ignore if cls._using_enterprise(): version_manager = _utils._load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + "._version._version_manager", "_VersionManager" - ) # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}._version._version_manager", + "_VersionManager", + ) build_repository = _utils._load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + "._version._version_manager_factory", "_VersionManagerFactory" - )._build_repository # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}._version._version_manager_factory", + "_VersionManagerFactory", + )._build_repository else: version_manager = _VersionManager build_repository = cls._build_repository diff --git a/taipy/core/_version/_version_mixin.py b/taipy/core/_version/_version_mixin.py index 8f3a5c4175..e8ec8b1391 100644 --- a/taipy/core/_version/_version_mixin.py +++ b/taipy/core/_version/_version_mixin.py @@ -28,10 +28,11 @@ def __fetch_version_number(cls, version_number): @classmethod def _build_filters_with_version(cls, version_number) -> List[Dict]: - filters = [] - if versions := cls.__fetch_version_number(version_number): - filters = [{"version": version} for version in versions] - return filters + return ( + [{"version": version} for version in versions] + if (versions := cls.__fetch_version_number(version_number)) + else [] + ) @classmethod def _get_latest_version(cls): diff --git a/taipy/core/common/_mongo_connector.py b/taipy/core/common/_mongo_connector.py index bb85daa70f..e6781cc2ed 100644 --- a/taipy/core/common/_mongo_connector.py +++ b/taipy/core/common/_mongo_connector.py @@ -38,7 +38,7 @@ def _connect_mongodb( extra_args_str = "&".join(f"{k}={str(v)}" for k, v in db_extra_args) if extra_args_str: - extra_args_str = "/?" 
+ extra_args_str + extra_args_str = f"/?{extra_args_str}" driver = "mongodb" if db_driver: diff --git a/taipy/core/common/_utils.py b/taipy/core/common/_utils.py index 25ec7a4897..73affe29e9 100644 --- a/taipy/core/common/_utils.py +++ b/taipy/core/common/_utils.py @@ -52,10 +52,7 @@ def newfn(*args, **kwargs): @functools.lru_cache def _get_fct_name(f) -> Optional[str]: - # Mock function does not have __qualname__ attribute -> return __name__ - # Partial or anonymous function does not have __name__ or __qualname__ attribute -> return None - name = getattr(f, "__qualname__", getattr(f, "__name__", None)) - return name + return getattr(f, "__qualname__", getattr(f, "__name__", None)) def _fct_to_dict(obj): @@ -66,14 +63,14 @@ def _fct_to_dict(obj): callback = obj.callback params = obj.params - fct_name = _get_fct_name(callback) - if not fct_name: + if fct_name := _get_fct_name(callback): + return { + "fct_name": fct_name, + "fct_params": params, + "fct_module": callback.__module__, + } + else: return None - return { - "fct_name": fct_name, - "fct_params": params, - "fct_module": callback.__module__, - } def _fcts_to_dict(objs): diff --git a/taipy/core/config/checkers/_config_id_checker.py b/taipy/core/config/checkers/_config_id_checker.py index 896aaff103..9bc4ae703a 100644 --- a/taipy/core/config/checkers/_config_id_checker.py +++ b/taipy/core/config/checkers/_config_id_checker.py @@ -24,7 +24,7 @@ def _check(self) -> IssueCollector: existing_config_ids: Dict[str, List[str]] = dict() for entity_type, section_dictionary in self._config._sections.items(): for config_id in section_dictionary.keys(): - if config_id in existing_config_ids.keys(): + if config_id in existing_config_ids: existing_config_ids[config_id].append(entity_type) else: existing_config_ids[config_id] = [entity_type] diff --git a/taipy/core/config/checkers/_data_node_config_checker.py b/taipy/core/config/checkers/_data_node_config_checker.py index ec11d62eb5..862ee15edf 100644 --- a/taipy/core/config/checkers/_data_node_config_checker.py +++ b/taipy/core/config/checkers/_data_node_config_checker.py @@ -66,95 +66,102 @@ def _check_validity_period(self, data_node_config_id: str, data_node_config: Dat ) def _check_required_properties(self, data_node_config_id: str, data_node_config: DataNodeConfig): - if storage_type := data_node_config.storage_type: - if storage_type in DataNodeConfig._REQUIRED_PROPERTIES: - required_properties = DataNodeConfig._REQUIRED_PROPERTIES[storage_type] + if not (storage_type := data_node_config.storage_type): + return + if storage_type in DataNodeConfig._REQUIRED_PROPERTIES: + required_properties = DataNodeConfig._REQUIRED_PROPERTIES[storage_type] + if data_node_config.properties: if storage_type == DataNodeConfig._STORAGE_TYPE_VALUE_SQL: - if data_node_config.properties: - if engine := data_node_config.properties.get(DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY): - if engine == DataNodeConfig._DB_ENGINE_SQLITE: - required_properties = [ - DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, - DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, - DataNodeConfig._REQUIRED_READ_QUERY_SQL_PROPERTY, - DataNodeConfig._REQUIRED_WRITE_QUERY_BUILDER_SQL_PROPERTY, - ] - else: - required_properties = [ - DataNodeConfig._OPTIONAL_DB_USERNAME_SQL_PROPERTY, - DataNodeConfig._OPTIONAL_DB_PASSWORD_SQL_PROPERTY, - DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, - DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, - DataNodeConfig._REQUIRED_READ_QUERY_SQL_PROPERTY, - DataNodeConfig._REQUIRED_WRITE_QUERY_BUILDER_SQL_PROPERTY, - 
] + if engine := data_node_config.properties.get(DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY): + required_properties = ( + [ + DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, + DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, + DataNodeConfig._REQUIRED_READ_QUERY_SQL_PROPERTY, + DataNodeConfig._REQUIRED_WRITE_QUERY_BUILDER_SQL_PROPERTY, + ] + if engine == DataNodeConfig._DB_ENGINE_SQLITE + else [ + DataNodeConfig._OPTIONAL_DB_USERNAME_SQL_PROPERTY, + DataNodeConfig._OPTIONAL_DB_PASSWORD_SQL_PROPERTY, + DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, + DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, + DataNodeConfig._REQUIRED_READ_QUERY_SQL_PROPERTY, + DataNodeConfig._REQUIRED_WRITE_QUERY_BUILDER_SQL_PROPERTY, + ] + ) + if data_node_config.properties: if storage_type == DataNodeConfig._STORAGE_TYPE_VALUE_SQL_TABLE: - if data_node_config.properties: - if engine := data_node_config.properties.get(DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY): - if engine == DataNodeConfig._DB_ENGINE_SQLITE: - required_properties = [ - DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, - DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, - DataNodeConfig._REQUIRED_TABLE_NAME_SQL_TABLE_PROPERTY, - ] - else: - required_properties = [ - DataNodeConfig._OPTIONAL_DB_USERNAME_SQL_PROPERTY, - DataNodeConfig._OPTIONAL_DB_PASSWORD_SQL_PROPERTY, - DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, - DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, - DataNodeConfig._REQUIRED_TABLE_NAME_SQL_TABLE_PROPERTY, - ] - for required_property in required_properties: - if not data_node_config.properties or required_property not in data_node_config.properties: - if data_node_config_id == DataNodeConfig._DEFAULT_KEY: - self._warning( - required_property, - None, - f"DataNodeConfig `{data_node_config_id}` is missing the required " - f"property `{required_property}` for type `{storage_type}`.", - ) + if engine := data_node_config.properties.get(DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY): + if engine == DataNodeConfig._DB_ENGINE_SQLITE: + required_properties = [ + DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, + DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, + DataNodeConfig._REQUIRED_TABLE_NAME_SQL_TABLE_PROPERTY, + ] else: - self._error( - required_property, - None, - f"DataNodeConfig `{data_node_config_id}` is missing the required " - f"property `{required_property}` for type `{storage_type}`.", - ) - - def _check_generic_read_write_fct_and_args(self, data_node_config_id: str, data_node_config: DataNodeConfig): - if data_node_config.storage_type == DataNodeConfig._STORAGE_TYPE_VALUE_GENERIC: - properties_to_check = [ - DataNodeConfig._OPTIONAL_READ_FUNCTION_ARGS_GENERIC_PROPERTY, - DataNodeConfig._OPTIONAL_WRITE_FUNCTION_ARGS_GENERIC_PROPERTY, - ] - for prop_key in properties_to_check: - if data_node_config.properties and prop_key in data_node_config.properties: - prop_value = data_node_config.properties[prop_key] - if not isinstance(prop_value, list): + required_properties = [ + DataNodeConfig._OPTIONAL_DB_USERNAME_SQL_PROPERTY, + DataNodeConfig._OPTIONAL_DB_PASSWORD_SQL_PROPERTY, + DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, + DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, + DataNodeConfig._REQUIRED_TABLE_NAME_SQL_TABLE_PROPERTY, + ] + for required_property in required_properties: + if not data_node_config.properties or required_property not in data_node_config.properties: + if data_node_config_id == DataNodeConfig._DEFAULT_KEY: + self._warning( + required_property, + None, + f"DataNodeConfig `{data_node_config_id}` is 
missing the required " + f"property `{required_property}` for type `{storage_type}`.", + ) + else: self._error( - prop_key, - prop_value, - f"`{prop_key}` field of DataNodeConfig" - f" `{data_node_config_id}` must be populated with a List value.", + required_property, + None, + f"DataNodeConfig `{data_node_config_id}` is missing the required " + f"property `{required_property}` for type `{storage_type}`.", ) - if data_node_config_id != DataNodeConfig._DEFAULT_KEY: - properties_to_check_at_least_one = [ - DataNodeConfig._OPTIONAL_READ_FUNCTION_GENERIC_PROPERTY, - DataNodeConfig._OPTIONAL_WRITE_FUNCTION_GENERIC_PROPERTY, - ] - has_at_least_one = False - for prop_key in properties_to_check_at_least_one: - if data_node_config.properties and prop_key in data_node_config.properties: - has_at_least_one = True - if not has_at_least_one: + + def _check_generic_read_write_fct_and_args(self, data_node_config_id: str, data_node_config: DataNodeConfig): + if ( + data_node_config.storage_type + != DataNodeConfig._STORAGE_TYPE_VALUE_GENERIC + ): + return + properties_to_check = [ + DataNodeConfig._OPTIONAL_READ_FUNCTION_ARGS_GENERIC_PROPERTY, + DataNodeConfig._OPTIONAL_WRITE_FUNCTION_ARGS_GENERIC_PROPERTY, + ] + for prop_key in properties_to_check: + if data_node_config.properties and prop_key in data_node_config.properties: + prop_value = data_node_config.properties[prop_key] + if not isinstance(prop_value, list): self._error( - ", ".join(properties_to_check_at_least_one), - None, - f"Either `{DataNodeConfig._OPTIONAL_READ_FUNCTION_GENERIC_PROPERTY}` field or " - f"`{DataNodeConfig._OPTIONAL_WRITE_FUNCTION_GENERIC_PROPERTY}` field of " - f"DataNodeConfig `{data_node_config_id}` must be populated with a Callable function.", + prop_key, + prop_value, + f"`{prop_key}` field of DataNodeConfig" + f" `{data_node_config_id}` must be populated with a List value.", ) + if data_node_config_id != DataNodeConfig._DEFAULT_KEY: + properties_to_check_at_least_one = [ + DataNodeConfig._OPTIONAL_READ_FUNCTION_GENERIC_PROPERTY, + DataNodeConfig._OPTIONAL_WRITE_FUNCTION_GENERIC_PROPERTY, + ] + has_at_least_one = any( + data_node_config.properties + and prop_key in data_node_config.properties + for prop_key in properties_to_check_at_least_one + ) + if not has_at_least_one: + self._error( + ", ".join(properties_to_check_at_least_one), + None, + f"Either `{DataNodeConfig._OPTIONAL_READ_FUNCTION_GENERIC_PROPERTY}` field or " + f"`{DataNodeConfig._OPTIONAL_WRITE_FUNCTION_GENERIC_PROPERTY}` field of " + f"DataNodeConfig `{data_node_config_id}` must be populated with a Callable function.", + ) def _check_callable(self, data_node_config_id: str, data_node_config: DataNodeConfig): properties_to_check = { @@ -168,7 +175,7 @@ def _check_callable(self, data_node_config_id: str, data_node_config: DataNodeCo ], } - if data_node_config.storage_type in properties_to_check.keys(): + if data_node_config.storage_type in properties_to_check: for prop_key in properties_to_check[data_node_config.storage_type]: prop_value = data_node_config.properties.get(prop_key) if data_node_config.properties else None if prop_value and not callable(prop_value): diff --git a/taipy/core/config/checkers/_scenario_config_checker.py b/taipy/core/config/checkers/_scenario_config_checker.py index 981363e59b..23108dc064 100644 --- a/taipy/core/config/checkers/_scenario_config_checker.py +++ b/taipy/core/config/checkers/_scenario_config_checker.py @@ -115,10 +115,11 @@ def _check_additional_dns_not_overlapping_tasks_dns(self, scenario_config_id: st def 
_check_tasks_in_sequences_exist_in_scenario_tasks( self, scenario_config_id: str, scenario_config: ScenarioConfig ): - scenario_task_ids = set() - for task_config in scenario_config.tasks: - if isinstance(task_config, TaskConfig): - scenario_task_ids.add(task_config.id) + scenario_task_ids = { + task_config.id + for task_config in scenario_config.tasks + if isinstance(task_config, TaskConfig) + } for sequence_tasks in scenario_config.sequences.values(): self._check_children( ScenarioConfig, diff --git a/taipy/core/config/checkers/_task_config_checker.py b/taipy/core/config/checkers/_task_config_checker.py index a4f8b4c483..dae13a4524 100644 --- a/taipy/core/config/checkers/_task_config_checker.py +++ b/taipy/core/config/checkers/_task_config_checker.py @@ -49,11 +49,10 @@ def _check_existing_function(self, task_config_id: str, task_config: TaskConfig) task_config.function, f"{task_config._FUNCTION} field of TaskConfig `{task_config_id}` is empty.", ) - else: - if not callable(task_config.function): - self._error( - task_config._FUNCTION, - task_config.function, - f"{task_config._FUNCTION} field of TaskConfig `{task_config_id}` must be" - f" populated with Callable value.", - ) + elif not callable(task_config.function): + self._error( + task_config._FUNCTION, + task_config.function, + f"{task_config._FUNCTION} field of TaskConfig `{task_config_id}` must be" + f" populated with Callable value.", + ) diff --git a/taipy/core/config/data_node_config.py b/taipy/core/config/data_node_config.py index c4f493dc84..dcf5ef74c3 100644 --- a/taipy/core/config/data_node_config.py +++ b/taipy/core/config/data_node_config.py @@ -299,10 +299,7 @@ def validity_period(self, val): def cacheable(self): _warn_deprecated("cacheable", suggest="the skippable feature") cacheable = self._properties.get("cacheable") - if cacheable is not None: - return _tpl._replace_templates(cacheable) - else: - return False + return _tpl._replace_templates(cacheable) if cacheable is not None else False @cacheable.setter # type: ignore @_ConfigBlocker._check() @@ -454,20 +451,20 @@ def _configure( Returns: The new data node configuration. 
""" - configuration_map: Dict[str, Callable] = { - cls._STORAGE_TYPE_VALUE_PICKLE: cls._configure_pickle, - cls._STORAGE_TYPE_VALUE_SQL_TABLE: cls._configure_sql_table, - cls._STORAGE_TYPE_VALUE_SQL: cls._configure_sql, - cls._STORAGE_TYPE_VALUE_MONGO_COLLECTION: cls._configure_mongo_collection, - cls._STORAGE_TYPE_VALUE_CSV: cls._configure_csv, - cls._STORAGE_TYPE_VALUE_EXCEL: cls._configure_excel, - cls._STORAGE_TYPE_VALUE_IN_MEMORY: cls._configure_in_memory, - cls._STORAGE_TYPE_VALUE_GENERIC: cls._configure_generic, - cls._STORAGE_TYPE_VALUE_JSON: cls._configure_json, - cls._STORAGE_TYPE_VALUE_PARQUET: cls._configure_parquet, - } - if storage_type in cls._ALL_STORAGE_TYPES: + configuration_map: Dict[str, Callable] = { + cls._STORAGE_TYPE_VALUE_PICKLE: cls._configure_pickle, + cls._STORAGE_TYPE_VALUE_SQL_TABLE: cls._configure_sql_table, + cls._STORAGE_TYPE_VALUE_SQL: cls._configure_sql, + cls._STORAGE_TYPE_VALUE_MONGO_COLLECTION: cls._configure_mongo_collection, + cls._STORAGE_TYPE_VALUE_CSV: cls._configure_csv, + cls._STORAGE_TYPE_VALUE_EXCEL: cls._configure_excel, + cls._STORAGE_TYPE_VALUE_IN_MEMORY: cls._configure_in_memory, + cls._STORAGE_TYPE_VALUE_GENERIC: cls._configure_generic, + cls._STORAGE_TYPE_VALUE_JSON: cls._configure_json, + cls._STORAGE_TYPE_VALUE_PARQUET: cls._configure_parquet, + } + return configuration_map[storage_type](id=id, scope=scope, validity_period=validity_period, **properties) return cls.__configure(id, storage_type, scope, validity_period, **properties) diff --git a/taipy/core/config/job_config.py b/taipy/core/config/job_config.py index d8607979a3..c5e02c0c23 100644 --- a/taipy/core/config/job_config.py +++ b/taipy/core/config/job_config.py @@ -66,8 +66,7 @@ def _to_dict(self): @classmethod def _from_dict(cls, config_as_dict: Dict[str, Any], id=None, config: Optional[_Config] = None): mode = config_as_dict.pop(cls._MODE_KEY, None) - job_config = JobConfig(mode, **config_as_dict) - return job_config + return JobConfig(mode, **config_as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): mode = _tpl._replace_templates(as_dict.pop(self._MODE_KEY, self.mode)) diff --git a/taipy/core/config/scenario_config.py b/taipy/core/config/scenario_config.py index 2add956c1e..e6e161275a 100644 --- a/taipy/core/config/scenario_config.py +++ b/taipy/core/config/scenario_config.py @@ -90,7 +90,7 @@ def __init__( def __copy__(self): comp = None if self.comparators is None else self.comparators - scenario_config = ScenarioConfig( + return ScenarioConfig( self.id, copy(self._tasks), copy(self._additional_data_nodes), @@ -99,7 +99,6 @@ def __copy__(self): copy(self.sequences), **copy(self._properties), ) - return scenario_config def __getattr__(self, item: str) -> Optional[Any]: return _tpl._replace_templates(self._properties.get(item)) @@ -138,11 +137,11 @@ def __get_all_unique_data_nodes(self) -> List[DataNodeConfig]: @classmethod def default_config(cls): - return ScenarioConfig(cls._DEFAULT_KEY, list(), list(), None, dict()) + return ScenarioConfig(cls._DEFAULT_KEY, [], [], None, dict()) def _clean(self): - self._tasks = list() - self._additional_data_nodes = list() + self._tasks = [] + self._additional_data_nodes = [] self.frequency = None self.comparators = dict() self.sequences = dict() @@ -164,9 +163,9 @@ def _from_dict( ) -> "ScenarioConfig": # type: ignore as_dict.pop(cls._ID_KEY, id) - tasks = cls.__get_task_configs(as_dict.pop(cls._TASKS_KEY, list()), config) + tasks = cls.__get_task_configs(as_dict.pop(cls._TASKS_KEY, []), config) - 
additional_data_node_ids = as_dict.pop(cls._ADDITIONAL_DATA_NODES_KEY, list()) + additional_data_node_ids = as_dict.pop(cls._ADDITIONAL_DATA_NODES_KEY, []) additional_data_nodes = cls.__get_additional_data_node_configs(additional_data_node_ids, config) frequency = as_dict.pop(cls._FREQUENCY_KEY, None) @@ -176,7 +175,7 @@ def _from_dict( for sequence_name, sequence_tasks in sequences.items(): sequences[sequence_name] = cls.__get_task_configs(sequence_tasks, config) - scenario_config = ScenarioConfig( + return ScenarioConfig( id=id, tasks=tasks, additional_data_nodes=additional_data_nodes, @@ -186,8 +185,6 @@ def _from_dict( **as_dict, ) - return scenario_config - @staticmethod def __get_task_configs(task_config_ids: List[str], config: Optional[_Config]): task_configs = set() diff --git a/taipy/core/cycle/_cycle_manager_factory.py b/taipy/core/cycle/_cycle_manager_factory.py index 04673b0c3b..865573c69a 100644 --- a/taipy/core/cycle/_cycle_manager_factory.py +++ b/taipy/core/cycle/_cycle_manager_factory.py @@ -26,11 +26,13 @@ class _CycleManagerFactory(_ManagerFactory): def _build_manager(cls) -> Type[_CycleManager]: # type: ignore if cls._using_enterprise(): cycle_manager = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".cycle._cycle_manager", "_CycleManager" - ) # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.cycle._cycle_manager", + "_CycleManager", + ) build_repository = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".cycle._cycle_manager_factory", "_CycleManagerFactory" - )._build_repository # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.cycle._cycle_manager_factory", + "_CycleManagerFactory", + )._build_repository else: cycle_manager = _CycleManager build_repository = cls._build_repository diff --git a/taipy/core/cycle/cycle.py b/taipy/core/cycle/cycle.py index 102a197a9e..2dc2f300f7 100644 --- a/taipy/core/cycle/cycle.py +++ b/taipy/core/cycle/cycle.py @@ -141,10 +141,10 @@ def _get_valid_filename(name: str) -> str: """ Source: https://github.com/django/django/blob/main/django/utils/text.py """ - s = str(name).strip().replace(" ", "_") + s = name.strip().replace(" ", "_") s = re.sub(r"(?u)[^-\w.]", "", s) if s in {"", ".", ".."}: - raise _SuspiciousFileOperation("Could not derive file name from '%s'" % name) + raise _SuspiciousFileOperation(f"Could not derive file name from '{name}'") s = str(s).strip().replace(" ", "_") return re.sub(r"(?u)[^-\w.]", "", s) @@ -154,7 +154,9 @@ def __getattr__(self, attribute_name): protected_attribute_name = attribute_name if protected_attribute_name in self._properties: return self._properties[protected_attribute_name] - raise AttributeError(f"{attribute_name} is not an attribute of cycle {self.id}") + raise AttributeError( + f"{protected_attribute_name} is not an attribute of cycle {self.id}" + ) def __eq__(self, other): return self.id == other.id diff --git a/taipy/core/data/_abstract_sql.py b/taipy/core/data/_abstract_sql.py index b3015f734f..6e3d741aad 100644 --- a/taipy/core/data/_abstract_sql.py +++ b/taipy/core/data/_abstract_sql.py @@ -152,7 +152,7 @@ def _check_required_properties(self, properties: Dict): if missing := set(required) - set(properties.keys()): raise MissingRequiredProperty( - f"The following properties " f"{', '.join(x for x in missing)} were not informed and are required." + f"The following properties {', '.join(missing)} were not informed and are required." 
) def _get_engine(self): diff --git a/taipy/core/data/_data_converter.py b/taipy/core/data/_data_converter.py index 0744020672..3eeb65eeb8 100644 --- a/taipy/core/data/_data_converter.py +++ b/taipy/core/data/_data_converter.py @@ -87,7 +87,10 @@ def __serialize_sql_dn_properties(cls, datanode_properties: dict) -> dict: @classmethod def __serialize_mongo_collection_dn_model_properties(cls, datanode_properties: dict) -> dict: - if MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY in datanode_properties.keys(): + if ( + MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY + in datanode_properties + ): datanode_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY] = ( f"{datanode_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY].__module__}." f"{datanode_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY].__qualname__}" @@ -242,7 +245,10 @@ def __deserialize_sql_dn_model_properties(cls, datanode_model_properties: dict) @classmethod def __deserialize_mongo_collection_dn_model_properties(cls, datanode_model_properties: dict) -> dict: - if MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY in datanode_model_properties.keys(): + if ( + MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY + in datanode_model_properties + ): if isinstance(datanode_model_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY], str): datanode_model_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY] = locate( datanode_model_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY] diff --git a/taipy/core/data/_data_manager.py b/taipy/core/data/_data_manager.py index ee5fc885de..c1bbeedfb5 100644 --- a/taipy/core/data/_data_manager.py +++ b/taipy/core/data/_data_manager.py @@ -136,8 +136,7 @@ def _remove_dn_file_paths_in_backup_file(cls, data_nodes: Iterable[DataNode]): @classmethod def _delete(cls, data_node_id: DataNodeId): - data_node = cls._get(data_node_id, None) - if data_node: + if data_node := cls._get(data_node_id, None): cls._clean_pickle_file(data_node) cls._remove_dn_file_path_in_backup_file(data_node) super()._delete(data_node_id) diff --git a/taipy/core/data/_data_manager_factory.py b/taipy/core/data/_data_manager_factory.py index 8da25bd04d..1212c2b6c1 100644 --- a/taipy/core/data/_data_manager_factory.py +++ b/taipy/core/data/_data_manager_factory.py @@ -26,11 +26,13 @@ class _DataManagerFactory(_ManagerFactory): def _build_manager(cls) -> Type[_DataManager]: # type: ignore if cls._using_enterprise(): data_manager = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".data._data_manager", "_DataManager" - ) # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.data._data_manager", + "_DataManager", + ) build_repository = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".data._data_manager_factory", "_DataManagerFactory" - )._build_repository # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.data._data_manager_factory", + "_DataManagerFactory", + )._build_repository else: data_manager = _DataManager build_repository = cls._build_repository diff --git a/taipy/core/data/_filter.py b/taipy/core/data/_filter.py index bcadba90b2..05bb954df2 100644 --- a/taipy/core/data/_filter.py +++ b/taipy/core/data/_filter.py @@ -26,14 +26,19 @@ class _FilterDataNode: @staticmethod def __is_pandas_object(data) -> bool: - return isinstance(data, (pd.DataFrame, modin_pd.DataFrame)) or isinstance(data, (pd.Series, modin_pd.DataFrame)) + return isinstance( + data, (pd.DataFrame, modin_pd.DataFrame, pd.Series, modin_pd.Series) + ) @staticmethod def __is_multi_sheet_excel(data)
-> bool: if isinstance(data, Dict): - has_df_children = all([isinstance(e, (pd.DataFrame, modin_pd.DataFrame)) for e in data.values()]) - has_list_children = all([isinstance(e, List) for e in data.values()]) - has_np_array_children = all([isinstance(e, np.ndarray) for e in data.values()]) + has_df_children = all( + isinstance(e, (pd.DataFrame, modin_pd.DataFrame)) + for e in data.values() + ) + has_list_children = all(isinstance(e, List) for e in data.values()) + has_np_array_children = all(isinstance(e, np.ndarray) for e in data.values()) return has_df_children or has_list_children or has_np_array_children return False @@ -82,7 +87,7 @@ def __getitem_dataframe(data, key: Union[pd.DataFrame, modin_pd.DataFrame]): if _FilterDataNode.__is_pandas_object(data): return data[key] if _FilterDataNode.__is_list_of_dict(data): - filtered_data = list() + filtered_data = [] for i, row in key.iterrows(): filtered_row = dict() for col in row.index: @@ -101,10 +106,10 @@ def __getitem_bool_indexer(data, key): def __getitem_iterable(data, keys): if _FilterDataNode.__is_pandas_object(data): return data[keys] - filtered_data = [] - for entry in data: - filtered_data.append({k: getattr(entry, k) for k in keys if hasattr(entry, k)}) - return filtered_data + return [ + {k: getattr(entry, k) for k in keys if hasattr(entry, k)} + for entry in data + ] @staticmethod def _filter(data, operators: Union[List, Tuple], join_operator=JoinOperator.AND): @@ -114,7 +119,7 @@ def _filter(data, operators: Union[List, Tuple], join_operator=JoinOperator.AND) if isinstance(data, Dict): return {k: _FilterDataNode._filter(v, operators, join_operator) for k, v in data.items()} - if not ((isinstance(operators[0], list)) or (isinstance(operators[0], tuple))): + if not (isinstance(operators[0], (list, tuple))): if isinstance(data, (pd.DataFrame, modin_pd.DataFrame)): return _FilterDataNode.__filter_dataframe_per_key_value(data, operators[0], operators[1], operators[2]) if isinstance(data, np.ndarray): @@ -135,20 +140,23 @@ def _filter(data, operators: Union[List, Tuple], join_operator=JoinOperator.AND) def __filter_dataframe( df_data: Union[pd.DataFrame, modin_pd.DataFrame], operators: Union[List, Tuple], join_operator=JoinOperator.AND ): - filtered_df_data = [] if join_operator == JoinOperator.AND: how = "inner" elif join_operator == JoinOperator.OR: how = "outer" else: raise NotImplementedError - for key, value, operator in operators: - filtered_df_data.append(_FilterDataNode.__filter_dataframe_per_key_value(df_data, key, value, operator)) - + filtered_df_data = [ + _FilterDataNode.__filter_dataframe_per_key_value( + df_data, key, value, operator + ) + for key, value, operator in operators + ] if isinstance(df_data, modin_pd.DataFrame): if filtered_df_data: return _FilterDataNode.__modin_dataframe_merge(filtered_df_data, how) - return modin_pd.DataFrame() + else: + return modin_pd.DataFrame() return _FilterDataNode.__dataframe_merge(filtered_df_data, how) if filtered_df_data else pd.DataFrame() @@ -181,10 +189,12 @@ def __modin_dataframe_merge(df_list: List, how="inner"): @staticmethod def __filter_numpy_array(data: np.ndarray, operators: Union[List, Tuple], join_operator=JoinOperator.AND): - conditions = [] - for key, value, operator in operators: - conditions.append(_FilterDataNode.__get_filter_condition_per_key_value(data, key, value, operator)) - + conditions = [ + _FilterDataNode.__get_filter_condition_per_key_value( + data, key, value, operator + ) + for key, value, operator in operators + ] if join_operator ==
JoinOperator.AND: join_conditions = reduce(and_, conditions) elif join_operator == JoinOperator.OR: @@ -216,10 +226,13 @@ def __get_filter_condition_per_key_value(array_data: np.ndarray, key, value, ope @staticmethod def __filter_list(list_data: List, operators: Union[List, Tuple], join_operator=JoinOperator.AND): - filtered_list_data = [] - for key, value, operator in operators: - filtered_list_data.append(_FilterDataNode.__filter_list_per_key_value(list_data, key, value, operator)) - if len(filtered_list_data) == 0: + filtered_list_data = [ + _FilterDataNode.__filter_list_per_key_value( + list_data, key, value, operator + ) + for key, value, operator in operators + ] + if not filtered_list_data: return filtered_list_data if join_operator == JoinOperator.AND: return _FilterDataNode.__list_intersect(filtered_list_data) diff --git a/taipy/core/data/csv.py b/taipy/core/data/csv.py index dd32bcfee9..a0cd804fc8 100644 --- a/taipy/core/data/csv.py +++ b/taipy/core/data/csv.py @@ -183,17 +183,15 @@ def _read(self): def _read_as(self): custom_class = self.properties[self.__EXPOSED_TYPE_PROPERTY] with open(self._path, encoding=self.properties[self.__ENCODING_KEY]) as csvFile: - res = list() + res = [] if self.properties[self.__HAS_HEADER_PROPERTY]: reader = csv.DictReader(csvFile) - for line in reader: - res.append(custom_class(**line)) + res.extend(custom_class(**line) for line in reader) else: reader = csv.reader( csvFile, ) - for line in reader: - res.append(custom_class(*line)) + res.extend(custom_class(*line) for line in reader) return res def _read_as_numpy(self) -> np.ndarray: @@ -204,15 +202,20 @@ def _read_as_pandas_dataframe( ) -> pd.DataFrame: try: if self.properties[self.__HAS_HEADER_PROPERTY]: - if column_names: - return pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY])[column_names] - return pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY]) - else: - if usecols: - return pd.read_csv( - self._path, encoding=self.properties[self.__ENCODING_KEY], header=None, usecols=usecols + return ( + pd.read_csv( + self._path, encoding=self.properties[self.__ENCODING_KEY] + )[column_names] + if column_names + else pd.read_csv( + self._path, encoding=self.properties[self.__ENCODING_KEY] ) - return pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY], header=None) + ) + if usecols: + return pd.read_csv( + self._path, encoding=self.properties[self.__ENCODING_KEY], header=None, usecols=usecols + ) + return pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY], header=None) except pd.errors.EmptyDataError: return pd.DataFrame() @@ -221,15 +224,20 @@ def _read_as_modin_dataframe( ) -> modin_pd.DataFrame: try: if self.properties[self.__HAS_HEADER_PROPERTY]: - if column_names: - return modin_pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY])[column_names] - return modin_pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY]) - else: - if usecols: - return modin_pd.read_csv( - self._path, header=None, usecols=usecols, encoding=self.properties[self.__ENCODING_KEY] + return ( + modin_pd.read_csv( + self._path, encoding=self.properties[self.__ENCODING_KEY] + )[column_names] + if column_names + else modin_pd.read_csv( + self._path, encoding=self.properties[self.__ENCODING_KEY] ) - return modin_pd.read_csv(self._path, header=None, encoding=self.properties[self.__ENCODING_KEY]) + ) + if usecols: + return modin_pd.read_csv( + self._path, header=None, usecols=usecols, 
encoding=self.properties[self.__ENCODING_KEY] + ) + return modin_pd.read_csv(self._path, header=None, encoding=self.properties[self.__ENCODING_KEY]) except pd.errors.EmptyDataError: return modin_pd.DataFrame() @@ -255,9 +263,6 @@ def write_with_column_names(self, data: Any, columns: Optional[List[str]] = None columns (Optional[List[str]]): The list of column names to write. job_id (JobId^): An optional identifier of the writer. """ - if not columns: - df = pd.DataFrame(data) - else: - df = pd.DataFrame(data, columns=columns) + df = pd.DataFrame(data) if not columns else pd.DataFrame(data, columns=columns) df.to_csv(self._path, index=False, encoding=self.properties[self.__ENCODING_KEY]) self.track_edit(timestamp=datetime.now(), job_id=job_id) diff --git a/taipy/core/data/data_node.py b/taipy/core/data/data_node.py index a5ece4a37f..4eb44cf09a 100644 --- a/taipy/core/data/data_node.py +++ b/taipy/core/data/data_node.py @@ -116,7 +116,7 @@ def __init__( self._editor_expiration_date: Optional[datetime] = editor_expiration_date # Track edits - self._edits = edits or list() + self._edits = edits or [] self._properties = _Properties(self, **kwargs) @@ -144,9 +144,7 @@ def get_last_edit(self) -> Optional[Edit]: Returns: None if there has been no `Edit^` on this data node. """ - if self._edits: - return self._edits[-1] - return None + return self._edits[-1] if self._edits else None @property # type: ignore @_self_reload(_MANAGER_NAME) @@ -186,12 +184,11 @@ def validity_period(self, val): @_self_reload(_MANAGER_NAME) def expiration_date(self) -> datetime: """Datetime instant of the expiration date of this data node.""" - last_edit_date = self.last_edit_date - validity_period = self._validity_period - - if not last_edit_date: + if not (last_edit_date := self.last_edit_date): raise NoData(f"Data node {self.id} from config {self.config_id} has not been written yet.") + validity_period = self._validity_period + return last_edit_date + validity_period if validity_period else last_edit_date @property # type: ignore @@ -371,10 +368,7 @@ def track_edit(self, **options): options (dict[str, any)): track `timestamp`, `comments`, `job_id`. The others are user-custom, users can use options to attach any information to an external edit of a data node. """ - edit = {} - for k, v in options.items(): - if v is not None: - edit[k] = v + edit = {k: v for k, v in options.items() if v is not None} if "timestamp" not in edit: edit["timestamp"] = datetime.now() self.last_edit_date = edit.get("timestamp") @@ -420,10 +414,9 @@ def unlock_edit(self, editor_id: Optional[str] = None): and self.editor_expiration_date > datetime.now() ): raise DataNodeIsBeingEdited(self.id, self._editor_id) - else: - self.editor_id = None # type: ignore - self.editor_expiration_date = None # type: ignore - self.edit_in_progress = False # type: ignore + self.editor_id = None # type: ignore + self.editor_expiration_date = None # type: ignore + self.edit_in_progress = False # type: ignore def filter(self, operators: Union[List, Tuple], join_operator=JoinOperator.AND): """Read and filter the data referenced by this data node. @@ -471,12 +464,7 @@ def is_ready_for_reading(self) -> bool: False if the data is locked for modification or if the data has never been written. True otherwise. 
""" - if self._edit_in_progress: - return False - if not self._last_edit_date: - # Never been written so it is not up-to-date - return False - return True + return False if self._edit_in_progress else bool(self._last_edit_date) @property # type: ignore @_self_reload(_MANAGER_NAME) @@ -493,10 +481,7 @@ def is_valid(self) -> bool: if not self._validity_period: # No validity period and has already been written, so it is valid return True - if datetime.now() > self.expiration_date: - # expiration_date has been passed - return False - return True + return datetime.now() <= self.expiration_date @property def is_up_to_date(self) -> bool: diff --git a/taipy/core/data/excel.py b/taipy/core/data/excel.py index 1c7c945df4..aaf9426529 100644 --- a/taipy/core/data/excel.py +++ b/taipy/core/data/excel.py @@ -239,9 +239,7 @@ def _read_as(self): work_books[sheet_name] = self._read_as_pandas_dataframe(sheet_name) continue - res = list() - for row in work_sheet.rows: - res.append([col.value for col in row]) + res = [[col.value for col in row] for row in work_sheet.rows] if self.properties[self.__HAS_HEADER_PROPERTY] and res: header = res.pop(0) for i, row in enumerate(res): @@ -342,7 +340,8 @@ def __append_excel_with_multiple_sheets(self, data: Any, columns: List[str] = No def _append(self, data: Any): if isinstance(data, Dict) and all( - [isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) for x in data.values()] + isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) + for x in data.values() ): self.__append_excel_with_multiple_sheets(data) elif isinstance(data, (pd.DataFrame, modin_pd.DataFrame)): @@ -351,8 +350,7 @@ def _append(self, data: Any): self.__append_excel_with_single_sheet(pd.DataFrame(data).to_excel, index=False, header=False) def __write_excel_with_single_sheet(self, write_excel_fct, *args, **kwargs): - sheet_name = self.properties.get(self.__SHEET_NAME_PROPERTY) - if sheet_name: + if sheet_name := self.properties.get(self.__SHEET_NAME_PROPERTY): if not isinstance(sheet_name, str): if len(sheet_name) > 1: raise SheetNameLengthMismatch @@ -378,7 +376,8 @@ def __write_excel_with_multiple_sheets(self, data: Any, columns: List[str] = Non def _write(self, data: Any): if isinstance(data, Dict) and all( - [isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) for x in data.values()] + isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) + for x in data.values() ): self.__write_excel_with_multiple_sheets(data) elif isinstance(data, (pd.DataFrame, modin_pd.DataFrame)): @@ -395,7 +394,8 @@ def write_with_column_names(self, data: Any, columns: List[str] = None, job_id: job_id (JobId^): An optional identifier of the writer. """ if isinstance(data, Dict) and all( - [isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) for x in data.values()] + isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) + for x in data.values() ): self.__write_excel_with_multiple_sheets(data, columns=columns) else: diff --git a/taipy/core/data/generic.py b/taipy/core/data/generic.py index 2983b26d5e..a2d3ba4070 100644 --- a/taipy/core/data/generic.py +++ b/taipy/core/data/generic.py @@ -81,14 +81,13 @@ def __init__( properties = {} if missing := set(self._REQUIRED_PROPERTIES) - set(properties.keys()): raise MissingRequiredProperty( - f"The following properties " f"{', '.join(x for x in missing)} were not informed and are required." + f"The following properties {', '.join(missing)} were not informed and are required." 
) missing_optional_fcts = set(self._REQUIRED_AT_LEAST_ONE_PROPERTY) - set(properties.keys()) if len(missing_optional_fcts) == len(self._REQUIRED_AT_LEAST_ONE_PROPERTY): raise MissingRequiredProperty( - f"None of the following properties " - f"{', '.join(x for x in missing)} were informed and at least one must be populated." + f"None of the following properties {', '.join(missing_optional_fcts)} were informed and at least one must be populated." ) for missing_optional_fct in missing_optional_fcts: properties[missing_optional_fct] = None diff --git a/taipy/core/data/mongo.py b/taipy/core/data/mongo.py index c5abd03046..80d1c4807e 100644 --- a/taipy/core/data/mongo.py +++ b/taipy/core/data/mongo.py @@ -104,7 +104,7 @@ def __init__( required = self._REQUIRED_PROPERTIES if missing := set(required) - set(properties.keys()): raise MissingRequiredProperty( - f"The following properties " f"{', '.join(x for x in missing)} were not informed and are required." + f"The following properties {', '.join(missing)} were not informed and are required." ) self._check_custom_document(properties[self._CUSTOM_DOCUMENT_PROPERTY]) diff --git a/taipy/core/exceptions/exceptions.py b/taipy/core/exceptions/exceptions.py index 30d2e8e0c8..3b94eff1da 100644 --- a/taipy/core/exceptions/exceptions.py +++ b/taipy/core/exceptions/exceptions.py @@ -85,7 +85,7 @@ class DataNodeIsBeingEdited(Exception): """Raised if a DataNode is being edited.""" def __init__(self, data_node_id: str, editor_id: Optional[str] = None): - self.message = f"DataNode {data_node_id} is being edited{ ' by ' + editor_id if editor_id else ''}." + self.message = f"DataNode {data_node_id} is being edited{f' by {editor_id}' if editor_id else ''}." class NonExistingDataNodeConfig(Exception): diff --git a/taipy/core/job/_job_manager.py b/taipy/core/job/_job_manager.py index 91f3151aa4..12df64135e 100644 --- a/taipy/core/job/_job_manager.py +++ b/taipy/core/job/_job_manager.py @@ -77,18 +77,13 @@ def _cancel(cls, job: Union[str, Job]): @classmethod def _get_latest(cls, task: Task) -> Optional[Job]: - jobs_of_task = list(filter(lambda job: task in job, cls._get_all())) - if len(jobs_of_task) == 0: - return None - if len(jobs_of_task) == 1: - return jobs_of_task[0] + if jobs_of_task := list(filter(lambda job: task in job, cls._get_all())): + return jobs_of_task[0] if len(jobs_of_task) == 1 else max(jobs_of_task) else: - return max(jobs_of_task) + return None @classmethod def _is_deletable(cls, job: Union[Job, JobId]) -> bool: if isinstance(job, str): job = cls._get(job) - if job.is_finished(): - return True - return False + return bool(job.is_finished()) diff --git a/taipy/core/job/_job_manager_factory.py b/taipy/core/job/_job_manager_factory.py index 5f1cd8dd69..1a8c3cdf00 100644 --- a/taipy/core/job/_job_manager_factory.py +++ b/taipy/core/job/_job_manager_factory.py @@ -26,11 +26,13 @@ class _JobManagerFactory(_ManagerFactory): def _build_manager(cls) -> Type[_JobManager]: # type: ignore if cls._using_enterprise(): job_manager = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".job._job_manager", "_JobManager" - ) # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.job._job_manager", + "_JobManager", + ) build_repository = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".job._job_manager_factory", "_JobManagerFactory" - )._build_repository # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.job._job_manager_factory", + "_JobManagerFactory", + )._build_repository else: job_manager = _JobManager build_repository = cls._build_repository diff --git
a/taipy/core/notification/_topic.py b/taipy/core/notification/_topic.py index a7a4f073e5..24c95546b8 100644 --- a/taipy/core/notification/_topic.py +++ b/taipy/core/notification/_topic.py @@ -55,11 +55,9 @@ def __hash__(self): return hash((self.entity_type, self.entity_id, self.operation, self.attribute_name)) def __eq__(self, __value) -> bool: - if ( + return ( self.entity_type == __value.entity_type and self.entity_id == __value.entity_id and self.operation == __value.operation and self.attribute_name == __value.attribute_name - ): - return True - return False + ) diff --git a/taipy/core/notification/event.py b/taipy/core/notification/event.py index 92940c6292..78dc0f2b44 100644 --- a/taipy/core/notification/event.py +++ b/taipy/core/notification/event.py @@ -49,7 +49,11 @@ class EventEntityType(_ReprEnum): SUBMISSION = 7 -_NO_ATTRIBUTE_NAME_OPERATIONS = set([EventOperation.CREATION, EventOperation.DELETION, EventOperation.SUBMISSION]) +_NO_ATTRIBUTE_NAME_OPERATIONS = { + EventOperation.CREATION, + EventOperation.DELETION, + EventOperation.SUBMISSION, +} _UNSUBMITTABLE_ENTITY_TYPES = ( EventEntityType.CYCLE, EventEntityType.DATA_NODE, diff --git a/taipy/core/notification/notifier.py b/taipy/core/notification/notifier.py index 25557bd6c7..24b27dc2cc 100644 --- a/taipy/core/notification/notifier.py +++ b/taipy/core/notification/notifier.py @@ -156,6 +156,8 @@ def _is_matching(event: Event, topic: _Topic) -> bool: return False if topic.operation is not None and event.operation != topic.operation: return False - if topic.attribute_name is not None and event.attribute_name and event.attribute_name != topic.attribute_name: - return False - return True + return ( + topic.attribute_name is None + or not event.attribute_name + or event.attribute_name == topic.attribute_name + ) diff --git a/taipy/core/scenario/_scenario_converter.py b/taipy/core/scenario/_scenario_converter.py index 8fe52a2a7f..d2e9797e5c 100644 --- a/taipy/core/scenario/_scenario_converter.py +++ b/taipy/core/scenario/_scenario_converter.py @@ -26,16 +26,21 @@ class _ScenarioConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, scenario: Scenario) -> _ScenarioModel: - sequences: Dict[str, Dict[str, Union[List[TaskId], Dict, List]]] = {} - for p_name, sequence_data in scenario._sequences.items(): - sequences[p_name] = { + sequences: Dict[str, Dict[str, Union[List[TaskId], Dict, List]]] = { + p_name: { Scenario._SEQUENCE_TASKS_KEY: [ - t.id if isinstance(t, Task) else t for t in sequence_data.get("tasks", []) + t.id if isinstance(t, Task) else t + for t in sequence_data.get("tasks", []) ], - Scenario._SEQUENCE_PROPERTIES_KEY: sequence_data.get("properties", {}), - Scenario._SEQUENCE_SUBSCRIBERS_KEY: _utils._fcts_to_dict(sequence_data.get("subscribers", [])), + Scenario._SEQUENCE_PROPERTIES_KEY: sequence_data.get( + "properties", {} + ), + Scenario._SEQUENCE_SUBSCRIBERS_KEY: _utils._fcts_to_dict( + sequence_data.get("subscribers", []) + ), } - + for p_name, sequence_data in scenario._sequences.items() + } return _ScenarioModel( id=scenario.id, config_id=scenario.config_id, @@ -56,9 +61,7 @@ def _entity_to_model(cls, scenario: Scenario) -> _ScenarioModel: @classmethod def _model_to_entity(cls, model: _ScenarioModel) -> Scenario: - tasks: Union[Set[TaskId], Set[Task], Set] = set() - if model.tasks: - tasks = set(model.tasks) + tasks = set(model.tasks) if model.tasks else set() if model.sequences: for sequence_name, sequence_data in model.sequences.items(): if subscribers := 
sequence_data.get(Scenario._SEQUENCE_SUBSCRIBERS_KEY): diff --git a/taipy/core/scenario/_scenario_manager.py b/taipy/core/scenario/_scenario_manager.py index c2c2b35032..046af0d020 100644 --- a/taipy/core/scenario/_scenario_manager.py +++ b/taipy/core/scenario/_scenario_manager.py @@ -141,7 +141,7 @@ def _create( sequence_tasks.append(task) else: non_existing_sequence_task_config_in_scenario_config.add(sequence_task_config.id) - if len(non_existing_sequence_task_config_in_scenario_config) > 0: + if non_existing_sequence_task_config_in_scenario_config: raise SequenceTaskConfigDoesNotExistInSameScenarioConfig( list(non_existing_sequence_task_config_in_scenario_config), sequence_name, str(config.id) ) @@ -182,7 +182,7 @@ def _create( raise InvalidScenario(scenario.id) actual_sequences = scenario._get_sequences() - for sequence_name in sequences.keys(): + for sequence_name in sequences: if not actual_sequences[sequence_name]._is_consistent(): raise InvalidSequence(actual_sequences[sequence_name].id) Notifier.publish(_make_event(actual_sequences[sequence_name], EventOperation.CREATION)) @@ -230,26 +230,18 @@ def __get_status_notifier_callbacks(cls, scenario: Scenario) -> List: @classmethod def _get_primary(cls, cycle: Cycle) -> Optional[Scenario]: scenarios = cls._get_all_by_cycle(cycle) - for scenario in scenarios: - if scenario.is_primary: - return scenario - return None + return next((scenario for scenario in scenarios if scenario.is_primary), None) @classmethod def _get_by_tag(cls, cycle: Cycle, tag: str) -> Optional[Scenario]: scenarios = cls._get_all_by_cycle(cycle) - for scenario in scenarios: - if scenario.has_tag(tag): - return scenario - return None + return next( + (scenario for scenario in scenarios if scenario.has_tag(tag)), None + ) @classmethod def _get_all_by_tag(cls, tag: str) -> List[Scenario]: - scenarios = [] - for scenario in cls._get_all(): - if scenario.has_tag(tag): - scenarios.append(scenario) - return scenarios + return [scenario for scenario in cls._get_all() if scenario.has_tag(tag)] @classmethod def _get_all_by_cycle(cls, cycle: Cycle) -> List[Scenario]: @@ -263,32 +255,25 @@ def _get_all_by_cycle(cls, cycle: Cycle) -> List[Scenario]: @classmethod def _get_primary_scenarios(cls) -> List[Scenario]: - primary_scenarios = [] - for scenario in cls._get_all(): - if scenario.is_primary: - primary_scenarios.append(scenario) - return primary_scenarios + return [scenario for scenario in cls._get_all() if scenario.is_primary] @classmethod def _is_promotable_to_primary(cls, scenario: Union[Scenario, ScenarioId]) -> bool: if isinstance(scenario, str): scenario = cls._get(scenario) - if scenario and not scenario.is_primary and scenario.cycle: - return True - return False + return bool(scenario and not scenario.is_primary and scenario.cycle) @classmethod def _set_primary(cls, scenario: Scenario): - if scenario.cycle: - primary_scenario = cls._get_primary(scenario.cycle) - # To prevent SAME scenario updating out of Context Manager - if primary_scenario and primary_scenario != scenario: - primary_scenario.is_primary = False # type: ignore - scenario.is_primary = True # type: ignore - else: + if not scenario.cycle: raise DoesNotBelongToACycle( f"Can't set scenario {scenario.id} to primary because it doesn't belong to a cycle."
) + primary_scenario = cls._get_primary(scenario.cycle) + # To prevent SAME scenario updating out of Context Manager + if primary_scenario and primary_scenario != scenario: + primary_scenario.is_primary = False # type: ignore + scenario.is_primary = True # type: ignore @classmethod def _tag(cls, scenario: Scenario, tag: str): @@ -296,8 +281,7 @@ def _tag(cls, scenario: Scenario, tag: str): if len(tags) > 0 and tag not in tags: raise UnauthorizedTagError(f"Tag `{tag}` not authorized by scenario configuration `{scenario.config_id}`") if scenario.cycle: - old_tagged_scenario = cls._get_by_tag(scenario.cycle, tag) - if old_tagged_scenario: + if old_tagged_scenario := cls._get_by_tag(scenario.cycle, tag): old_tagged_scenario.remove_tag(tag) cls._set(old_tagged_scenario) scenario._add_tag(tag) @@ -319,29 +303,29 @@ def _compare(cls, *scenarios: Scenario, data_node_config_id: Optional[str] = Non if len(scenarios) < 2: raise InsufficientScenarioToCompare("At least two scenarios are required to compare.") - if not all(scenarios[0].config_id == scenario.config_id for scenario in scenarios): + if any( + scenarios[0].config_id != scenario.config_id for scenario in scenarios + ): raise DifferentScenarioConfigs("Scenarios to compare must have the same configuration.") - if scenario_config := _ScenarioManager.__get_config(scenarios[0]): - results = {} - if data_node_config_id: - if data_node_config_id in scenario_config.comparators.keys(): - dn_comparators = {data_node_config_id: scenario_config.comparators[data_node_config_id]} - else: - raise NonExistingComparator(f"Data node config {data_node_config_id} has no comparator.") + if not (scenario_config := _ScenarioManager.__get_config(scenarios[0])): + raise NonExistingScenarioConfig(scenarios[0].config_id) + results = {} + if data_node_config_id: + if data_node_config_id in scenario_config.comparators.keys(): + dn_comparators = {data_node_config_id: scenario_config.comparators[data_node_config_id]} else: - dn_comparators = scenario_config.comparators - - for data_node_config_id, comparators in dn_comparators.items(): - data_nodes = [scenario.__getattr__(data_node_config_id).read() for scenario in scenarios] - results[data_node_config_id] = { - comparator.__name__: comparator(*data_nodes) for comparator in comparators - } + raise NonExistingComparator(f"Data node config {data_node_config_id} has no comparator.") + else: + dn_comparators = scenario_config.comparators - return results + for data_node_config_id, comparators in dn_comparators.items(): + data_nodes = [scenario.__getattr__(data_node_config_id).read() for scenario in scenarios] + results[data_node_config_id] = { + comparator.__name__: comparator(*data_nodes) for comparator in comparators + } - else: - raise NonExistingScenarioConfig(scenarios[0].config_id) + return results @staticmethod def __get_config(scenario: Scenario): diff --git a/taipy/core/scenario/_scenario_manager_factory.py b/taipy/core/scenario/_scenario_manager_factory.py index d82b54be20..2a7afef560 100644 --- a/taipy/core/scenario/_scenario_manager_factory.py +++ b/taipy/core/scenario/_scenario_manager_factory.py @@ -26,11 +26,13 @@ class _ScenarioManagerFactory(_ManagerFactory): def _build_manager(cls) -> Type[_ScenarioManager]: # type: ignore if cls._using_enterprise(): scenario_manager = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".scenario._scenario_manager", "_ScenarioManager" - ) # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.scenario._scenario_manager", + "_ScenarioManager", + ) build_repository = _load_fct( 
- cls._TAIPY_ENTERPRISE_CORE_MODULE + ".scenario._scenario_manager_factory", "_ScenarioManagerFactory" - )._build_repository # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.scenario._scenario_manager_factory", + "_ScenarioManagerFactory", + )._build_repository else: scenario_manager = _ScenarioManager build_repository = cls._build_repository diff --git a/taipy/core/scenario/scenario.py b/taipy/core/scenario/scenario.py index 1422644cff..ac2c9dd1c6 100644 --- a/taipy/core/scenario/scenario.py +++ b/taipy/core/scenario/scenario.py @@ -109,11 +109,14 @@ def __init__( self._properties = _Properties(self, **properties) self._sequences: Dict[str, Dict] = sequences or {} - _scenario_task_ids = set([task.id if isinstance(task, Task) else task for task in self._tasks]) + _scenario_task_ids = { + task.id if isinstance(task, Task) else task for task in self._tasks + } for sequence_name, sequence_data in self._sequences.items(): - sequence_task_ids = set( - [task.id if isinstance(task, Task) else task for task in sequence_data.get("tasks", [])] - ) + sequence_task_ids = { + task.id if isinstance(task, Task) else task + for task in sequence_data.get("tasks", []) + } self.__check_sequence_tasks_exist_in_scenario_tasks( sequence_name, sequence_task_ids, self.id, _scenario_task_ids ) @@ -168,7 +171,7 @@ def sequences( ): self._sequences = sequences actual_sequences = self._get_sequences() - for sequence_name in sequences.keys(): + for sequence_name in sequences: if not actual_sequences[sequence_name]._is_consistent(): raise InvalidSequence(actual_sequences[sequence_name].id) @@ -192,8 +195,13 @@ def add_sequence( SequenceTaskDoesNotExistInScenario^: If a task in the sequence does not exist in the scenario. """ _scenario = _Reloader()._reload(self._MANAGER_NAME, self) - _scenario_task_ids = set([task.id if isinstance(task, Task) else task for task in _scenario._tasks]) - _sequence_task_ids: Set[TaskId] = set([task.id if isinstance(task, Task) else task for task in tasks]) + _scenario_task_ids = { + task.id if isinstance(task, Task) else task + for task in _scenario._tasks + } + _sequence_task_ids: Set[TaskId] = { + task.id if isinstance(task, Task) else task for task in tasks + } self.__check_sequence_tasks_exist_in_scenario_tasks(name, _sequence_task_ids, self.id, _scenario_task_ids) _sequences = _Reloader()._reload(self._MANAGER_NAME, self)._sequences _sequences.update( @@ -225,9 +233,14 @@ def add_sequences(self, sequences: Dict[str, Union[List[Task], List[TaskId]]]): SequenceTaskDoesNotExistInScenario^: If a task in the sequence does not exist in the scenario. 
""" _scenario = _Reloader()._reload(self._MANAGER_NAME, self) - _sc_task_ids = set([task.id if isinstance(task, Task) else task for task in _scenario._tasks]) + _sc_task_ids = { + task.id if isinstance(task, Task) else task + for task in _scenario._tasks + } for name, tasks in sequences.items(): - _seq_task_ids: Set[TaskId] = set([task.id if isinstance(task, Task) else task for task in tasks]) + _seq_task_ids: Set[TaskId] = { + task.id if isinstance(task, Task) else task for task in tasks + } self.__check_sequence_tasks_exist_in_scenario_tasks(name, _seq_task_ids, self.id, _sc_task_ids) # Need to parse twice the sequences to avoid adding some sequences and not others in case of exception for name, tasks in sequences.items(): @@ -269,11 +282,11 @@ def remove_sequences(self, sequence_names: List[str]): def __check_sequence_tasks_exist_in_scenario_tasks( sequence_name: str, sequence_task_ids: Set[TaskId], scenario_id: ScenarioId, scenario_task_ids: Set[TaskId] ): - non_existing_sequence_task_ids_in_scenario = set() - for sequence_task_id in sequence_task_ids: - if sequence_task_id not in scenario_task_ids: - non_existing_sequence_task_ids_in_scenario.add(sequence_task_id) - if len(non_existing_sequence_task_ids_in_scenario) > 0: + if non_existing_sequence_task_ids_in_scenario := { + sequence_task_id + for sequence_task_id in sequence_task_ids + if sequence_task_id not in scenario_task_ids + }: raise SequenceTaskDoesNotExistInScenario( list(non_existing_sequence_task_ids_in_scenario), sequence_name, scenario_id ) @@ -584,13 +597,17 @@ def _is_consistent(self) -> bool: return True if not nx.is_directed_acyclic_graph(dag): return False - for left_node, right_node in dag.edges: - if (isinstance(left_node, DataNode) and isinstance(right_node, Task)) or ( - isinstance(left_node, Task) and isinstance(right_node, DataNode) - ): - continue - return False - return True + return not any( + ( + not isinstance(left_node, DataNode) + or not isinstance(right_node, Task) + ) + and ( + not isinstance(left_node, Task) + or not isinstance(right_node, DataNode) + ) + for left_node, right_node in dag.edges + ) @_make_event.register(Scenario) diff --git a/taipy/core/sequence/_sequence_manager.py b/taipy/core/sequence/_sequence_manager.py index f9280f19e2..359682b378 100644 --- a/taipy/core/sequence/_sequence_manager.py +++ b/taipy/core/sequence/_sequence_manager.py @@ -150,14 +150,13 @@ def _create( task_manager = _TaskManagerFactory._build_manager() _tasks: List[Task] = [] for task in tasks: - if not isinstance(task, Task): - if _task := task_manager._get(task): - _tasks.append(_task) - else: - raise NonExistingTask(task) - else: + if isinstance(task, Task): _tasks.append(task) + elif _task := task_manager._get(task): + _tasks.append(_task) + else: + raise NonExistingTask(task) properties = properties if properties else {} properties["name"] = sequence_name version = version if version else cls._get_latest_version() @@ -226,9 +225,13 @@ def _get_all_by(cls, filters: Optional[List[Dict]] = None) -> List[Sequence]: filtered_sequences = [] for sequence in sequences: - for filter in filters: - if all([getattr(sequence, key) == item for key, item in filter.items()]): - filtered_sequences.append(sequence) + filtered_sequences.extend( + sequence + for filter in filters + if all( + getattr(sequence, key) == item for key, item in filter.items() + ) + ) return filtered_sequences @classmethod @@ -332,7 +335,7 @@ def _exists(cls, entity_id: str) -> bool: """ Returns True if the entity id exists. 
""" - return True if cls._get(entity_id) else False + return bool(cls._get(entity_id)) @classmethod def _export(cls, id: str, folder_path: Union[str, pathlib.Path]): @@ -353,12 +356,11 @@ def _export(cls, id: str, folder_path: Union[str, pathlib.Path]): sequence = {"id": id, "owner_id": scenario_id, "parent_ids": [scenario_id], "name": sequence_name} scenario = _ScenarioManagerFactory._build_manager()._get(scenario_id) - if sequence_data := scenario._sequences.get(sequence_name, None): - sequence.update(sequence_data) - with open(export_path, "w", encoding="utf-8") as export_file: - export_file.write(json.dumps(sequence)) - else: + if not (sequence_data := scenario._sequences.get(sequence_name, None)): raise ModelNotFound(cls._model_name, id) + sequence.update(sequence_data) + with open(export_path, "w", encoding="utf-8") as export_file: + export_file.write(json.dumps(sequence)) @classmethod def __log_error_entity_not_found(cls, sequence_id: Union[SequenceId, str]): diff --git a/taipy/core/sequence/_sequence_manager_factory.py b/taipy/core/sequence/_sequence_manager_factory.py index 2f441e730e..ac6dd37dcd 100644 --- a/taipy/core/sequence/_sequence_manager_factory.py +++ b/taipy/core/sequence/_sequence_manager_factory.py @@ -19,10 +19,11 @@ class _SequenceManagerFactory(_ManagerFactory): @classmethod def _build_manager(cls) -> Type[_SequenceManager]: # type: ignore - if cls._using_enterprise(): - sequence_manager = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".sequence._sequence_manager", "_SequenceManager" - ) # type: ignore - else: - sequence_manager = _SequenceManager - return sequence_manager # type: ignore + return ( + _load_fct( + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.sequence._sequence_manager", + "_SequenceManager", + ) + if cls._using_enterprise() + else _SequenceManager + ) diff --git a/taipy/core/sequence/sequence.py b/taipy/core/sequence/sequence.py index a31383f374..ededd551e5 100644 --- a/taipy/core/sequence/sequence.py +++ b/taipy/core/sequence/sequence.py @@ -135,13 +135,17 @@ def _is_consistent(self) -> bool: return False if not nx.is_weakly_connected(dag): return False - for left_node, right_node in dag.edges: - if (isinstance(left_node, DataNode) and isinstance(right_node, Task)) or ( - isinstance(left_node, Task) and isinstance(right_node, DataNode) - ): - continue - return False - return True + return not any( + ( + not isinstance(left_node, DataNode) + or not isinstance(right_node, Task) + ) + and ( + not isinstance(left_node, Task) + or not isinstance(right_node, DataNode) + ) + for left_node, right_node in dag.edges + ) def _get_tasks(self) -> Dict[str, Task]: from ..task._task_manager_factory import _TaskManagerFactory diff --git a/taipy/core/submission/_submission_converter.py b/taipy/core/submission/_submission_converter.py index a7a85cb5a2..021cef4bba 100644 --- a/taipy/core/submission/_submission_converter.py +++ b/taipy/core/submission/_submission_converter.py @@ -34,7 +34,7 @@ def _entity_to_model(cls, submission: Submission) -> _SubmissionModel: @classmethod def _model_to_entity(cls, model: _SubmissionModel) -> Submission: - submission = Submission( + return Submission( entity_id=model.entity_id, entity_type=model.entity_type, entity_config_id=model.entity_config_id, @@ -44,4 +44,3 @@ def _model_to_entity(cls, model: _SubmissionModel) -> Submission: submission_status=model.submission_status, version=model.version, ) - return submission diff --git a/taipy/core/submission/_submission_manager.py b/taipy/core/submission/_submission_manager.py index 
b91c30af53..623f4b3199 100644 --- a/taipy/core/submission/_submission_manager.py +++ b/taipy/core/submission/_submission_manager.py @@ -46,10 +46,16 @@ def _create(cls, entity_id: str, entity_type: str, entity_config: Optional[str]) @classmethod def _get_latest(cls, entity: Union[Scenario, Sequence, Task]) -> Optional[Submission]: entity_id = entity.id if not isinstance(entity, str) else entity - submissions_of_task = list(filter(lambda submission: submission.entity_id == entity_id, cls._get_all())) - if len(submissions_of_task) == 0: - return None - if len(submissions_of_task) == 1: - return submissions_of_task[0] + if submissions_of_task := list( + filter( + lambda submission: submission.entity_id == entity_id, + cls._get_all(), + ) + ): + return ( + submissions_of_task[0] + if len(submissions_of_task) == 1 + else max(submissions_of_task) + ) else: - return max(submissions_of_task) + return None diff --git a/taipy/core/submission/_submission_manager_factory.py b/taipy/core/submission/_submission_manager_factory.py index cd7b6689cf..4b4dac9d58 100644 --- a/taipy/core/submission/_submission_manager_factory.py +++ b/taipy/core/submission/_submission_manager_factory.py @@ -26,12 +26,13 @@ class _SubmissionManagerFactory(_ManagerFactory): def _build_manager(cls) -> Type[_SubmissionManager]: # type: ignore if cls._using_enterprise(): submission_manager = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".submission._submission_manager", "_SubmissionManager" - ) # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.submission._submission_manager", + "_SubmissionManager", + ) build_repository = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".submission._submission_manager_factory", + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.submission._submission_manager_factory", "_SubmissionManagerFactory", - )._build_repository # type: ignore + )._build_repository else: submission_manager = _SubmissionManager build_repository = cls._build_repository diff --git a/taipy/core/submission/submission.py b/taipy/core/submission/submission.py index 86783d3439..4212fd7ee7 100644 --- a/taipy/core/submission/submission.py +++ b/taipy/core/submission/submission.py @@ -112,13 +112,9 @@ def get_simple_label(self) -> str: @property # type: ignore @_self_reload(_MANAGER_NAME) def jobs(self) -> List[Job]: - jobs = [] job_manager = _JobManagerFactory._build_manager() - for job in self._jobs: - jobs.append(job_manager._get(job)) - - return jobs + return [job_manager._get(job) for job in self._jobs] @jobs.setter # type: ignore @_self_setter(_MANAGER_NAME) @@ -169,13 +165,13 @@ def _update_submission_status(self, job: Job): elif job_status == Status.BLOCKED: self.__blocked_jobs.add(job.id) self.__pending_jobs.discard(job.id) - elif job_status == Status.PENDING or job_status == Status.SUBMITTED: + elif job_status in [Status.PENDING, Status.SUBMITTED]: self.__pending_jobs.add(job.id) self.__blocked_jobs.discard(job.id) elif job_status == Status.RUNNING: self.__running_jobs.add(job.id) self.__pending_jobs.discard(job.id) - elif job_status == Status.COMPLETED or job_status == Status.SKIPPED: + elif job_status in [Status.COMPLETED, Status.SKIPPED]: self.__completed = True self.__blocked_jobs.discard(job.id) self.__pending_jobs.discard(job.id) diff --git a/taipy/core/taipy.py b/taipy/core/taipy.py index 3bcf419e9f..e54b09a37e 100644 --- a/taipy/core/taipy.py +++ b/taipy/core/taipy.py @@ -860,7 +860,7 @@ def update_parent_dict(parents_set, parent_dict): current_parent_dict: Dict[str, Set] = {} for parent in entity.parent_ids: 
parent_entity = get(parent) - if parent_entity._MANAGER_NAME in current_parent_dict.keys(): + if parent_entity._MANAGER_NAME in current_parent_dict: current_parent_dict[parent_entity._MANAGER_NAME].add(parent_entity) else: current_parent_dict[parent_entity._MANAGER_NAME] = {parent_entity} @@ -892,7 +892,7 @@ def get_cycles_scenarios() -> Dict[Optional[Cycle], List[Scenario]]: cycles_scenarios: Dict[Optional[Cycle], List[Scenario]] = {} for scenario in get_scenarios(): - if scenario.cycle in cycles_scenarios.keys(): + if scenario.cycle in cycles_scenarios: cycles_scenarios[scenario.cycle].append(scenario) else: cycles_scenarios[scenario.cycle] = [scenario] diff --git a/taipy/core/task/_task_manager.py b/taipy/core/task/_task_manager.py index 15f0423033..587389eafa 100644 --- a/taipy/core/task/_task_manager.py +++ b/taipy/core/task/_task_manager.py @@ -70,7 +70,7 @@ def _bulk_get_or_create( Config.data_nodes[dnc.id] for dnc in task_config.input_configs ] task_config_data_nodes = [data_nodes[dn_config] for dn_config in task_dn_configs] - scope = min(dn.scope for dn in task_config_data_nodes) if len(task_config_data_nodes) != 0 else Scope.GLOBAL + scope = min((dn.scope for dn in task_config_data_nodes), default=Scope.GLOBAL) owner_id: Union[Optional[SequenceId], Optional[ScenarioId], Optional[CycleId]] if scope == Scope.SCENARIO: owner_id = scenario_id @@ -87,9 +87,7 @@ def _bulk_get_or_create( tasks = [] for task_config, owner_id in tasks_configs_and_owner_id: - if task := tasks_by_config.get((task_config, owner_id)): - tasks.append(task) - else: + if not (task := tasks_by_config.get((task_config, owner_id))): version = _VersionManagerFactory._build_manager()._get_latest_version() inputs = [ data_nodes[input_config] @@ -115,7 +113,7 @@ def _bulk_get_or_create( dn._parent_ids.update([task.id]) cls._set(task) Notifier.publish(_make_event(task, EventOperation.CREATION)) - tasks.append(task) + tasks.append(task) return tasks @classmethod diff --git a/taipy/core/task/_task_manager_factory.py b/taipy/core/task/_task_manager_factory.py index b1535711d6..360e32e993 100644 --- a/taipy/core/task/_task_manager_factory.py +++ b/taipy/core/task/_task_manager_factory.py @@ -26,11 +26,13 @@ class _TaskManagerFactory(_ManagerFactory): def _build_manager(cls) -> Type[_TaskManager]: # type: ignore if cls._using_enterprise(): task_manager = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".task._task_manager", "_TaskManager" - ) # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.task._task_manager", + "_TaskManager", + ) build_repository = _load_fct( - cls._TAIPY_ENTERPRISE_CORE_MODULE + ".task._task_manager_factory", "_TaskManagerFactory" - )._build_repository # type: ignore + f"{cls._TAIPY_ENTERPRISE_CORE_MODULE}.task._task_manager_factory", + "_TaskManagerFactory", + )._build_repository else: task_manager = _TaskManager build_repository = cls._build_repository diff --git a/taipy/core/task/_task_model.py b/taipy/core/task/_task_model.py index 2c671c1ee0..ad3b2cff49 100644 --- a/taipy/core/task/_task_model.py +++ b/taipy/core/task/_task_model.py @@ -53,7 +53,9 @@ def from_dict(data: Dict[str, Any]): return _TaskModel( id=data["id"], owner_id=data.get("owner_id"), - parent_ids=_BaseModel._deserialize_attribute(data.get("parent_ids", [])), + parent_ids=_BaseModel._deserialize_attribute( + data.get("parent_ids", []) + ), config_id=data["config_id"], input_ids=_BaseModel._deserialize_attribute(data["input_ids"]), function_name=data["function_name"], @@ -61,7 +63,9 @@ def from_dict(data: Dict[str, Any]): 
output_ids=_BaseModel._deserialize_attribute(data["output_ids"]), version=data["version"], skippable=data["skippable"], - properties=_BaseModel._deserialize_attribute(data["properties"] if "properties" in data.keys() else {}), + properties=_BaseModel._deserialize_attribute( + data.get("properties", {}) + ), ) def to_list(self): diff --git a/taipy/core/task/task.py b/taipy/core/task/task.py index 5772d75264..c9389450f1 100644 --- a/taipy/core/task/task.py +++ b/taipy/core/task/task.py @@ -161,8 +161,11 @@ def scope(self) -> Scope: either no input or no output. """ data_nodes = list(self.__input.values()) + list(self.__output.values()) - scope = Scope(min(dn.scope for dn in data_nodes)) if len(data_nodes) != 0 else Scope.GLOBAL - return scope + return ( + Scope(min(dn.scope for dn in data_nodes)) + if len(data_nodes) != 0 + else Scope.GLOBAL + ) @property def version(self): diff --git a/taipy/gui/_gui_cli.py b/taipy/gui/_gui_cli.py index faedaee67b..9f4ec89a6d 100644 --- a/taipy/gui/_gui_cli.py +++ b/taipy/gui/_gui_cli.py @@ -99,6 +99,6 @@ def parse_arguments(cls): @classmethod def __add_taipy_prefix(cls, key: str): if key.startswith("--no-"): - return key[:5] + "taipy-" + key[5:] + return f"{key[:5]}taipy-{key[5:]}" - return key[:2] + "taipy-" + key[2:] + return f"{key[:2]}taipy-{key[2:]}" diff --git a/taipy/gui/_renderers/_markdown/preproc.py b/taipy/gui/_renderers/_markdown/preproc.py index 0ef13e26db..5727ea50ba 100644 --- a/taipy/gui/_renderers/_markdown/preproc.py +++ b/taipy/gui/_renderers/_markdown/preproc.py @@ -73,8 +73,8 @@ def _make_prop_pair(self, prop_name: str, prop_value: str) -> Tuple[str, str]: return (prop_name, prop_value.replace("\\|", "|")) def run(self, lines: List[str]) -> List[str]: - new_lines = [] tag_queue = [] + new_lines = [] for line_count, line in enumerate(lines, start=1): new_line = "" last_index = 0 @@ -150,11 +150,7 @@ def run(self, lines: List[str]) -> List[str]: + new_line[m.end() :] ) else: - new_line = ( - new_line[: m.start()] - + f"
<div>No matching opened tag on line {line_count}</div>
" - + new_line[m.end() :] - ) + new_line = f"{new_line[:m.start()]}
<div>No matching opened tag on line {line_count}</div>
{new_line[m.end():]}" _warn(f"Line {line_count} has an unmatched closing tag.") # append the new line new_lines.append(new_line) diff --git a/taipy/gui/_renderers/builder.py b/taipy/gui/_renderers/builder.py index 87d0fcbd79..29f476c6c4 100644 --- a/taipy/gui/_renderers/builder.py +++ b/taipy/gui/_renderers/builder.py @@ -253,7 +253,7 @@ def set_dynamic_dict_attribute(self, name: str, default_value: t.Optional[t.Dict vals = [x.strip().split(":") for x in dict_attr.split(";")] dict_attr = {val[0].strip(): val[1].strip() for val in vals if len(val) > 1} if isinstance(dict_attr, (dict, _MapDict)): - self.__set_json_attribute(_to_camel_case("default_" + name), dict_attr) + self.__set_json_attribute(_to_camel_case(f"default_{name}"), dict_attr) else: _warn(f"{self.__element_name}: {name} should be a dict: '{str(dict_attr)}'.") if dict_hash := self.__hashes.get(name): @@ -438,7 +438,11 @@ def _get_adapter(self, var_name: str, property_name: t.Optional[str] = None, mul return self def __filter_attribute_names(self, names: t.Iterable[str]): - return [k for k in self.__attributes if k in names or any(k.startswith(n + "[") for n in names)] + return [ + k + for k in self.__attributes + if k in names or any(k.startswith(f"{n}[") for n in names) + ] def __get_holded_name(self, key: str): name = self.__hashes.get(key) @@ -482,10 +486,10 @@ def _get_dataframe_attributes(self) -> "_Builder": data, self.__attributes.get("columns", {}), col_types, date_format, self.__attributes.get("number_format") ) - rebuild_fn_hash = self.__build_rebuild_fn( - self.__gui._get_rebuild_fn_name("_tbl_cols"), _Builder.__TABLE_COLUMNS_DEPS - ) - if rebuild_fn_hash: + if rebuild_fn_hash := self.__build_rebuild_fn( + self.__gui._get_rebuild_fn_name("_tbl_cols"), + _Builder.__TABLE_COLUMNS_DEPS, + ): self.__set_react_attribute("columns", rebuild_fn_hash) if col_dict is not None: _enhance_columns(self.__attributes, self.__hashes, col_dict, self.__element_name) @@ -517,10 +521,10 @@ def _get_dataframe_attributes(self) -> "_Builder": def _get_chart_config(self, default_type: str, default_mode: str): self.__attributes["_default_type"] = default_type self.__attributes["_default_mode"] = default_mode - rebuild_fn_hash = self.__build_rebuild_fn( - self.__gui._get_rebuild_fn_name("_chart_conf"), _CHART_NAMES + ("_default_type", "_default_mode", "data") - ) - if rebuild_fn_hash: + if rebuild_fn_hash := self.__build_rebuild_fn( + self.__gui._get_rebuild_fn_name("_chart_conf"), + _CHART_NAMES + ("_default_type", "_default_mode", "data"), + ): self.__set_react_attribute("config", rebuild_fn_hash) # read column definitions @@ -588,11 +592,7 @@ def _get_list_attribute(self, name: str, list_type: PropertyType): if isinstance(list_val, str): list_val = list(list_val.split(";")) if isinstance(list_val, list): - # TODO catch the cast exception - if list_type.value == PropertyType.number.value: - list_val = [int(v) for v in list_val] - else: - list_val = [int(v) for v in list_val] + list_val = [int(v) for v in list_val] else: if list_val is not None: _warn(f"{self.__element_name}: {name} should be a list.") @@ -603,7 +603,10 @@ def _get_list_attribute(self, name: str, list_type: PropertyType): return self def __set_class_names(self): - self.set_attribute("libClassName", self.__lib_name + "-" + self.__control_type.replace("_", "-")) + self.set_attribute( + "libClassName", + f"{self.__lib_name}-" + self.__control_type.replace("_", "-"), + ) return self.__set_dynamic_string_attribute("class_name", dynamic_property_name="dynamic_class_name") def 
_set_dataType(self): diff --git a/taipy/gui/_renderers/factory.py b/taipy/gui/_renderers/factory.py index e44d5de8a0..e1e7e16bf7 100644 --- a/taipy/gui/_renderers/factory.py +++ b/taipy/gui/_renderers/factory.py @@ -590,8 +590,7 @@ def __get_library_element(name: str): for lib in _Factory.__LIBRARIES.get(parts[0], []): elts = lib.get_elements() if isinstance(elts, dict): - element = elts.get(element_name) - if element: + if element := elts.get(element_name): return lib, element_name, element else: element_name = name @@ -599,8 +598,7 @@ def __get_library_element(name: str): for lib in libs: elts = lib.get_elements() if isinstance(elts, dict): - element = elts.get(element_name) - if element: + if element := elts.get(element_name): return lib, element_name, element return None, None, None diff --git a/taipy/gui/builder/_api_generator.py b/taipy/gui/builder/_api_generator.py index a046b7e155..29be4bac9c 100644 --- a/taipy/gui/builder/_api_generator.py +++ b/taipy/gui/builder/_api_generator.py @@ -31,10 +31,15 @@ def __init__(self): @staticmethod def find_default_property(property_list: t.List[t.Dict[str, t.Any]]) -> str: - for property in property_list: - if "default_property" in property and property["default_property"] is True: - return property["name"] - return "" + return next( + ( + property["name"] + for property in property_list + if "default_property" in property + and property["default_property"] is True + ), + "", + ) def add_default(self): current_frame = inspect.currentframe() diff --git a/taipy/gui/builder/_element.py b/taipy/gui/builder/_element.py index ff43deed27..d694a74957 100644 --- a/taipy/gui/builder/_element.py +++ b/taipy/gui/builder/_element.py @@ -152,13 +152,15 @@ def __init__(self, *args, **kwargs): self._content = args[1] if len(args) > 1 else "" def _render(self, gui: "Gui") -> str: - if self._ELEMENT_NAME: - attrs = "" - if self._properties: - attrs = " " + " ".join([f'{k}="{str(v)}"' for k, v in self._properties.items()]) - return f"<{self._ELEMENT_NAME}{attrs}>{self._content}{self._render_children(gui)}" - else: + if not self._ELEMENT_NAME: return self._content + attrs = ( + " " + + " ".join([f'{k}="{str(v)}"' for k, v in self._properties.items()]) + if self._properties + else "" + ) + return f"<{self._ELEMENT_NAME}{attrs}>{self._content}{self._render_children(gui)}" class _Control(_Element): diff --git a/taipy/gui/data/content_accessor.py b/taipy/gui/data/content_accessor.py index 1a33ed5c26..e94c1543dd 100644 --- a/taipy/gui/data/content_accessor.py +++ b/taipy/gui/data/content_accessor.py @@ -40,16 +40,16 @@ def __init__(self, data_url_max_size: int) -> None: def get_path(self, path: pathlib.Path) -> str: url_path = self.__paths.get(path) if url_path is None: - self.__paths[path] = url_path = "taipyStatic" + str(len(self.__paths)) + self.__paths[path] = url_path = f"taipyStatic{len(self.__paths)}" return url_path def get_content_path( self, url_path: str, file_name: str, bypass: t.Optional[str] ) -> t.Tuple[t.Union[pathlib.Path, None], bool]: - content_path = self.__content_paths.get(url_path) - if not content_path: + if content_path := self.__content_paths.get(url_path): + return (content_path, bypass is not None or self.__url_is_image.get(f"{url_path}/{file_name}", False)) + else: return (None, True) - return (content_path, bypass is not None or self.__url_is_image.get(f"{url_path}/{file_name}", False)) def __get_mime_from_file(self, path: pathlib.Path): if _has_magic_module: @@ -62,10 +62,8 @@ def __get_mime_from_file(self, path: pathlib.Path): def 
diff --git a/taipy/gui/data/content_accessor.py b/taipy/gui/data/content_accessor.py
index 1a33ed5c26..e94c1543dd 100644
--- a/taipy/gui/data/content_accessor.py
+++ b/taipy/gui/data/content_accessor.py
@@ -40,16 +40,16 @@ def __init__(self, data_url_max_size: int) -> None:

     def get_path(self, path: pathlib.Path) -> str:
         url_path = self.__paths.get(path)
         if url_path is None:
-            self.__paths[path] = url_path = "taipyStatic" + str(len(self.__paths))
+            self.__paths[path] = url_path = f"taipyStatic{len(self.__paths)}"
         return url_path

     def get_content_path(
         self, url_path: str, file_name: str, bypass: t.Optional[str]
     ) -> t.Tuple[t.Union[pathlib.Path, None], bool]:
-        content_path = self.__content_paths.get(url_path)
-        if not content_path:
+        if content_path := self.__content_paths.get(url_path):
+            return (content_path, bypass is not None or self.__url_is_image.get(f"{url_path}/{file_name}", False))
+        else:
             return (None, True)
-        return (content_path, bypass is not None or self.__url_is_image.get(f"{url_path}/{file_name}", False))

     def __get_mime_from_file(self, path: pathlib.Path):
         if _has_magic_module:
@@ -62,10 +62,8 @@ def __get_mime_from_file(self, path: pathlib.Path):
     def __get_display_name(self, var_name: str) -> str:
         if not isinstance(var_name, str):
             return var_name
-        if var_name.startswith("_tpC_"):
-            var_name = var_name[5:]
-        if var_name.startswith("tpec_"):
-            var_name = var_name[5:]
+        var_name = var_name.removeprefix("_tpC_")
+        var_name = var_name.removeprefix("tpec_")
         return _variable_decode(var_name)[0]

     def get_info(self, var_name: str, value: t.Any, image: bool) -> t.Union[str, t.Tuple[str], t.Any]:  # noqa: C901
diff --git a/taipy/gui/data/utils.py b/taipy/gui/data/utils.py
index 095a4c8f49..7ffda88bc7 100644
--- a/taipy/gui/data/utils.py
+++ b/taipy/gui/data/utils.py
@@ -52,12 +52,12 @@ def _is_applicable(self, data: t.Any, nb_rows_max: int, chart_mode: str):
         if chart_mode not in self._CHART_MODES:
             _warn(f"{type(self).__name__} is only applicable for {' '.join(self._CHART_MODES)}.")
             return False
-        if self.threshold is None:
-            if nb_rows_max < len(data):
-                return True
-        elif self.threshold < len(data):
-            return True
-        return False
+        return (
+            self.threshold is None
+            and nb_rows_max < len(data)
+            or self.threshold is not None
+            and self.threshold < len(data)
+        )

     @abstractmethod
     def decimate(self, data: np.ndarray, payload: t.Dict[str, t.Any]) -> np.ndarray:
diff --git a/taipy/gui/extension/library.py b/taipy/gui/extension/library.py
index f1843e6964..a6b7a2aac4 100644
--- a/taipy/gui/extension/library.py
+++ b/taipy/gui/extension/library.py
@@ -171,11 +171,7 @@ def _call_builder(
             xhtml = self._render_xhtml(attributes)
         try:
             xml_root = etree.fromstring(xhtml)
-            if is_html:
-                return xhtml, name
-            else:
-                return xml_root
-
+            return (xhtml, name) if is_html else xml_root
         except Exception as e:
             _warn(f"{name}.render_xhtml() did not return a valid XHTML string", e)
             return f"{name}.render_xhtml() did not return a valid XHTML string. {e}"
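Editor's note: the `_is_applicable` rewrite above relies on `and` binding tighter than `or`, so the expression groups as `(a and b) or (c and d)`. A quick standalone check of that grouping (pure illustration, not Taipy code):

```python
def is_applicable(threshold, nb_rows_max, n_rows):
    # Same shape as the refactored return: the two `and` clauses are
    # evaluated first, then joined by `or`.
    return (
        threshold is None
        and nb_rows_max < n_rows
        or threshold is not None
        and threshold < n_rows
    )

assert is_applicable(None, 100, 500)      # no threshold: nb_rows_max decides
assert is_applicable(200, 100, 500)       # threshold set: threshold decides
assert not is_applicable(1000, 100, 500)  # below threshold: not applicable
```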
""" - if isinstance(library, ElementLibrary): - _Factory.set_library(library) - library_name = library.get_name() - if library_name.isidentifier(): - libs = Gui.__extensions.get(library_name) - if libs is None: - Gui.__extensions[library_name] = [library] - else: - libs.append(library) - _ElementApiGenerator().add_library(library) - else: - raise NameError(f"ElementLibrary passed to add_library() has an invalid name: '{library_name}'") - else: # pragma: no cover + if not isinstance(library, ElementLibrary): raise RuntimeError( f"add_library() argument should be a subclass of ElementLibrary instead of '{type(library)}'" ) + _Factory.set_library(library) + library_name = library.get_name() + if not library_name.isidentifier(): + raise NameError(f"ElementLibrary passed to add_library() has an invalid name: '{library_name}'") + libs = Gui.__extensions.get(library_name) + if libs is None: + Gui.__extensions[library_name] = [library] + else: + libs.append(library) + _ElementApiGenerator().add_library(library) @staticmethod def register_content_provider(content_type: type, content_provider: t.Callable[..., str]) -> None: @@ -1844,7 +1842,7 @@ def __init_route(self): def _call_on_exception(self, function_name: str, exception: Exception) -> bool: if hasattr(self, "on_exception") and callable(self.on_exception): try: - self.on_exception(self.__get_state(), str(function_name), exception) + self.on_exception(self.__get_state(), function_name, exception) except Exception as e: # pragma: no cover _warn("Exception raised in on_exception()", e) return True @@ -1984,8 +1982,10 @@ def __get_client_config(self) -> t.Dict[str, t.Any]: def __get_css_vars(self) -> str: css_vars = [] if stylekit := self._get_config("stylekit", _default_stylekit): - for k, v in stylekit.items(): - css_vars.append(f'--{k.replace("_", "-")}:{_get_css_var_value(v)};') + css_vars.extend( + f'--{k.replace("_", "-")}:{_get_css_var_value(v)};' + for k, v in stylekit.items() + ) return " ".join(css_vars) def __init_server(self): diff --git a/taipy/gui/utils/_evaluator.py b/taipy/gui/utils/_evaluator.py index 8aa1de34ae..5b76c84833 100644 --- a/taipy/gui/utils/_evaluator.py +++ b/taipy/gui/utils/_evaluator.py @@ -239,25 +239,26 @@ def refresh_expr(self, gui: Gui, var_name: str, holder: t.Optional[_TaipyBase]): """ This function will execute when the __request_var_update function receive a refresh order """ - expr = self.__hash_to_expr.get(var_name) - if expr: - expr_decoded, _ = _variable_decode(expr) - var_map = self.__expr_to_var_map.get(expr, {}) - eval_dict = {k: _getscopeattr_drill(gui, gui._bind_var(v)) for k, v in var_map.items()} - if self._is_expression(expr_decoded): - expr_string = 'f"' + _variable_decode(expr)[0].replace('"', '\\"') + '"' - else: - expr_string = expr_decoded - try: - ctx: t.Dict[str, t.Any] = {} - ctx.update(self.__global_ctx) - ctx.update(eval_dict) - expr_evaluated = eval(expr_string, ctx) - _setscopeattr(gui, var_name, expr_evaluated) - if holder is not None: - holder.set(expr_evaluated) - except Exception as e: - _warn(f"Exception raised evaluating {expr_string}", e) + if not (expr := self.__hash_to_expr.get(var_name)): + return + expr_decoded, _ = _variable_decode(expr) + var_map = self.__expr_to_var_map.get(expr, {}) + eval_dict = {k: _getscopeattr_drill(gui, gui._bind_var(v)) for k, v in var_map.items()} + expr_string = ( + 'f"' + _variable_decode(expr)[0].replace('"', '\\"') + '"' + if self._is_expression(expr_decoded) + else expr_decoded + ) + try: + ctx: t.Dict[str, t.Any] = {} + 
diff --git a/taipy/gui/utils/_evaluator.py b/taipy/gui/utils/_evaluator.py
index 8aa1de34ae..5b76c84833 100644
--- a/taipy/gui/utils/_evaluator.py
+++ b/taipy/gui/utils/_evaluator.py
@@ -239,25 +239,26 @@ def refresh_expr(self, gui: Gui, var_name: str, holder: t.Optional[_TaipyBase]):
         """
         This function will execute when the __request_var_update function receive a refresh order
         """
-        expr = self.__hash_to_expr.get(var_name)
-        if expr:
-            expr_decoded, _ = _variable_decode(expr)
-            var_map = self.__expr_to_var_map.get(expr, {})
-            eval_dict = {k: _getscopeattr_drill(gui, gui._bind_var(v)) for k, v in var_map.items()}
-            if self._is_expression(expr_decoded):
-                expr_string = 'f"' + _variable_decode(expr)[0].replace('"', '\\"') + '"'
-            else:
-                expr_string = expr_decoded
-            try:
-                ctx: t.Dict[str, t.Any] = {}
-                ctx.update(self.__global_ctx)
-                ctx.update(eval_dict)
-                expr_evaluated = eval(expr_string, ctx)
-                _setscopeattr(gui, var_name, expr_evaluated)
-                if holder is not None:
-                    holder.set(expr_evaluated)
-            except Exception as e:
-                _warn(f"Exception raised evaluating {expr_string}", e)
+        if not (expr := self.__hash_to_expr.get(var_name)):
+            return
+        expr_decoded, _ = _variable_decode(expr)
+        var_map = self.__expr_to_var_map.get(expr, {})
+        eval_dict = {k: _getscopeattr_drill(gui, gui._bind_var(v)) for k, v in var_map.items()}
+        expr_string = (
+            'f"' + _variable_decode(expr)[0].replace('"', '\\"') + '"'
+            if self._is_expression(expr_decoded)
+            else expr_decoded
+        )
+        try:
+            ctx: t.Dict[str, t.Any] = {}
+            ctx.update(self.__global_ctx)
+            ctx.update(eval_dict)
+            expr_evaluated = eval(expr_string, ctx)
+            _setscopeattr(gui, var_name, expr_evaluated)
+            if holder is not None:
+                holder.set(expr_evaluated)
+        except Exception as e:
+            _warn(f"Exception raised evaluating {expr_string}", e)

     def re_evaluate_expr(self, gui: Gui, var_name: str) -> t.Set[str]:
         """
diff --git a/taipy/gui/utils/_variable_directory.py b/taipy/gui/utils/_variable_directory.py
index c80ba0c9f7..da0f9783d3 100644
--- a/taipy/gui/utils/_variable_directory.py
+++ b/taipy/gui/utils/_variable_directory.py
@@ -117,7 +117,7 @@ def get_var(self, name: str, module: str) -> t.Optional[str]:

 _MODULE_NAME_MAP: t.List[str] = []
 _MODULE_ID = "_TPMDL_"
-_RE_TPMDL_DECODE = re.compile(r"(.*?)" + _MODULE_ID + r"(\d+)$")
+_RE_TPMDL_DECODE = re.compile(f"(.*?){_MODULE_ID}" + r"(\d+)$")

 def _variable_encode(var_name: str, module_name: t.Optional[str]):
diff --git a/taipy/gui/utils/chart_config_builder.py b/taipy/gui/utils/chart_config_builder.py
index a45ac33530..76bb9f14ae 100644
--- a/taipy/gui/utils/chart_config_builder.py
+++ b/taipy/gui/utils/chart_config_builder.py
@@ -208,10 +208,15 @@ def _build_chart_config(gui: "Gui", attributes: t.Dict[str, t.Any], col_types: t
             used_cols = {tr[ax.value] for ax in axis[i] if tr[ax.value]}
             unused_cols = [c for c in icols[i] if c not in used_cols]
             if unused_cols and not any(tr[ax.value] for ax in axis[i]):
-                traces[i] = list(
-                    v or (unused_cols.pop(0) if unused_cols and _Chart_iprops(j) in axis[i] else v)
+                traces[i] = [
+                    v
+                    or (
+                        unused_cols.pop(0)
+                        if unused_cols and _Chart_iprops(j) in axis[i]
+                        else v
+                    )
                     for j, v in enumerate(tr)
-                )
+                ]

     if col_dict is not None:
         reverse_cols = {str(cd.get("dfid")): c for c, cd in col_dict.items()}
diff --git a/taipy/gui/utils/html.py b/taipy/gui/utils/html.py
index c66054855e..1bdcc93a27 100644
--- a/taipy/gui/utils/html.py
+++ b/taipy/gui/utils/html.py
@@ -14,9 +14,5 @@
 def _get_css_var_value(value: t.Any) -> str:
     if isinstance(value, str):
-        if " " in value:
-            return f'"{value}"'
-        return value
-    if isinstance(value, int):
-        return f"{value}px"
-    return f"{value}"
+        return f'"{value}"' if " " in value else value
+    return f"{value}px" if isinstance(value, int) else f"{value}"
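Editor's note: the collapsed `_get_css_var_value` above maps four branches onto two conditional expressions. A standalone sketch of the resulting behavior, with sample outputs:

```python
def get_css_var_value(value):
    # Strings containing spaces must be quoted in CSS; bare ints are
    # treated as pixel sizes; everything else is stringified as-is.
    if isinstance(value, str):
        return f'"{value}"' if " " in value else value
    return f"{value}px" if isinstance(value, int) else f"{value}"

assert get_css_var_value("Open Sans") == '"Open Sans"'
assert get_css_var_value("red") == "red"
assert get_css_var_value(16) == "16px"
assert get_css_var_value(1.5) == "1.5"
```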
diff --git a/taipy/gui/utils/types.py b/taipy/gui/utils/types.py
index 0b93089aef..f92a8af3fc 100644
--- a/taipy/gui/utils/types.py
+++ b/taipy/gui/utils/types.py
@@ -54,14 +54,16 @@ def get_hash():
     @staticmethod
     def _get_holder_prefixes() -> t.List[str]:
         if _TaipyBase.__HOLDER_PREFIXES is None:
-            _TaipyBase.__HOLDER_PREFIXES = [cls.get_hash() + "_" for cls in _TaipyBase.__subclasses__()]
+            _TaipyBase.__HOLDER_PREFIXES = [
+                f"{cls.get_hash()}_" for cls in _TaipyBase.__subclasses__()
+            ]
         return _TaipyBase.__HOLDER_PREFIXES

 class _TaipyData(_TaipyBase):
     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "D"
+        return f"{_TaipyBase._HOLDER_PREFIX}D"

 class _TaipyBool(_TaipyBase):
@@ -73,7 +75,7 @@ def cast_value(self, value: t.Any):

     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "B"
+        return f"{_TaipyBase._HOLDER_PREFIX}B"

 class _TaipyNumber(_TaipyBase):
@@ -94,7 +96,7 @@ def cast_value(self, value: t.Any):

     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "N"
+        return f"{_TaipyBase._HOLDER_PREFIX}N"

 class _TaipyLoNumbers(_TaipyBase):
@@ -109,7 +111,7 @@ def cast_value(self, value: t.Any):

     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "Ln"
+        return f"{_TaipyBase._HOLDER_PREFIX}Ln"

 class _TaipyDate(_TaipyBase):
@@ -128,7 +130,7 @@ def cast_value(self, value: t.Any):

     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "Dt"
+        return f"{_TaipyBase._HOLDER_PREFIX}Dt"

 class _TaipyDateRange(_TaipyBase):
@@ -145,37 +147,37 @@ def cast_value(self, value: t.Any):

     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "Dr"
+        return f"{_TaipyBase._HOLDER_PREFIX}Dr"

 class _TaipyLovValue(_TaipyBase):
     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "Lv"
+        return f"{_TaipyBase._HOLDER_PREFIX}Lv"

 class _TaipyLov(_TaipyBase):
     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "L"
+        return f"{_TaipyBase._HOLDER_PREFIX}L"

 class _TaipyContent(_TaipyBase):
     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "C"
+        return f"{_TaipyBase._HOLDER_PREFIX}C"

 class _TaipyContentImage(_TaipyBase):
     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "Ci"
+        return f"{_TaipyBase._HOLDER_PREFIX}Ci"

 class _TaipyContentHtml(_TaipyBase):
     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "Ch"
+        return f"{_TaipyBase._HOLDER_PREFIX}Ch"

 class _TaipyDict(_TaipyBase):
@@ -185,4 +187,4 @@ def get(self):

     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "Di"
+        return f"{_TaipyBase._HOLDER_PREFIX}Di"
diff --git a/taipy/gui_core/_adapters.py b/taipy/gui_core/_adapters.py
index 6a995090b6..4f50a4abf5 100644
--- a/taipy/gui_core/_adapters.py
+++ b/taipy/gui_core/_adapters.py
@@ -48,8 +48,7 @@ def get(self):
         data = super().get()
         if isinstance(data, Scenario):
             try:
-                scenario = core_get(data.id)
-                if scenario:
+                if scenario := core_get(data.id):
                     return [
                         scenario.id,
                         scenario.is_primary,
@@ -85,7 +84,7 @@ def get(self):

     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "Sc"
+        return f"{_TaipyBase._HOLDER_PREFIX}Sc"

 class _GuiCoreScenarioDagAdapter(_TaipyBase):
@@ -97,8 +96,7 @@ def get(self):
         data = super().get()
         if isinstance(data, Scenario):
             try:
-                scenario = core_get(data.id)
-                if scenario:
+                if scenario := core_get(data.id):
                     dag = data._get_dag()
                     nodes = dict()
                     for id, node in dag.nodes.items():
@@ -131,7 +129,7 @@ def get(self):

     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "ScG"
+        return f"{_TaipyBase._HOLDER_PREFIX}ScG"

 class _GuiCoreDatanodeAdapter(_TaipyBase):
@@ -141,8 +139,7 @@ def get(self):
         data = super().get()
         if isinstance(data, DataNode):
             try:
-                datanode = core_get(data.id)
-                if datanode:
+                if datanode := core_get(data.id):
                     owner = core_get(datanode.owner_id) if datanode.owner_id else None
                     return [
                         datanode.id,
@@ -175,4 +172,4 @@ def get(self):

     @staticmethod
     def get_hash():
-        return _TaipyBase._HOLDER_PREFIX + "Dn"
+        return f"{_TaipyBase._HOLDER_PREFIX}Dn"
{e}") - else: - if not self.__check_readable_editable( + elif self.__check_readable_editable( state, scenario_id, "Scenario", _GuiCoreContext._SCENARIO_SELECTOR_ERROR_VAR ): - return scenario = core_get(scenario_id) + else: + return else: config_id = data.get(_GuiCoreContext.__PROP_CONFIG_ID) scenario_config = Config.scenarios.get(config_id) @@ -405,8 +406,7 @@ def edit_entity(self, state: State, id: str, payload: t.Dict[str, str]): state, entity_id, data.get("type", "Scenario"), _GuiCoreContext._SCENARIO_VIZ_ERROR_VAR ): return - entity: t.Union[Scenario, Sequence] = core_get(entity_id) - if entity: + if entity := core_get(entity_id): try: if isinstance(entity, Scenario): primary = data.get(_GuiCoreContext.__PROP_SCENARIO_PRIMARY) @@ -434,8 +434,7 @@ def submit_entity(self, state: State, id: str, payload: t.Dict[str, str]): f"{data.get('type', 'Scenario')} {entity_id} is not submittable.", ) return - entity = core_get(entity_id) - if entity: + if entity := core_get(entity_id): try: jobs = core_submit(entity) if submission_cb := data.get("on_submission_change"): @@ -520,35 +519,34 @@ def data_node_adapter(self, data): if hasattr(data, "id") and is_readable(data.id) and core_get(data.id) is not None: if isinstance(data, DataNode): return (data.id, data.get_simple_label(), None, _EntityType.DATANODE.value, False) - else: - with self.lock: - self.__do_datanodes_tree() - if self.data_nodes_by_owner: - if isinstance(data, Cycle): + with self.lock: + self.__do_datanodes_tree() + if self.data_nodes_by_owner: + if isinstance(data, Cycle): + return ( + data.id, + data.get_simple_label(), + self.data_nodes_by_owner[data.id] + self.scenario_by_cycle.get(data, []), + _EntityType.CYCLE.value, + False, + ) + elif isinstance(data, Scenario): + return ( + data.id, + data.get_simple_label(), + self.data_nodes_by_owner[data.id] + list(data.sequences.values()), + _EntityType.SCENARIO.value, + data.is_primary, + ) + elif isinstance(data, Sequence): + if datanodes := self.data_nodes_by_owner.get(data.id): return ( data.id, data.get_simple_label(), - self.data_nodes_by_owner[data.id] + self.scenario_by_cycle.get(data, []), - _EntityType.CYCLE.value, + datanodes, + _EntityType.SEQUENCE.value, False, ) - elif isinstance(data, Scenario): - return ( - data.id, - data.get_simple_label(), - self.data_nodes_by_owner[data.id] + list(data.sequences.values()), - _EntityType.SCENARIO.value, - data.is_primary, - ) - elif isinstance(data, Sequence): - if datanodes := self.data_nodes_by_owner.get(data.id): - return ( - data.id, - data.get_simple_label(), - datanodes, - _EntityType.SEQUENCE.value, - False, - ) except Exception as e: _warn( f"Access to {type(data)} ({data.id if hasattr(data, 'id') else 'No_id'}) failed", @@ -661,7 +659,7 @@ def __edit_properties(self, entity: t.Union[Scenario, Sequence, DataNode], data: if isinstance(ent, Scenario): tags = data.get(_GuiCoreContext.__PROP_SCENARIO_TAGS) if isinstance(tags, (list, tuple)): - ent.tags = {t for t in tags} + ent.tags = set(tags) name = data.get(_GuiCoreContext.__PROP_ENTITY_NAME) if isinstance(name, str): ent.properties[_GuiCoreContext.__PROP_ENTITY_NAME] = name @@ -700,62 +698,62 @@ def get_scenarios_for_owner(self, owner_id: str): def get_data_node_history(self, datanode: DataNode, id: str): if ( - id - and isinstance(datanode, DataNode) - and id == datanode.id - and (dn := core_get(id)) - and isinstance(dn, DataNode) + not id + or not isinstance(datanode, DataNode) + or id != datanode.id + or not (dn := core_get(id)) + or not isinstance(dn, DataNode) ): - res = 
@@ -700,62 +698,62 @@ def get_scenarios_for_owner(self, owner_id: str):

     def get_data_node_history(self, datanode: DataNode, id: str):
         if (
-            id
-            and isinstance(datanode, DataNode)
-            and id == datanode.id
-            and (dn := core_get(id))
-            and isinstance(dn, DataNode)
+            not id
+            or not isinstance(datanode, DataNode)
+            or id != datanode.id
+            or not (dn := core_get(id))
+            or not isinstance(dn, DataNode)
         ):
-            res = []
-            for e in dn.edits:
-                job_id = e.get("job_id")
-                job: Job = None
-                if job_id:
-                    if not is_readable(job_id):
-                        job_id += " not readable"
-                    else:
-                        job = core_get(job_id)
-                res.append(
-                    (
-                        e.get("timestamp"),
-                        job_id if job_id else e.get("writer_identifier", ""),
-                        f"Execution of task {job.task.get_simple_label()}."
-                        if job and job.task
-                        else e.get("comment", ""),
-                    )
+            return _DoNotUpdate()
+        res = []
+        for e in dn.edits:
+            job_id = e.get("job_id")
+            job: Job = None
+            if job_id:
+                if not is_readable(job_id):
+                    job_id += " not readable"
+                else:
+                    job = core_get(job_id)
+            res.append(
+                (
+                    e.get("timestamp"),
+                    job_id if job_id else e.get("writer_identifier", ""),
+                    f"Execution of task {job.task.get_simple_label()}."
+                    if job and job.task
+                    else e.get("comment", ""),
                 )
-            return list(reversed(sorted(res, key=lambda r: r[0])))
-        return _DoNotUpdate()
+            )
+        return list(reversed(sorted(res, key=lambda r: r[0])))

     def get_data_node_data(self, datanode: DataNode, id: str):
         if (
-            id
-            and isinstance(datanode, DataNode)
-            and id == datanode.id
-            and (dn := core_get(id))
-            and isinstance(dn, DataNode)
+            not id
+            or not isinstance(datanode, DataNode)
+            or id != datanode.id
+            or not (dn := core_get(id))
+            or not isinstance(dn, DataNode)
         ):
-            if dn._last_edit_date:
-                if isinstance(dn, _AbstractTabularDataNode):
+            return _DoNotUpdate()
+        if dn._last_edit_date:
+            if isinstance(dn, _AbstractTabularDataNode):
+                return (None, None, True, None)
+            try:
+                value = dn.read()
+                if isinstance(value, (pd.DataFrame, pd.Series)):
                     return (None, None, True, None)
-                try:
-                    value = dn.read()
-                    if isinstance(value, (pd.DataFrame, pd.Series)):
-                        return (None, None, True, None)
-                    return (
-                        value,
-                        "date"
-                        if "date" in type(value).__name__
-                        else type(value).__name__
-                        if isinstance(value, Number)
-                        else None,
-                        None,
-                        None,
-                    )
-                except Exception as e:
-                    return (None, None, None, f"read data_node: {e}")
-            return (None, None, None, f"Data unavailable for {dn.get_simple_label()}")
-        return _DoNotUpdate()
+                return (
+                    value,
+                    "date"
+                    if "date" in type(value).__name__
+                    else type(value).__name__
+                    if isinstance(value, Number)
+                    else None,
+                    None,
+                    None,
+                )
+            except Exception as e:
+                return (None, None, None, f"read data_node: {e}")
+        return (None, None, None, f"Data unavailable for {dn.get_simple_label()}")

     def __check_readable_editable(self, state: State, id: str, type: str, var: str):
         if not is_readable(id):
diff --git a/taipy/rest/api/resources/cycle.py b/taipy/rest/api/resources/cycle.py
index 6614073013..14100eb46d 100644
--- a/taipy/rest/api/resources/cycle.py
+++ b/taipy/rest/api/resources/cycle.py
@@ -28,10 +28,10 @@
 def _get_or_raise(cycle_id: str) -> None:
     manager = _CycleManagerFactory._build_manager()
-    cycle = manager._get(cycle_id)
-    if not cycle:
+    if cycle := manager._get(cycle_id):
+        return cycle
+    else:
         raise NonExistingCycle(cycle_id)
-    return cycle

 class CycleResource(Resource):
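Editor's note: `get_data_node_history` and `get_data_node_data` above invert their compound preconditions into guard clauses, so the error path exits first and the long happy path runs unindented. A simplified sketch of the pattern (the names and the `None` sentinel are illustrative stand-ins, not Taipy's actual types):

```python
def get_history(datanode_id, requested_id):
    # Guard clause: bail out early when any precondition fails,
    # instead of nesting the whole body under one big `if`.
    if not requested_id or requested_id != datanode_id:
        return None  # stand-in for taipy's _DoNotUpdate sentinel
    return f"edits of {datanode_id}"

assert get_history("DATANODE_dn_1", "DATANODE_dn_1") == "edits of DATANODE_dn_1"
assert get_history("DATANODE_dn_1", "DATANODE_dn_2") is None
```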
kwargs.get("logger") def fetch_config(self, config_id): - config = Config.data_nodes.get(config_id) - if not config: + if config := Config.data_nodes.get(config_id): + return config + else: raise NonExistingDataNodeConfig(config_id) - return config @_middleware def get(self): diff --git a/taipy/rest/api/resources/job.py b/taipy/rest/api/resources/job.py index 299c823e5c..830eed074b 100644 --- a/taipy/rest/api/resources/job.py +++ b/taipy/rest/api/resources/job.py @@ -197,10 +197,10 @@ def __init__(self, **kwargs): self.logger = kwargs.get("logger") def fetch_config(self, config_id): - config = Config.tasks.get(config_id) - if not config: + if config := Config.tasks.get(config_id): + return config + else: raise NonExistingTaskConfig(config_id) - return config @_middleware def get(self): diff --git a/taipy/rest/api/resources/scenario.py b/taipy/rest/api/resources/scenario.py index 1eea6f0c05..e829310f1f 100644 --- a/taipy/rest/api/resources/scenario.py +++ b/taipy/rest/api/resources/scenario.py @@ -413,10 +413,10 @@ def __init__(self, **kwargs): self.logger = kwargs.get("logger") def fetch_config(self, config_id): - config = Config.scenarios.get(config_id) - if not config: + if config := Config.scenarios.get(config_id): + return config + else: raise NonExistingScenarioConfig(config_id) - return config @_middleware def get(self): diff --git a/taipy/rest/api/resources/task.py b/taipy/rest/api/resources/task.py index 75ff847761..83cafd5d05 100644 --- a/taipy/rest/api/resources/task.py +++ b/taipy/rest/api/resources/task.py @@ -194,10 +194,10 @@ def __init__(self, **kwargs): self.logger = kwargs.get("logger") def fetch_config(self, config_id): - config = Config.tasks.get(config_id) - if not config: + if config := Config.tasks.get(config_id): + return config + else: raise NonExistingTaskConfig(config_id) - return config @_middleware def get(self): diff --git a/taipy/rest/commons/apispec.py b/taipy/rest/commons/apispec.py index e1b83b9da8..9f4f2c658e 100644 --- a/taipy/rest/commons/apispec.py +++ b/taipy/rest/commons/apispec.py @@ -34,9 +34,7 @@ def _rule_for_view(view, app=None): if not endpoint: raise APISpecError("Could not find endpoint for view {0}".format(view)) - # WARNING: Assume 1 rule per view function for now - rule = app.url_map._rules_by_endpoint[endpoint][0] - return rule + return app.url_map._rules_by_endpoint[endpoint][0] class APISpecExt: diff --git a/taipy/rest/commons/encoder.py b/taipy/rest/commons/encoder.py index 3b949db347..53399e2e0a 100644 --- a/taipy/rest/commons/encoder.py +++ b/taipy/rest/commons/encoder.py @@ -20,9 +20,8 @@ class _CustomEncoder(json.JSONEncoder): def default(self, o: Any) -> Json: if isinstance(o, Enum): - result = o.value + return o.value elif isinstance(o, datetime): - result = {"__type__": "Datetime", "__value__": o.isoformat()} + return {"__type__": "Datetime", "__value__": o.isoformat()} else: - result = json.JSONEncoder.default(self, o) - return result + return json.JSONEncoder.default(self, o) diff --git a/taipy/templates/default/hooks/post_gen_project.py b/taipy/templates/default/hooks/post_gen_project.py index 44185aaffd..206e4025c6 100644 --- a/taipy/templates/default/hooks/post_gen_project.py +++ b/taipy/templates/default/hooks/post_gen_project.py @@ -98,13 +98,13 @@ def handle_multi_page_app(pages): with open(os.path.join(os.getcwd(), "pages", "page_example", "page_example.md"), "r") as page_md_file: page_md_content = page_md_file.read() page_md_content = page_md_content.replace("Page example", page_name.replace("_", " ").title()) - with 
diff --git a/taipy/templates/default/hooks/post_gen_project.py b/taipy/templates/default/hooks/post_gen_project.py
index 44185aaffd..206e4025c6 100644
--- a/taipy/templates/default/hooks/post_gen_project.py
+++ b/taipy/templates/default/hooks/post_gen_project.py
@@ -98,13 +98,13 @@ def handle_multi_page_app(pages):
     with open(os.path.join(os.getcwd(), "pages", "page_example", "page_example.md"), "r") as page_md_file:
         page_md_content = page_md_file.read()
     page_md_content = page_md_content.replace("Page example", page_name.replace("_", " ").title())
-    with open(os.path.join(os.getcwd(), "pages", page_name, page_name + ".md"), "w") as page_md_file:
+    with open(os.path.join(os.getcwd(), "pages", page_name, f"{page_name}.md"), "w") as page_md_file:
         page_md_file.write(page_md_content)

     with open(os.path.join(os.getcwd(), "pages", "page_example", "page_example.py"), "r") as page_content_file:
         page_py_content = page_content_file.read()
     page_py_content = page_py_content.replace("page_example", page_name)
-    with open(os.path.join(os.getcwd(), "pages", page_name, page_name + ".py"), "w") as page_content_file:
+    with open(os.path.join(os.getcwd(), "pages", page_name, f"{page_name}.py"), "w") as page_content_file:
         page_content_file.write(page_py_content)

     with open(os.path.join(os.getcwd(), "pages", "__init__.py"), "a") as page_init_file:
@@ -155,16 +155,14 @@ def generate_main_file():
 use_core = "{{ cookiecutter.__core }}".upper()
 use_rest = "{{ cookiecutter.__rest }}".upper()

-handle_services(use_rest in ["YES", "Y"], use_core in ["YES", "Y"])
+handle_services(use_rest in {"YES", "Y"}, use_core in {"YES", "Y"})

 pages = "{{ cookiecutter.__pages }}".split(" ")
-# Remove empty string from pages list
-pages = [page for page in pages if page != ""]
-if len(pages) == 0:
-    handle_single_page_app()
-else:
+if pages := [page for page in pages if page != ""]:
     handle_multi_page_app(pages)
+else:
+    handle_single_page_app()

 generate_main_file()

 # Remove the sections folder
diff --git a/taipy/templates/scenario-management/hooks/post_gen_project.py b/taipy/templates/scenario-management/hooks/post_gen_project.py
index fb14854b42..62aa8ef972 100644
--- a/taipy/templates/scenario-management/hooks/post_gen_project.py
+++ b/taipy/templates/scenario-management/hooks/post_gen_project.py
@@ -19,7 +19,7 @@

 # Use TOML config file or not
 use_toml_config = "{{ cookiecutter.__use_toml_config }}".upper()
-if use_toml_config == "YES" or use_toml_config == "Y":
+if use_toml_config in {"YES", "Y"}:
     os.remove(os.path.join(os.getcwd(), "config", "config.py"))
     os.rename(
         os.path.join(os.getcwd(), "config", "config_with_toml.py"), os.path.join(os.getcwd(), "config", "config.py")
diff --git a/taipy/templates/scenario-management/{{cookiecutter.__root_folder_name}}/config/config.py b/taipy/templates/scenario-management/{{cookiecutter.__root_folder_name}}/config/config.py
index 20dcb4bc62..c7dcb04a75 100644
--- a/taipy/templates/scenario-management/{{cookiecutter.__root_folder_name}}/config/config.py
+++ b/taipy/templates/scenario-management/{{cookiecutter.__root_folder_name}}/config/config.py
@@ -28,9 +28,10 @@ def configure():
         input=[initial_dataset_cfg, replacement_type_cfg],
         output=cleaned_dataset_cfg,
     )
-    scenario_cfg = Config.configure_scenario(
-        "scenario_configuration", task_configs=[clean_data_cfg], frequency=Frequency.DAILY
+    return Config.configure_scenario(
+        "scenario_configuration",
+        task_configs=[clean_data_cfg],
+        frequency=Frequency.DAILY,
     )
-    return scenario_cfg

     # Comment, remove or replace the previous lines with your own use case #
     ##################################################################################################################
diff --git a/taipy/templates/tests/test_scenario_mgt_template.py b/taipy/templates/tests/test_scenario_mgt_template.py
index 4b489c8d9c..dd175ac328 100644
--- a/taipy/templates/tests/test_scenario_mgt_template.py
+++ b/taipy/templates/tests/test_scenario_mgt_template.py
@@ -81,7 +81,13 @@ def test_scenario_management_without_toml_config(tmpdir):
     with open(os.path.join(tmpdir, "foo_app", "config", "config.py")) as config_file:
         config_content = config_file.read()
     assert 'Config.load("config/config.toml")' not in config_content
-    assert all([x in config_content for x in ["Config.configure_csv_data_node", "Config.configure_task"]])
+    assert all(
+        x in config_content
+        for x in [
+            "Config.configure_csv_data_node",
+            "Config.configure_task",
+        ]
+    )

     oldpwd = os.getcwd()
     os.chdir(os.path.join(tmpdir, "foo_app"))
diff --git a/tests/config/global_app/test_global_app_config.py b/tests/config/global_app/test_global_app_config.py
index 931aa49ed1..e23ce21737 100644
--- a/tests/config/global_app/test_global_app_config.py
+++ b/tests/config/global_app/test_global_app_config.py
@@ -43,4 +43,4 @@ def test_block_update_global_app_config():

     # Test if the global_config stay as default
     assert Config.global_config.foo is None
-    assert len(Config.global_config.properties) == 0
+    assert not Config.global_config.properties
diff --git a/tests/config/test_override_config.py b/tests/config/test_override_config.py
index 47e19fdc45..cc61eb023a 100644
--- a/tests/config/test_override_config.py
+++ b/tests/config/test_override_config.py
@@ -20,7 +20,7 @@

 def test_override_default_configuration_with_code_configuration():
-    assert not Config.global_config.root_folder == "foo"
+    assert Config.global_config.root_folder != "foo"

     assert len(Config.unique_sections) == 1
     assert Config.unique_sections["unique_section_name"] is not None
diff --git a/tests/config/test_section_serialization.py b/tests/config/test_section_serialization.py
index 58397eff76..c08c196eb3 100644
--- a/tests/config/test_section_serialization.py
+++ b/tests/config/test_section_serialization.py
@@ -34,11 +34,11 @@ class CustomClass:

 class CustomEncoder(json.JSONEncoder):
     def default(self, o):
-        if isinstance(o, datetime):
-            result = {"__type__": "Datetime", "__value__": o.isoformat()}
-        else:
-            result = json.JSONEncoder.default(self, o)
-        return result
+        return (
+            {"__type__": "Datetime", "__value__": o.isoformat()}
+            if isinstance(o, datetime)
+            else json.JSONEncoder.default(self, o)
+        )

 class CustomDecoder(json.JSONDecoder):
diff --git a/tests/core/_manager/test_manager.py b/tests/core/_manager/test_manager.py
index a00eea48aa..742630cf41 100644
--- a/tests/core/_manager/test_manager.py
+++ b/tests/core/_manager/test_manager.py
@@ -41,10 +41,7 @@ class MockEntity:
     def __init__(self, id: str, name: str, version: str = None) -> None:
         self.id = id
         self.name = name
-        if version:
-            self._version = version
-        else:
-            self._version = _VersionManager._get_latest_version()
+        self._version = version if version else _VersionManager._get_latest_version()

 class MockConverter(_AbstractConverter):
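Editor's note: many of the test hunks in this patch drop the list wrapper inside `all(...)`. The difference is that `all()` over a bare generator short-circuits on the first falsy element, whereas the `[...]` form materializes the whole list first; a small sketch:

```python
def is_completed(job):
    print(f"checking {job}")
    return job != "failed_job"

jobs = ["job_1", "failed_job", "job_2"]

# Generator: stops checking right after "failed_job".
assert not all(is_completed(job) for job in jobs)

# List comprehension: evaluates is_completed() for every job first.
assert not all([is_completed(job) for job in jobs])
```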
diff --git a/tests/core/_orchestrator/test_orchestrator.py b/tests/core/_orchestrator/test_orchestrator.py
index d6267e508a..5fdfbee764 100644
--- a/tests/core/_orchestrator/test_orchestrator.py
+++ b/tests/core/_orchestrator/test_orchestrator.py
@@ -261,19 +261,19 @@ def test_scenario_only_submit_same_task_once():

     jobs = _Orchestrator.submit(scenario_1)
     assert len(jobs) == 3
-    assert all([job.is_completed() for job in jobs])
+    assert all(job.is_completed() for job in jobs)
     assert all(not _Orchestrator._is_blocked(job) for job in jobs)
     assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.COMPLETED

     jobs = _Orchestrator.submit(sequence_1)
     assert len(jobs) == 2
-    assert all([job.is_completed() for job in jobs])
+    assert all(job.is_completed() for job in jobs)
     assert all(not _Orchestrator._is_blocked(job) for job in jobs)
     assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.COMPLETED

     jobs = _Orchestrator.submit(sequence_2)
     assert len(jobs) == 2
-    assert all([job.is_completed() for job in jobs])
+    assert all(job.is_completed() for job in jobs)
     assert all(not _Orchestrator._is_blocked(job) for job in jobs)
     assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.COMPLETED
@@ -309,7 +312,10 @@ def test_update_status_fail_job():
     jobs = _Orchestrator.submit(scenario_1)
     tasks_jobs = {job._task.id: job for job in jobs}
     assert tasks_jobs["task_0"].is_failed()
-    assert all([job.is_abandoned() for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]])
+    assert all(
+        job.is_abandoned()
+        for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]
+    )
     assert tasks_jobs["task_3"].is_completed()
     assert all(not _Orchestrator._is_blocked(job) for job in jobs)
     assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.FAILED
@@ -317,7 +320,10 @@ def test_update_status_fail_job():
     jobs = _Orchestrator.submit(scenario_2)
     tasks_jobs = {job._task.id: job for job in jobs}
     assert tasks_jobs["task_0"].is_failed()
-    assert all([job.is_abandoned() for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]])
+    assert all(
+        job.is_abandoned()
+        for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]
+    )
     assert tasks_jobs["task_3"].is_completed()
     assert all(not _Orchestrator._is_blocked(job) for job in jobs)
     assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.FAILED
@@ -349,7 +355,7 @@ def test_update_status_fail_job_in_parallel_one_sequence():
     task_3 = Task("task_config_3", {}, print, input=[dn_2], id="task_3")
     sc = Scenario(
         "scenario_config_1",
-        set([task_0, task_1, task_2, task_3]),
+        {task_0, task_1, task_2, task_3},
         {},
         set(),
         "scenario_1",
@@ -368,7 +374,12 @@ def test_update_status_fail_job_in_parallel_one_sequence():

     tasks_jobs = {job._task.id: job for job in jobs}
     assert_true_after_time(tasks_jobs["task_0"].is_failed)
-    assert_true_after_time(lambda: all([job.is_abandoned() for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]]))
+    assert_true_after_time(
+        lambda: all(
+            job.is_abandoned()
+            for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]
+        )
+    )
     assert_true_after_time(lambda: all(not _Orchestrator._is_blocked(job) for job in jobs))
     submit_id = jobs[0].submit_id
     submission = _SubmissionManager._get(submit_id)
@@ -386,7 +397,13 @@ def test_update_status_fail_job_in_parallel_one_scenario():
     task_1 = Task("task_config_1", {}, print, input=[dn_0], output=[dn_1], id="task_1")
     task_2 = Task("task_config_2", {}, print, input=[dn_1], id="task_2")
     task_3 = Task("task_config_3", {}, print, input=[dn_2], id="task_3")
-    sc = Scenario("scenario_config_1", set([task_0, task_1, task_2, task_3]), {}, set(), "scenario_1")
+    sc = Scenario(
+        "scenario_config_1",
+        {task_0, task_1, task_2, task_3},
+        {},
+        set(),
+        "scenario_1",
+    )

     _DataManager._set(dn_0)
     _DataManager._set(dn_1)
@@ -402,7 +419,12 @@ def test_update_status_fail_job_in_parallel_one_scenario():
     tasks_jobs = {job._task.id: job for job in jobs}
     assert_true_after_time(tasks_jobs["task_0"].is_failed)
     assert_true_after_time(tasks_jobs["task_3"].is_completed)
-    assert_true_after_time(lambda: all([job.is_abandoned() for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]]))
+    assert_true_after_time(
+        lambda: all(
+            job.is_abandoned()
+            for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]
+        )
+    )
    assert_true_after_time(lambda: all(not _Orchestrator._is_blocked(job) for job in jobs))
     submit_id = jobs[0].submit_id
     submission = _SubmissionManager._get(submit_id)
diff --git a/tests/core/common/test_warn_if_inputs_not_ready.py b/tests/core/common/test_warn_if_inputs_not_ready.py
index 9138e01906..bdd480f238 100644
--- a/tests/core/common/test_warn_if_inputs_not_ready.py
+++ b/tests/core/common/test_warn_if_inputs_not_ready.py
@@ -28,7 +28,7 @@ def test_warn_inputs_all_not_ready(caplog):
         f"path : {input_dn.path} "
         for input_dn in data_nodes
     ]
-    assert all([expected_output in stdout for expected_output in expected_outputs])
+    assert all(expected_output in stdout for expected_output in expected_outputs)

 def test_warn_inputs_all_ready(caplog):
@@ -45,7 +45,10 @@ def test_warn_inputs_all_ready(caplog):
         f"path : {input_dn.path} "
         for input_dn in data_nodes
     ]
-    assert all([expected_output not in stdout for expected_output in not_expected_outputs])
+    assert all(
+        expected_output not in stdout
+        for expected_output in not_expected_outputs
+    )

 def test_warn_inputs_one_ready(caplog):
@@ -69,8 +72,11 @@ def test_warn_inputs_one_ready(caplog):
         for input_dn in [data_nodes[one]]
     ]

-    assert all([expected_output in stdout for expected_output in expected_outputs])
-    assert all([expected_output not in stdout for expected_output in not_expected_outputs])
+    assert all(expected_output in stdout for expected_output in expected_outputs)
+    assert all(
+        expected_output not in stdout
+        for expected_output in not_expected_outputs
+    )

 def test_submit_task_with_input_dn_wrong_file_path(caplog):
@@ -92,4 +98,4 @@ def test_submit_task_with_input_dn_wrong_file_path(caplog):
         f"path : {input_dn.path} "
         for input_dn in dns
     ]
-    assert all([expected_output in stdout for expected_output in expected_outputs])
+    assert all(expected_output in stdout for expected_output in expected_outputs)
diff --git a/tests/core/config/test_config.py b/tests/core/config/test_config.py
index 407d2d83b0..51ab82e291 100644
--- a/tests/core/config/test_config.py
+++ b/tests/core/config/test_config.py
@@ -27,7 +27,16 @@ def test_configure_excel_data_node(self):
         assert len(Config.data_nodes) == 2

     def test_configure_generic_data_node(self):
-        a, b, c, d, e, f, g, h = "foo", print, print, tuple([]), tuple([]), Scope.SCENARIO, timedelta(1), "qux"
+        a, b, c, d, e, f, g, h = (
+            "foo",
+            print,
+            print,
+            (),
+            (),
+            Scope.SCENARIO,
+            timedelta(1),
+            "qux",
+        )
         Config.configure_generic_data_node(a, b, c, d, e, f, g, property=h)
         assert len(Config.data_nodes) == 2
diff --git a/tests/core/config/test_config_serialization.py b/tests/core/config/test_config_serialization.py
index ebef41456e..ba65e67d2d 100644
--- a/tests/core/config/test_config_serialization.py
+++ b/tests/core/config/test_config_serialization.py
@@ -30,15 +30,11 @@ def migrate_csv_path(dn):

 def compare_function(*data_node_results):
     comparison_result = {}
-    current_result_index = 0
-    for current_result in data_node_results:
+    for current_result_index, current_result in enumerate(data_node_results):
         comparison_result[current_result_index] = {}
-        next_result_index = 0
-        for next_result in data_node_results:
+        for next_result_index, next_result in enumerate(data_node_results):
             print(f"comparing result {current_result_index} with result {next_result_index}")
             comparison_result[current_result_index][next_result_index] = next_result - current_result
-            next_result_index += 1
-        current_result_index += 1
     return comparison_result
@@ -49,11 +45,11 @@ class CustomClass:

 class CustomEncoder(json.JSONEncoder):
     def default(self, o):
-        if isinstance(o, datetime):
-            result = {"__type__": "Datetime", "__value__": o.isoformat()}
-        else:
-            result = json.JSONEncoder.default(self, o)
-        return result
+        return (
+            {"__type__": "Datetime", "__value__": o.isoformat()}
+            if isinstance(o, datetime)
+            else json.JSONEncoder.default(self, o)
+        )

 class CustomDecoder(json.JSONDecoder):
@@ -270,9 +266,12 @@ def test_read_write_toml_configuration_file():
         Config.sections[DataNodeConfig.name]["test_json_dn"].id,
         Config.sections[DataNodeConfig.name]["test_pickle_dn"].id,
     ]
-    sequences = {}
-    for sequence_name, sequence_tasks in Config.sections[ScenarioConfig.name]["test_scenario"].sequences.items():
-        sequences[sequence_name] = [task.id for task in sequence_tasks]
+    sequences = {
+        sequence_name: [task.id for task in sequence_tasks]
+        for sequence_name, sequence_tasks in Config.sections[
+            ScenarioConfig.name
+        ]["test_scenario"].sequences.items()
+    }

     assert sequences == {"sequence1": [Config.sections[TaskConfig.name]["test_task"].id]}
     assert dict(Config.sections[ScenarioConfig.name]["test_scenario"].comparators) == {
@@ -469,9 +468,12 @@ def test_read_write_json_configuration_file():
         Config.sections[DataNodeConfig.name]["test_json_dn"].id,
         Config.sections[DataNodeConfig.name]["test_pickle_dn"].id,
     ]
-    sequences = {}
-    for sequence_name, sequence_tasks in Config.sections[ScenarioConfig.name]["test_scenario"].sequences.items():
-        sequences[sequence_name] = [task.id for task in sequence_tasks]
+    sequences = {
+        sequence_name: [task.id for task in sequence_tasks]
+        for sequence_name, sequence_tasks in Config.sections[
+            ScenarioConfig.name
+        ]["test_scenario"].sequences.items()
+    }

     assert sequences == {"sequence1": [Config.sections[TaskConfig.name]["test_task"].id]}
     assert dict(Config.sections[ScenarioConfig.name]["test_scenario"].comparators) == {
@@ -610,10 +612,12 @@ def test_read_write_toml_configuration_file_migrate_sequence_in_scenario():
     assert [task.id for task in Config.sections[ScenarioConfig.name]["test_scenario"].tasks] == [
         Config.sections[TaskConfig.name]["test_task"].id
     ]
-    assert [
+    assert not [
         additional_data_node.id
-        for additional_data_node in Config.sections[ScenarioConfig.name]["test_scenario"].additional_data_nodes
-    ] == []
+        for additional_data_node in Config.sections[ScenarioConfig.name][
+            "test_scenario"
+        ].additional_data_nodes
+    ]
     assert sorted([data_node.id for data_node in Config.sections[ScenarioConfig.name]["test_scenario"].data_nodes]) == [
         Config.sections[DataNodeConfig.name]["test_csv_dn"].id,
         Config.sections[DataNodeConfig.name]["test_json_dn"].id,
@@ -784,10 +788,12 @@ def test_read_write_json_configuration_file_migrate_sequence_in_scenario():
     assert [task.id for task in Config.sections[ScenarioConfig.name]["test_scenario"].tasks] == [
         Config.sections[TaskConfig.name]["test_task"].id
     ]
-    assert [
+    assert not [
         additional_data_node.id
-        for additional_data_node in Config.sections[ScenarioConfig.name]["test_scenario"].additional_data_nodes
-    ] == []
+        for additional_data_node in Config.sections[ScenarioConfig.name][
+            "test_scenario"
+        ].additional_data_nodes
+    ]
     assert sorted([data_node.id for data_node in Config.sections[ScenarioConfig.name]["test_scenario"].data_nodes]) == [
         Config.sections[DataNodeConfig.name]["test_csv_dn"].id,
         Config.sections[DataNodeConfig.name]["test_json_dn"].id,
diff --git a/tests/core/config/test_override_config.py b/tests/core/config/test_override_config.py
index 8bf7b76b0d..6ad659de1f 100644
--- a/tests/core/config/test_override_config.py
+++ b/tests/core/config/test_override_config.py
@@ -20,7 +20,7 @@

 def test_override_default_configuration_with_code_configuration():
-    assert not Config.core.root_folder == "foo"
+    assert Config.core.root_folder != "foo"
     assert len(Config.data_nodes) == 1
     assert len(Config.tasks) == 1
     assert len(Config.scenarios) == 1
diff --git a/tests/core/config/test_scenario_config.py b/tests/core/config/test_scenario_config.py
index 1a8fff3a5f..04493f6efa 100644
--- a/tests/core/config/test_scenario_config.py
+++ b/tests/core/config/test_scenario_config.py
@@ -150,7 +150,12 @@ def test_scenario_getitem():

     assert scenario.tasks == [task_config_1, task_config_2]
     assert scenario.additional_data_node_configs == [dn_config_4]
-    assert set(scenario.data_nodes) == set([dn_config_4, dn_config_1, dn_config_2, dn_config_3])
+    assert set(scenario.data_nodes) == {
+        dn_config_4,
+        dn_config_1,
+        dn_config_2,
+        dn_config_3,
+    }
     assert Config.scenarios[scenario_id].properties == scenario.properties
diff --git a/tests/core/conftest.py b/tests/core/conftest.py
index 4d831dbb35..99381445ad 100644
--- a/tests/core/conftest.py
+++ b/tests/core/conftest.py
@@ -104,7 +104,7 @@ def excel_file_with_multi_sheet(tmpdir_factory) -> str:

     fn = tmpdir_factory.mktemp("data").join("df.xlsx")
     with pd.ExcelWriter(str(fn)) as writer:
-        for key in excel_multi_sheet.keys():
+        for key in excel_multi_sheet:
             excel_multi_sheet[key].to_excel(writer, key, index=False)
     return fn.strpath
@@ -275,7 +275,7 @@ def sequence():
         [],
         SequenceId("sequence_id"),
         owner_id="owner_id",
-        parent_ids=set(["parent_id_1", "parent_id_2"]),
+        parent_ids={"parent_id_1", "parent_id_2"},
         version="random_version_number",
     )
diff --git a/tests/core/data/test_data_node.py b/tests/core/data/test_data_node.py
index 5a7582d0e0..87ac514e6f 100644
--- a/tests/core/data/test_data_node.py
+++ b/tests/core/data/test_data_node.py
@@ -721,6 +721,7 @@ def test_label(self):
         )

         with mock.patch("taipy.core.get") as get_mck:
+
             class MockOwner:
                 label = "owner_label"
@@ -728,7 +729,7 @@ def get_label(self):
                 return self.label

             get_mck.return_value = MockOwner()
-            assert dn.get_label() == "owner_label > " + dn.name
+            assert dn.get_label() == f"owner_label > {dn.name}"
             assert dn.get_simple_label() == dn.name

     def test_explicit_label(self):
diff --git a/tests/core/data/test_filter_data_node.py b/tests/core/data/test_filter_data_node.py
index 8ed8237948..94a2fcd6cc 100644
--- a/tests/core/data/test_filter_data_node.py
+++ b/tests/core/data/test_filter_data_node.py
@@ -218,9 +218,9 @@ def test_filter_by_get_item(default_data_frame):
     assert len(filtered_df_dn) == len(default_data_frame[1])
     assert filtered_df_dn.to_dict() == default_data_frame[1].to_dict()

-    filtered_df_dn = df_dn[0:2]
+    filtered_df_dn = df_dn[:2]
     assert isinstance(filtered_df_dn, pd.DataFrame)
-    assert filtered_df_dn.shape == default_data_frame[0:2].shape
+    assert filtered_df_dn.shape == default_data_frame[:2].shape
     assert len(filtered_df_dn) == 2

     bool_df = default_data_frame.copy(deep=True) > 4
@@ -244,14 +244,14 @@ def test_filter_by_get_item(default_data_frame):
     filtered_custom_dn = custom_dn["a"]
     assert isinstance(filtered_custom_dn, List)
     assert len(filtered_custom_dn) == 10
-    assert filtered_custom_dn == [i for i in range(10)]
+    assert filtered_custom_dn == list(range(10))

-    filtered_custom_dn = custom_dn[0:5]
+    filtered_custom_dn = custom_dn[:5]
     assert isinstance(filtered_custom_dn, List)
-    assert all([isinstance(x, CustomClass) for x in filtered_custom_dn])
+    assert all(isinstance(x, CustomClass) for x in filtered_custom_dn)
     assert len(filtered_custom_dn) == 5

-    bool_1d_index = [True if i < 5 else False for i in range(10)]
+    bool_1d_index = [i < 5 for i in range(10)]
     filtered_custom_dn = custom_dn[bool_1d_index]
     assert isinstance(filtered_custom_dn, List)
     assert len(filtered_custom_dn) == 5
@@ -259,7 +259,7 @@ def test_filter_by_get_item(default_data_frame):

     filtered_custom_dn = custom_dn[["a", "b"]]
     assert isinstance(filtered_custom_dn, List)
-    assert all([isinstance(x, Dict) for x in filtered_custom_dn])
+    assert all(isinstance(x, Dict) for x in filtered_custom_dn)
     assert len(filtered_custom_dn) == 10
     assert filtered_custom_dn == [{"a": i, "b": i * 2} for i in range(10)]
@@ -276,8 +276,8 @@ def test_filter_by_get_item(default_data_frame):
     assert len(filtered_multi_sheet_excel_custom_dn) == 10
     expected_value = [CustomClass(i, i * 2) for i in range(10)]
     assert all(
-        [
-            expected.a == filtered.a and expected.b == filtered.b
-            for expected, filtered in zip(expected_value, filtered_multi_sheet_excel_custom_dn)
-        ]
+        expected.a == filtered.a and expected.b == filtered.b
+        for expected, filtered in zip(
+            expected_value, filtered_multi_sheet_excel_custom_dn
+        )
     )
diff --git a/tests/core/data/test_generic_data_node.py b/tests/core/data/test_generic_data_node.py
index 6f633eba25..71dbfd88e4 100644
--- a/tests/core/data/test_generic_data_node.py
+++ b/tests/core/data/test_generic_data_node.py
@@ -46,7 +46,7 @@ def read_fct_modify_data_node_name(data_node_id: DataNodeId, name: str):

 def reset_data():
-    TestGenericDataNode.data = [i for i in range(10)]
+    TestGenericDataNode.data = list(range(10))

 class TestGenericDataNode:
@@ -201,7 +201,7 @@ def test_read_write_generic_datanode_with_arguments(self):
             },
         )

-        assert all([a + 1 == b for a, b in zip(self.data, generic_dn.read())])
+        assert all(a + 1 == b for a, b in zip(self.data, generic_dn.read()))
         assert len(generic_dn.read()) == 10

         generic_dn.write(self.data)
@@ -221,7 +221,7 @@ def test_read_write_generic_datanode_with_non_list_arguments(self):
             },
         )

-        assert all([a + 1 == b for a, b in zip(self.data, generic_dn.read())])
+        assert all(a + 1 == b for a, b in zip(self.data, generic_dn.read()))
         assert len(generic_dn.read()) == 10

         generic_dn.write(self.data)
diff --git a/tests/core/data/test_json_data_node.py b/tests/core/data/test_json_data_node.py
index c6e77c9d9e..67468abf00 100644
--- a/tests/core/data/test_json_data_node.py
+++ b/tests/core/data/test_json_data_node.py
@@ -194,12 +194,12 @@ def test_append_to_a_dictionary(self, json_file):
         # Append another dictionary
         append_data_1 = {"d": 1, "e": 2, "f": 3}
         json_dn.append(append_data_1)
-        assert json_dn.read() == {**original_data, **append_data_1}
+        assert json_dn.read() == original_data | append_data_1

         # Append an overlap dictionary
         append_data_data_2 = {"a": 10, "b": 20, "g": 30}
         json_dn.append(append_data_data_2)
-        assert json_dn.read() == {**original_data, **append_data_1, **append_data_data_2}
+        assert json_dn.read() == original_data | append_data_1 | append_data_data_2

     def test_write(self, json_file):
         json_dn = JSONDataNode("foo", Scope.SCENARIO, properties={"default_path": json_file})
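Editor's note: the `test_json_data_node` hunk above swaps `{**a, **b}` unpacking for the PEP 584 dict union operator, which requires Python 3.9 or later; worth verifying against the project's minimum supported interpreter. A quick sketch of the equivalence (values here are just the test's sample data):

```python
original_data = {"a": 1, "b": 2, "c": 3}
append_data_1 = {"d": 1, "e": 2, "f": 3}

# Python 3.9+ union operator; the right operand wins on key clashes.
merged = original_data | append_data_1
# Pre-3.9 equivalent, should older interpreters still need support:
merged_legacy = {**original_data, **append_data_1}
assert merged == merged_legacy
```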
diff --git a/tests/core/data/test_parquet_data_node.py b/tests/core/data/test_parquet_data_node.py
index da3d6f7832..247c026c28 100644
--- a/tests/core/data/test_parquet_data_node.py
+++ b/tests/core/data/test_parquet_data_node.py
@@ -213,12 +213,12 @@ def test_read_custom_exposed_type(self):
         dn = ParquetDataNode(
             "foo", Scope.SCENARIO, properties={"path": example_parquet_path, "exposed_type": MyCustomObject}
         )
-        assert all([isinstance(obj, MyCustomObject) for obj in dn.read()])
+        assert all(isinstance(obj, MyCustomObject) for obj in dn.read())

         dn = ParquetDataNode(
             "foo", Scope.SCENARIO, properties={"path": example_parquet_path, "exposed_type": create_custom_class}
         )
-        assert all([isinstance(obj, MyOtherCustomObject) for obj in dn.read()])
+        assert all(isinstance(obj, MyOtherCustomObject) for obj in dn.read())

     def test_raise_error_unknown_parquet_engine(self):
         path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.parquet")
diff --git a/tests/core/job/test_job.py b/tests/core/job/test_job.py
index c460f20883..2eb76bc40d 100644
--- a/tests/core/job/test_job.py
+++ b/tests/core/job/test_job.py
@@ -94,7 +94,7 @@ def test_create_job(scenario, task, job):
     assert job.submit_entity == scenario
     with mock.patch("taipy.core.get") as get_mck:
         get_mck.return_value = task
-        assert job.get_label() == "name > " + job.id
+        assert job.get_label() == f"name > {job.id}"
         assert job.get_simple_label() == job.id
diff --git a/tests/core/job/test_job_manager.py b/tests/core/job/test_job_manager.py
index 2892eb34c9..2a83c87f39 100644
--- a/tests/core/job/test_job_manager.py
+++ b/tests/core/job/test_job_manager.py
@@ -483,7 +483,7 @@ def _create_task(function, nb_outputs=1, name=None):
     output_dn_configs = [
         Config.configure_data_node(f"output{i}", "pickle", Scope.SCENARIO, default_data=0) for i in range(nb_outputs)
     ]
-    _DataManager._bulk_get_or_create({cfg for cfg in output_dn_configs})
+    _DataManager._bulk_get_or_create(set(output_dn_configs))
     name = name or "".join(random.choice(string.ascii_lowercase) for _ in range(10))
     task_config = Config.configure_task(
         id=name,
diff --git a/tests/core/job/test_job_manager_with_sql_repo.py b/tests/core/job/test_job_manager_with_sql_repo.py
index 9a7d16b957..d796f97db7 100644
--- a/tests/core/job/test_job_manager_with_sql_repo.py
+++ b/tests/core/job/test_job_manager_with_sql_repo.py
@@ -274,7 +274,7 @@ def _create_task(function, nb_outputs=1, name=None):
     output_dn_configs = [
         Config.configure_data_node(f"output{i}", scope=Scope.SCENARIO, default_data=0) for i in range(nb_outputs)
     ]
-    _DataManager._bulk_get_or_create({cfg for cfg in output_dn_configs})
+    _DataManager._bulk_get_or_create(set(output_dn_configs))
     name = name or "".join(random.choice(string.ascii_lowercase) for _ in range(10))
     task_config = Config.configure_task(
         id=name,
diff --git a/tests/core/notification/test_notifier.py b/tests/core/notification/test_notifier.py
index dce98e8c0b..3da3de2429 100644
--- a/tests/core/notification/test_notifier.py
+++ b/tests/core/notification/test_notifier.py
@@ -339,13 +339,11 @@ def test_publish_creation_event():
     expected_event_entity_id = [cycle.id, dn.id, task.id, sequence.id, scenario.id]

     assert all(
-        [
-            event.entity_type == expected_event_types[i]
-            and event.entity_id == expected_event_entity_id[i]
-            and event.operation == EventOperation.CREATION
-            and event.attribute_name is None
-            for i, event in enumerate(published_events)
-        ]
+        event.entity_type == expected_event_types[i]
+        and event.entity_id == expected_event_entity_id[i]
+        and event.operation == EventOperation.CREATION
+        and event.attribute_name is None
+        for i, event in enumerate(published_events)
     )
@@ -538,13 +536,11 @@ def test_publish_update_event():
     expected_event_operation_type = [EventOperation.UPDATE] * len(expected_event_types)

     assert all(
-        [
-            event.entity_type == expected_event_types[i]
-            and event.entity_id == expected_event_entity_id[i]
-            and event.operation == expected_event_operation_type[i]
-            and event.attribute_name == expected_attribute_names[i]
-            for i, event in enumerate(published_events)
-        ]
+        event.entity_type == expected_event_types[i]
+        and event.entity_id == expected_event_entity_id[i]
+        and event.operation == expected_event_operation_type[i]
+        and event.attribute_name == expected_attribute_names[i]
+        for i, event in enumerate(published_events)
     )
@@ -686,13 +682,11 @@ def test_publish_update_event_in_context_manager():
     ]

     assert all(
-        [
-            event.entity_type == expected_event_types[i]
-            and event.entity_id == expected_event_entity_id[i]
-            and event.operation == EventOperation.UPDATE
-            and event.attribute_name == expected_attribute_names[i]
-            for i, event in enumerate(published_events)
-        ]
+        event.entity_type == expected_event_types[i]
+        and event.entity_id == expected_event_entity_id[i]
+        and event.operation == EventOperation.UPDATE
+        and event.attribute_name == expected_attribute_names[i]
+        for i, event in enumerate(published_events)
     )
@@ -739,13 +733,11 @@ def test_publish_submission_event():
     ]
     expected_event_entity_id = [job.submit_id, job.id, job.submit_id, job.id, job.submit_id, scenario.id]
     assert all(
-        [
-            event.entity_type == expected_event_types[i]
-            and event.entity_id == expected_event_entity_id[i]
-            and event.operation == expected_operations[i]
-            and event.attribute_name == expected_attribute_names[i]
-            for i, event in enumerate(published_events)
-        ]
+        event.entity_type == expected_event_types[i]
+        and event.entity_id == expected_event_entity_id[i]
+        and event.operation == expected_operations[i]
+        and event.attribute_name == expected_attribute_names[i]
+        for i, event in enumerate(published_events)
     )
@@ -792,13 +784,11 @@ def test_publish_deletion_event():
     expected_event_operation_type = [EventOperation.DELETION] * len(expected_event_types)

     assert all(
-        [
-            event.entity_type == expected_event_types[i]
-            and event.entity_id == expected_event_entity_id[i]
-            and event.operation == expected_event_operation_type[i]
-            and event.attribute_name is None
-            for i, event in enumerate(published_events)
-        ]
+        event.entity_type == expected_event_types[i]
+        and event.entity_id == expected_event_entity_id[i]
+        and event.operation == expected_event_operation_type[i]
+        and event.attribute_name is None
+        for i, event in enumerate(published_events)
     )

     scenario = tp.create_scenario(scenario_config)
@@ -826,11 +816,9 @@ def test_publish_deletion_event():
     expected_event_entity_id = [None, cycle.id, scenario.id, None, None]

     assert all(
-        [
-            event.entity_type == expected_event_types[i]
-            and event.entity_id == expected_event_entity_id[i]
-            and event.operation == EventOperation.DELETION
-            and event.attribute_name is None
-            for i, event in enumerate(published_events)
-        ]
+        event.entity_type == expected_event_types[i]
+        and event.entity_id == expected_event_entity_id[i]
+        and event.operation == EventOperation.DELETION
+        and event.attribute_name is None
+        for i, event in enumerate(published_events)
     )
diff --git a/tests/core/repository/mocks.py b/tests/core/repository/mocks.py
index 30a94f3f3a..6a601f9059 100644
--- a/tests/core/repository/mocks.py
+++ b/tests/core/repository/mocks.py
@@ -39,10 +39,7 @@ class MockObj:
     def __init__(self, id: str, name: str, version: Optional[str] = None) -> None:
         self.id = id
         self.name = name
-        if version:
-            self._version = version
-        else:
-            self._version = _VersionManager._get_latest_version()
+        self._version = version if version else _VersionManager._get_latest_version()

 @dataclass
diff --git a/tests/core/scenario/test_scenario.py b/tests/core/scenario/test_scenario.py
index 4909565488..43b6b70402 100644
--- a/tests/core/scenario/test_scenario.py
+++ b/tests/core/scenario/test_scenario.py
@@ -51,6 +51,7 @@ def test_create_primary_scenario(cycle):

     with mock.patch("taipy.core.get") as get_mck:
+
         class MockOwner:
             label = "owner_label"
@@ -58,7 +59,7 @@ def get_label(self):
             return self.label

         get_mck.return_value = MockOwner()
-        assert scenario.get_label() == "owner_label > " + scenario.config_id
+        assert scenario.get_label() == f"owner_label > {scenario.config_id}"

def test_create_scenario_at_time(current_datetime):
@@ -83,7 +84,9 @@ def test_create_scenario_with_task_and_additional_dn_and_sequence():
     dn_2 = PickleDataNode("abc", Scope.SCENARIO)
     task = Task("qux", {}, print, [dn_1])

-    scenario = Scenario("quux", set([task]), {}, set([dn_2]), sequences={"acb": {"tasks": [task]}})
+    scenario = Scenario(
+        "quux", {task}, {}, {dn_2}, sequences={"acb": {"tasks": [task]}}
+    )
     sequence = scenario.sequences["acb"]
     assert scenario.id is not None
     assert scenario.config_id == "quux"
@@ -123,7 +126,7 @@ def test_create_scenario_and_add_sequences():
     task_manager._set(task_1)
     task_manager._set(task_2)

-    scenario = Scenario("scenario", set([task_1]), {})
+    scenario = Scenario("scenario", {task_1}, {})
     scenario.sequences = {"sequence_1": {"tasks": [task_1]}, "sequence_2": {"tasks": []}}
     assert scenario.id is not None
     assert scenario.config_id == "scenario"
@@ -160,7 +163,7 @@ def test_create_scenario_overlapping_sequences():
     task_manager._set(task_1)
     task_manager._set(task_2)

-    scenario = Scenario("scenario", set([task_1, task_2]), {})
+    scenario = Scenario("scenario", {task_1, task_2}, {})
     scenario.add_sequence("sequence_1", [task_1])
     scenario.add_sequence("sequence_2", [task_1, task_2])
     assert scenario.id is not None
@@ -204,7 +207,7 @@ def test_create_scenario_one_additional_dn():
     task_manager._set(task_1)
     task_manager._set(task_2)

-    scenario = Scenario("scenario", set(), {}, set([additional_dn_1]))
+    scenario = Scenario("scenario", set(), {}, {additional_dn_1})
     assert scenario.id is not None
     assert scenario.config_id == "scenario"
     assert len(scenario.tasks) == 0
@@ -235,7 +238,7 @@ def test_create_scenario_wth_additional_dns():
     task_manager._set(task_1)
     task_manager._set(task_2)

-    scenario = Scenario("scenario", set(), {}, set([additional_dn_1, additional_dn_2]))
+    scenario = Scenario("scenario", set(), {}, {additional_dn_1, additional_dn_2})
     assert scenario.id is not None
     assert scenario.config_id == "scenario"
     assert len(scenario.tasks) == 0
@@ -251,7 +254,7 @@ def test_create_scenario_wth_additional_dns():
         additional_dn_2.config_id: additional_dn_2,
     }

-    scenario_1 = Scenario("scenario_1", set([task_1]), {}, set([additional_dn_1]))
+    scenario_1 = Scenario("scenario_1", {task_1}, {}, {additional_dn_1})
     assert scenario_1.id is not None
     assert scenario_1.config_id == "scenario_1"
     assert len(scenario_1.tasks) == 1
@@ -267,7 +270,9 @@ def test_create_scenario_wth_additional_dns():
         additional_dn_1.config_id: additional_dn_1,
     }

-    scenario_2 = Scenario("scenario_2", set([task_1, task_2]), {}, set([additional_dn_1, additional_dn_2]))
+    scenario_2 = Scenario(
+        "scenario_2", {task_1, task_2}, {}, {additional_dn_1, additional_dn_2}
+    )
     assert scenario_2.id is not None
     assert scenario_2.config_id == "scenario_2"
     assert len(scenario_2.tasks) == 2
@@ -541,7 +546,7 @@ def test_auto_set_and_reload(cycle, current_datetime, task, data_node):
     assert len(scenario_2.subscribers) == 3

     scenario_1.subscribers = []
diff --git a/tests/core/scenario/test_scenario_manager.py b/tests/core/scenario/test_scenario_manager.py
index 84fad6a8ea..852d0bcba3 100644
--- a/tests/core/scenario/test_scenario_manager.py
+++ b/tests/core/scenario/test_scenario_manager.py
@@ -426,9 +426,14 @@ def test_assign_scenario_as_parent_of_task_and_additional_data_nodes():
     scenario_1 = _ScenarioManager._create(scenario_config_1)
     sequence_1_s1 = scenario_1.sequences["sequence_1"]
-    assert all([sequence.parent_ids == {scenario_1.id} for sequence in scenario_1.sequences.values()])
+    assert all(
+        sequence.parent_ids == {scenario_1.id}
+        for sequence in scenario_1.sequences.values()
+    )
     tasks = scenario_1.tasks.values()
-    assert all([task.parent_ids == {scenario_1.id, sequence_1_s1.id} for task in tasks])
+    assert all(
+        task.parent_ids == {scenario_1.id, sequence_1_s1.id} for task in tasks
+    )
     data_nodes = {}
     for task in tasks:
         data_nodes.update(task.data_nodes)
@@ -443,7 +448,10 @@ def test_assign_scenario_as_parent_of_task_and_additional_data_nodes():
     sequence_1_s2 = scenario_2.sequences["sequence_1"]
     sequence_2_s2 = scenario_2.sequences["sequence_2"]
-    assert all([sequence.parent_ids == {scenario_2.id} for sequence in scenario_2.sequences.values()])
+    assert all(
+        sequence.parent_ids == {scenario_2.id}
+        for sequence in scenario_2.sequences.values()
+    )
     assert scenario_1.tasks["task_1"] == scenario_2.tasks["task_1"]
     assert scenario_1.tasks["task_1"].parent_ids == {
         scenario_1.id,
@@ -487,7 +495,9 @@ def test_assign_scenario_as_parent_of_task_and_additional_data_nodes():
     sequence_1_s1 = scenario_1.sequences["sequence_1"]
     assert scenario_1.sequences["sequence_1"].parent_ids == {scenario_1.id}
     tasks = scenario_1.tasks.values()
-    assert all([task.parent_ids == {scenario_1.id, sequence_1_s1.id} for task in tasks])
+    assert all(
+        task.parent_ids == {scenario_1.id, sequence_1_s1.id} for task in tasks
+    )
     data_nodes = {}
     for task in tasks:
         data_nodes.update(task.data_nodes)
@@ -578,7 +588,7 @@ def test_scenario_manager_only_creates_data_node_once():
     scenario_1_sorted_tasks = scenario_1._get_sorted_tasks()
     expected = [{task_mult_by_2_config.id, task_mult_by_4_config.id}, {task_mult_by_3_config.id}]
     for i, list_tasks_by_level in enumerate(scenario_1_sorted_tasks):
-        assert set([t.config_id for t in list_tasks_by_level]) == expected[i]
+        assert {t.config_id for t in list_tasks_by_level} == expected[i]

     assert scenario_1.cycle.frequency == Frequency.DAILY

     _ScenarioManager._create(scenario_config)
@@ -992,7 +1002,9 @@ def test_is_submittable():
     dn_config = Config.configure_in_memory_data_node("dn", 10)
     task_config = Config.configure_task("task", print, [dn_config])
-    scenario_config = Config.configure_scenario("sc", set([task_config]), set(), Frequency.DAILY)
+    scenario_config = Config.configure_scenario(
+        "sc", {task_config}, set(), Frequency.DAILY
+    )

     scenario = _ScenarioManager._create(scenario_config)
     assert len(_ScenarioManager._get_all()) == 1
@@ -1126,8 +1138,11 @@ def test_submit_task_with_input_dn_wrong_file_path(caplog):
         for input_dn in scenario.data_nodes.values()
         if input_dn not in scenario.get_inputs()
     ]
-    assert all([expected_output in stdout for expected_output in expected_outputs])
-    assert all([expected_output not in stdout for expected_output in not_expected_outputs])
+    assert all(expected_output in stdout for expected_output in expected_outputs)
+    assert all(
+        expected_output not in stdout
+        for expected_output in not_expected_outputs
+    )


 def test_submit_task_with_one_input_dn_wrong_file_path(caplog):
@@ -1155,8 +1170,11 @@ def test_submit_task_with_one_input_dn_wrong_file_path(caplog):
         for input_dn in scenario.data_nodes.values()
         if input_dn.config_id != "wrong_csv_file_path"
     ]
-    assert all([expected_output in stdout for expected_output in expected_outputs])
-    assert all([expected_output not in stdout for expected_output in not_expected_outputs])
+    assert all(expected_output in stdout for expected_output in expected_outputs)
+    assert all(
+        expected_output not in stdout
+        for expected_output in not_expected_outputs
+    )


 def subtraction(n1, n2):
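The hunks above pass generator expressions to all() instead of first materializing a list. A small standalone sketch of why that form is preferable (values and helper are illustrative): all() and any() short-circuit over a generator, while a list comprehension is fully evaluated before the call even starts.

def is_even(n):
    print(f"checking {n}")  # trace which elements get evaluated
    return n % 2 == 0

values = [1, 2, 3, 4]
# Generator: stops at the first falsy result, so only 1 is checked.
assert not all(is_even(n) for n in values)
# List comprehension: every element is checked before all() runs.
assert not all([is_even(n) for n in values])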
diff --git a/tests/core/scenario/test_scenario_manager_with_sql_repo.py b/tests/core/scenario/test_scenario_manager_with_sql_repo.py
index b9adc0ab6b..0173fcb004 100644
--- a/tests/core/scenario/test_scenario_manager_with_sql_repo.py
+++ b/tests/core/scenario/test_scenario_manager_with_sql_repo.py
@@ -398,7 +398,7 @@ def test_scenario_manager_only_creates_data_node_once(init_sql_repo, init_manage
     scenario_1_sorted_tasks = scenario_1._get_sorted_tasks()
     expected = [{task_mult_by_2_config.id, task_mult_by_4_config.id}, {task_mult_by_3_config.id}]
     for i, list_tasks_by_level in enumerate(scenario_1_sorted_tasks):
-        assert set([t.config_id for t in list_tasks_by_level]) == expected[i]
+        assert {t.config_id for t in list_tasks_by_level} == expected[i]

     assert scenario_1.cycle.frequency == Frequency.DAILY

     _ScenarioManager._create(scenario_config)
diff --git a/tests/core/sequence/test_sequence.py b/tests/core/sequence/test_sequence.py
index 0dc59a3c59..fca361b011 100644
--- a/tests/core/sequence/test_sequence.py
+++ b/tests/core/sequence/test_sequence.py
@@ -70,6 +70,7 @@ def test_create_sequence():
     assert sequence_1.id is not None

     with mock.patch("taipy.core.get") as get_mck:
+
         class MockOwner:
             label = "owner_label"
@@ -77,7 +78,7 @@ def get_label(self):
                 return self.label

         get_mck.return_value = MockOwner()
-        assert sequence_1.get_label() == "owner_label > " + sequence_1.id
+        assert sequence_1.get_label() == f"owner_label > {sequence_1.id}"
         assert sequence_1.get_simple_label() == sequence_1.id

     sequence_2 = Sequence(
@@ -95,6 +96,7 @@ def get_label(self):
     assert sequence_2.parent_ids == {"parent_id_1", "parent_id_2"}

     with mock.patch("taipy.core.get") as get_mck:
+
         class MockOwner:
             label = "owner_label"
@@ -102,7 +104,7 @@ def get_label(self):
                 return self.label

         get_mck.return_value = MockOwner()
-        assert sequence_2.get_label() == "owner_label > " + sequence_2.name
+        assert sequence_2.get_label() == f"owner_label > {sequence_2.name}"
         assert sequence_2.get_simple_label() == sequence_2.name


@@ -188,7 +190,10 @@ def assert_equal(tasks_a, tasks_b) -> bool:
                 return False
             else:
                 index_task_b = tasks_b.index(task_a)
-                if any([isinstance(task_b, list) for task_b in tasks_b[i : index_task_b + 1]]):
+                if any(
+                    isinstance(task_b, list)
+                    for task_b in tasks_b[i : index_task_b + 1]
+                ):
                     return False
     return True
@@ -579,7 +584,7 @@ def test_auto_set_and_reload(task):
     assert len(sequence_2.subscribers) == 2

     sequence_1.subscribers = []
-    assert len(sequence_1.subscribers) == 0
+    assert not sequence_1.subscribers
     assert len(sequence_2.subscribers) == 0

     # auto set & reload on properties attribute
@@ -659,14 +664,14 @@ def test_auto_set_and_reload(task):
         assert len(sequence.tasks) == 1
         assert sequence.tasks[task.config_id].id == task.id
-        assert len(sequence.subscribers) == 0
+        assert not sequence.subscribers
         assert sequence._is_in_context
         assert sequence.properties["qux"] == 5
         assert sequence.properties["temp_key_3"] == 1
         assert sequence.properties["temp_key_4"] == 0
         assert sequence.properties["temp_key_5"] == 0

-    assert len(sequence_1.tasks) == 0
+    assert not sequence_1.tasks
     assert len(sequence_1.subscribers) == 1
     assert not sequence_1._is_in_context
     assert sequence_1.properties["qux"] == 9
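The get_label assertions in these test files all use the same stub-owner pattern: taipy.core.get is patched so that the owner lookup resolves to an object with a known label. A condensed standalone sketch of the pattern (MockOwner comes from the hunks above; the final assertion is illustrative):

from unittest import mock

class MockOwner:
    label = "owner_label"

    def get_label(self):
        return self.label

# While the patch is active, anything that resolves its owner through
# taipy.core.get receives the stub, so entity.get_label() can be checked
# against the deterministic "owner_label > <id>" format.
with mock.patch("taipy.core.get") as get_mck:
    get_mck.return_value = MockOwner()
    assert get_mck("some-owner-id").get_label() == "owner_label"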
mock.patch("taipy.core.get") as get_mck: + class MockOwner: label = "owner_label" @@ -102,7 +104,7 @@ def get_label(self): return self.label get_mck.return_value = MockOwner() - assert sequence_2.get_label() == "owner_label > " + sequence_2.name + assert sequence_2.get_label() == f"owner_label > {sequence_2.name}" assert sequence_2.get_simple_label() == sequence_2.name @@ -188,7 +190,10 @@ def assert_equal(tasks_a, tasks_b) -> bool: return False else: index_task_b = tasks_b.index(task_a) - if any([isinstance(task_b, list) for task_b in tasks_b[i : index_task_b + 1]]): + if any( + isinstance(task_b, list) + for task_b in tasks_b[i : index_task_b + 1] + ): return False return True @@ -579,7 +584,7 @@ def test_auto_set_and_reload(task): assert len(sequence_2.subscribers) == 2 sequence_1.subscribers = [] - assert len(sequence_1.subscribers) == 0 + assert not sequence_1.subscribers assert len(sequence_2.subscribers) == 0 # auto set & reload on properties attribute @@ -659,14 +664,14 @@ def test_auto_set_and_reload(task): assert len(sequence.tasks) == 1 assert sequence.tasks[task.config_id].id == task.id - assert len(sequence.subscribers) == 0 + assert not sequence.subscribers assert sequence._is_in_context assert sequence.properties["qux"] == 5 assert sequence.properties["temp_key_3"] == 1 assert sequence.properties["temp_key_4"] == 0 assert sequence.properties["temp_key_5"] == 0 - assert len(sequence_1.tasks) == 0 + assert not sequence_1.tasks assert len(sequence_1.subscribers) == 1 assert not sequence_1._is_in_context assert sequence_1.properties["qux"] == 9 diff --git a/tests/core/sequence/test_sequence_manager.py b/tests/core/sequence/test_sequence_manager.py index ff7c04ee4f..1cb9cdb644 100644 --- a/tests/core/sequence/test_sequence_manager.py +++ b/tests/core/sequence/test_sequence_manager.py @@ -77,7 +77,7 @@ def __init(): input_dn = InMemoryDataNode("foo", Scope.SCENARIO) output_dn = InMemoryDataNode("foo", Scope.SCENARIO) task = Task("task", {}, print, [input_dn], [output_dn], TaskId("task_id")) - scenario = Scenario("scenario", set([task]), {}, set()) + scenario = Scenario("scenario", {task}, {}, set()) _ScenarioManager._set(scenario) return scenario, task @@ -208,10 +208,10 @@ def test_get_all_on_multiple_versions_environment(): def test_is_submittable(): dn = InMemoryDataNode("dn", Scope.SCENARIO, properties={"default_data": 10}) task = Task("task", {}, print, [dn]) - scenario = Scenario("scenario", set([task]), {}, set()) + scenario = Scenario("scenario", {task}, {}, set()) _ScenarioManager._set(scenario) - scenario.add_sequences({"sequence": list([task])}) + scenario.add_sequences({"sequence": [task]}) sequence = scenario.sequences["sequence"] assert len(_SequenceManager._get_all()) == 1 @@ -294,7 +294,7 @@ def _lock_dn_output_and_create_job( _SequenceManager._submit(sequence) calls_ids = [t.id for t in _TaskManager._orchestrator().submit_calls] - tasks_ids = tasks_ids * 2 + tasks_ids *= 2 assert set(calls_ids) == set(tasks_ids) @@ -769,11 +769,14 @@ def test_export(tmpdir_factory): task = Task("task", {}, print, id=TaskId("task_id")) scenario = Scenario( "scenario", - set([task]), + {task}, {}, set(), version="1.0", - sequences={"sequence_1": {}, "sequence_2": {"tasks": [task], "properties": {"xyz": "acb"}}}, + sequences={ + "sequence_1": {}, + "sequence_2": {"tasks": [task], "properties": {"xyz": "acb"}}, + }, ) _TaskManager._set(task) _ScenarioManager._set(scenario) @@ -939,8 +942,11 @@ def test_submit_task_with_input_dn_wrong_file_path(caplog): for input_dn in 
diff --git a/tests/core/task/test_task.py b/tests/core/task/test_task.py
index 0ca6b16631..b759634d4a 100644
--- a/tests/core/task/test_task.py
+++ b/tests/core/task/test_task.py
@@ -83,6 +83,7 @@ def test_create_task():
         task.bar

     with mock.patch("taipy.core.get") as get_mck:
+
         class MockOwner:
             label = "owner_label"
@@ -90,7 +91,7 @@ def get_label(self):
                 return self.label

         get_mck.return_value = MockOwner()
-        assert task.get_label() == "owner_label > " + task.config_id
+        assert task.get_label() == f"owner_label > {task.config_id}"
         assert task.get_simple_label() == task.config_id
diff --git a/tests/core/task/test_task_manager.py b/tests/core/task/test_task_manager.py
index 23802e3eba..3bf7df849f 100644
--- a/tests/core/task/test_task_manager.py
+++ b/tests/core/task/test_task_manager.py
@@ -82,7 +82,7 @@ def test_assign_task_as_parent_of_datanode():
     dns = {dn.config_id: dn for dn in _DataManager._get_all()}

     assert dns["dn_1"].parent_ids == {tasks[0].id}
-    assert dns["dn_2"].parent_ids == set([tasks[0].id, tasks[1].id])
+    assert dns["dn_2"].parent_ids == {tasks[0].id, tasks[1].id}
     assert dns["dn_3"].parent_ids == {tasks[1].id}
@@ -391,8 +391,11 @@ def test_submit_task_with_input_dn_wrong_file_path(caplog):
         f"path : {input_dn.path} "
         for input_dn in task.output.values()
     ]
-    assert all([expected_output in stdout for expected_output in expected_outputs])
-    assert all([expected_output not in stdout for expected_output in not_expected_outputs])
+    assert all(expected_output in stdout for expected_output in expected_outputs)
+    assert all(
+        expected_output not in stdout
+        for expected_output in not_expected_outputs
+    )


 def test_submit_task_with_one_input_dn_wrong_file_path(caplog):
@@ -416,8 +419,11 @@ def test_submit_task_with_one_input_dn_wrong_file_path(caplog):
         f"path : {input_dn.path} "
         for input_dn in [task.input["pickle_file_path"], task.output["wrong_parquet_file_path"]]
     ]
-    assert all([expected_output in stdout for expected_output in expected_outputs])
-    assert all([expected_output not in stdout for expected_output in not_expected_outputs])
+    assert all(expected_output in stdout for expected_output in expected_outputs)
+    assert all(
+        expected_output not in stdout
+        for expected_output in not_expected_outputs
+    )


 def test_get_tasks_by_config_id():
diff --git a/tests/core/test_core_cli.py b/tests/core/test_core_cli.py
index 6011c712b2..88deff3c83 100644
--- a/tests/core/test_core_cli.py
+++ b/tests/core/test_core_cli.py
@@ -467,7 +467,7 @@ def test_modify_job_configuration_dont_stop_application(caplog, init_config):
     core.run(force_restart=True)
     scenario = _ScenarioManager._create(scenario_config)
     jobs = _ScenarioManager._submit(scenario)
-    assert all([job.is_finished() for job in jobs])
+    assert all(job.is_finished() for job in jobs)
     core.stop()
     init_config()
diff --git a/tests/core/test_core_cli_with_sql_repo.py b/tests/core/test_core_cli_with_sql_repo.py
index 054e1d0f7a..abfb0dac5c 100644
--- a/tests/core/test_core_cli_with_sql_repo.py
+++ b/tests/core/test_core_cli_with_sql_repo.py
@@ -514,7 +514,7 @@ def test_modify_job_configuration_dont_stop_application(caplog, init_sql_repo, i
     core.run(force_restart=True)
     scenario = _ScenarioManager._create(scenario_config)
     jobs = _ScenarioManager._submit(scenario)
-    assert all([job.is_finished() for job in jobs])
+    assert all(job.is_finished() for job in jobs)
     core.stop()
     init_config()
diff --git a/tests/core/test_taipy.py b/tests/core/test_taipy.py
index 34f3d80f16..ce1741bedf 100644
--- a/tests/core/test_taipy.py
+++ b/tests/core/test_taipy.py
@@ -558,7 +558,7 @@ def test_block_config_when_core_is_running_in_development_mode(self):
         tp.submit(scenario_1)

         with pytest.raises(ConfigurationUpdateBlocked):
-            Config.configure_scenario("block_scenario", set([task_cfg_1]))
+            Config.configure_scenario("block_scenario", {task_cfg_1})
         core.stop()

     def test_block_config_when_core_is_running_in_standalone_mode(self):
@@ -577,7 +577,7 @@ def test_block_config_when_core_is_running_in_standalone_mode(self):
         tp.submit(scenario_1, wait=True)

         with pytest.raises(ConfigurationUpdateBlocked):
-            Config.configure_scenario("block_scenario", set([task_cfg_1]))
+            Config.configure_scenario("block_scenario", {task_cfg_1})
         core.stop()

     def test_get_data_node(self, data_node):
@@ -694,7 +694,7 @@ def assert_result_parents_and_expected_parents(parents, expected_parents):
         for key, items in expected_parents.items():
             assert len(parents[key]) == len(expected_parents[key])
             parent_ids = [parent.id for parent in parents[key]]
-            assert all([item.id in parent_ids for item in items])
+            assert all(item.id in parent_ids for item in items)

         dn_config_1 = Config.configure_data_node(id="d1", storage_type="in_memory", scope=Scope.SCENARIO)
         dn_config_2 = Config.configure_data_node(id="d2", storage_type="in_memory", scope=Scope.SCENARIO)
diff --git a/tests/core/version/test_version_cli_with_sql_repo.py b/tests/core/version/test_version_cli_with_sql_repo.py
index d47c42a6f5..7268348fc8 100644
--- a/tests/core/version/test_version_cli_with_sql_repo.py
+++ b/tests/core/version/test_version_cli_with_sql_repo.py
@@ -301,6 +301,6 @@ def config_scenario():
     )
     data_node_2_config = Config.configure_data_node(id="d2", storage_type="csv", default_path="foo.csv")
     task_config = Config.configure_task("my_task", twice, data_node_1_config, data_node_2_config)
-    scenario_config = Config.configure_scenario("my_scenario", [task_config], frequency=Frequency.DAILY)
-
-    return scenario_config
+    return Config.configure_scenario(
+        "my_scenario", [task_config], frequency=Frequency.DAILY
+    )
diff --git a/tests/gui/builder/control/test_chart.py b/tests/gui/builder/control/test_chart.py
index ed865e2afe..7bef5d2127 100644
--- a/tests/gui/builder/control/test_chart.py
+++ b/tests/gui/builder/control/test_chart.py
@@ -184,9 +184,9 @@ def test_map_builder(gui: Gui, helpers):


 def test_chart_indexed_properties_builder(gui: Gui, helpers):
-    data: t.Dict[str, t.Any] = {}
-    data["Date"] = [datetime.datetime(2021, 12, i) for i in range(1, 31)]
-
+    data: t.Dict[str, t.Any] = {
+        "Date": [datetime.datetime(2021, 12, i) for i in range(1, 31)]
+    }
     data["La Rochelle"] = [10 + 6 * random.random() for _ in range(1, 31)]
     data["Montpellier"] = [16 + 6 * random.random() for _ in range(1, 31)]
     data["Paris"] = [6 + 6 * random.random() for _ in range(1, 31)]
@@ -217,9 +217,9 @@ def test_chart_indexed_properties_builder(gui: Gui, helpers):


 def test_chart_indexed_properties_with_arrays_builder(gui: Gui, helpers):
-    data: t.Dict[str, t.Any] = {}
-    data["Date"] = [datetime.datetime(2021, 12, i) for i in range(1, 31)]
-
+    data: t.Dict[str, t.Any] = {
+        "Date": [datetime.datetime(2021, 12, i) for i in range(1, 31)]
+    }
     data["La Rochelle"] = [10 + 6 * random.random() for _ in range(1, 31)]
     data["Montpellier"] = [16 + 6 * random.random() for _ in range(1, 31)]
     data["Paris"] = [6 + 6 * random.random() for _ in range(1, 31)]
diff --git a/tests/gui/control/test_chart.py b/tests/gui/control/test_chart.py
index 6062dab27c..363c8c7dc4 100644
--- a/tests/gui/control/test_chart.py
+++ b/tests/gui/control/test_chart.py
@@ -160,9 +160,9 @@ def test_map_md(gui: Gui, helpers):


 def test_chart_indexed_properties(gui: Gui, helpers):
-    data: t.Dict[str, t.Any] = {}
-    data["Date"] = [datetime.datetime(2021, 12, i) for i in range(1, 31)]
-
+    data: t.Dict[str, t.Any] = {
+        "Date": [datetime.datetime(2021, 12, i) for i in range(1, 31)]
+    }
     data["La Rochelle"] = [10 + 6 * random.random() for _ in range(1, 31)]
     data["Montpellier"] = [16 + 6 * random.random() for _ in range(1, 31)]
     data["Paris"] = [6 + 6 * random.random() for _ in range(1, 31)]
@@ -185,9 +185,9 @@ def test_chart_indexed_properties(gui: Gui, helpers):


 def test_chart_indexed_properties_with_arrays(gui: Gui, helpers):
-    data: t.Dict[str, t.Any] = {}
-    data["Date"] = [datetime.datetime(2021, 12, i) for i in range(1, 31)]
-
+    data: t.Dict[str, t.Any] = {
+        "Date": [datetime.datetime(2021, 12, i) for i in range(1, 31)]
+    }
     data["La Rochelle"] = [10 + 6 * random.random() for _ in range(1, 31)]
     data["Montpellier"] = [16 + 6 * random.random() for _ in range(1, 31)]
     data["Paris"] = [6 + 6 * random.random() for _ in range(1, 31)]
diff --git a/tests/gui/e2e/page_scopes/assets2_class_scopes/page1.py b/tests/gui/e2e/page_scopes/assets2_class_scopes/page1.py
index 6dbf98cc36..4bfb0e721c 100644
--- a/tests/gui/e2e/page_scopes/assets2_class_scopes/page1.py
+++ b/tests/gui/e2e/page_scopes/assets2_class_scopes/page1.py
@@ -20,5 +20,5 @@ def __init__(self):
     def create_page(self):
         return Markdown("page1.md")

-    def reset(state):
-        state.operand_2 = 0
+    def reset(self):
+        self.operand_2 = 0
diff --git a/tests/gui/utils/test_map_dict.py b/tests/gui/utils/test_map_dict.py
index 2934ab5adf..7e664646db 100644
--- a/tests/gui/utils/test_map_dict.py
+++ b/tests/gui/utils/test_map_dict.py
@@ -39,23 +39,15 @@ def test_map_dict():
     assert len(md) == 1
     md.clear()
     assert len(md) == 0
-    assert len(d) == 0
+    assert not d
     assert len(md_copy) == 3
-    v1 = ""
-    for k in md_copy:
-        v1 += k
+    v1 = "".join(md_copy)
     assert v1 == "abc"
-    v1 = ""
-    for k in md_copy.keys():
-        v1 += k
+    v1 = "".join(md_copy.keys())
     assert v1 == "abc"
-    v1 = ""
-    for k in md_copy.__reversed__():
-        v1 += k
+    v1 = "".join(md_copy.__reversed__())
     assert v1 == "cba"
-    v1 = 0
-    for k in md_copy.values():
-        v1 += k
+    v1 = sum(md_copy.values())
     assert v1 == 6  # 1+2+3
     v1 = md_copy.setdefault("a", 5)
     assert v1 == 1
@@ -66,8 +58,7 @@ def test_map_dict():
         md = _MapDict("not_a_dict")
         assert False
     except Exception:
-        assert True
-        pass
+        pass


 def test_map_dict_update():
@@ -76,14 +67,12 @@ def test_map_dict_update():

     def update(k, v):
         update_values[0] = k
         update_values[1] = v
-        pass

     d = {"a": 1, "b": "2"}
     md = _MapDict(d, update)
     md.__setitem__("a", 3)
     assert update_values[0] == "a"
     assert update_values[1] == 3
-    pass


 def test_map_dict_update_full_dictionary_1():
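The test_map_dict.py rewrite above leans on the fact that iterating a mapping yields its keys, so the accumulation loops collapse into join() and sum(). A sketch with a plain dict standing in for _MapDict (illustrative data):

md_copy = {"a": 1, "b": 2, "c": 3}
assert "".join(md_copy) == "abc"           # iterating a mapping yields keys
assert "".join(md_copy.keys()) == "abc"    # same thing, spelled explicitly
assert "".join(reversed(md_copy)) == "cba" # dicts are reversible since 3.8
assert sum(md_copy.values()) == 6          # 1 + 2 + 3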
diff --git a/tests/gui_core/test_context_is_deletable.py b/tests/gui_core/test_context_is_deletable.py
index 7452591ee3..db3729ddc3 100644
--- a/tests/gui_core/test_context_is_deletable.py
+++ b/tests/gui_core/test_context_is_deletable.py
@@ -29,9 +29,7 @@ def mock_core_get(entity_id):
         return a_scenario
     if entity_id == a_job.id:
         return a_job
-    if entity_id == a_datanode.id:
-        return a_datanode
-    return a_task
+    return a_datanode if entity_id == a_datanode.id else a_task


 def mock_is_deletable_false(entity_id):
@@ -87,9 +85,9 @@ def test_crud_scenario(self):
             assert str(assign.call_args.args[1]).endswith("is not deletable.")

     def test_act_on_jobs(self):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get), patch(
-            "taipy.gui_core._context.is_deletable", side_effect=mock_is_true
-        ):
+        with (patch("taipy.gui_core._context.core_get", side_effect=mock_core_get), patch(
+            "taipy.gui_core._context.is_deletable", side_effect=mock_is_true
+        )):
             gui_core_context = _GuiCoreContext(Mock())
             assign = Mock()
             gui_core_context.act_on_jobs(
@@ -103,7 +101,7 @@ def test_act_on_jobs(self):
             )
             assign.assert_called_once()
             assert assign.call_args.args[0] == "gui_core_js_error"
-            assert str(assign.call_args.args[1]).find("is not deletable.") == -1
+            assert "is not deletable." not in str(assign.call_args.args[1])
             assign.reset_mock()

             with patch("taipy.gui_core._context.is_readable", side_effect=mock_is_deletable_false):
diff --git a/tests/gui_core/test_context_is_editable.py b/tests/gui_core/test_context_is_editable.py
index 84f9829670..5110851471 100644
--- a/tests/gui_core/test_context_is_editable.py
+++ b/tests/gui_core/test_context_is_editable.py
@@ -31,9 +31,7 @@ def mock_core_get(entity_id):
         return a_scenario
     if entity_id == a_job.id:
         return a_job
-    if entity_id == a_datanode.id:
-        return a_datanode
-    return a_task
+    return a_datanode if entity_id == a_datanode.id else a_task


 def mock_is_editable_false(entity_id):
@@ -117,9 +115,9 @@ def test_edit_entity(self):
             assert str(assign.call_args.args[1]).endswith("is not editable.")

     def test_act_on_jobs(self):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get), patch(
-            "taipy.gui_core._context.is_deletable", side_effect=mock_is_true
-        ):
+        with (patch("taipy.gui_core._context.core_get", side_effect=mock_core_get), patch(
+            "taipy.gui_core._context.is_deletable", side_effect=mock_is_true
+        )):
             gui_core_context = _GuiCoreContext(Mock())
             assign = Mock()
             gui_core_context.act_on_jobs(
@@ -133,7 +131,7 @@ def test_act_on_jobs(self):
             )
             assign.assert_called_once()
             assert assign.call_args.args[0] == "gui_core_js_error"
-            assert str(assign.call_args.args[1]).find("is not editable.") == -1
+            assert "is not editable." not in str(assign.call_args.args[1])
             assign.reset_mock()

             with patch("taipy.gui_core._context.is_readable", side_effect=mock_is_editable_false):
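The assertion rewrites in these two files replace str.find(needle) == -1 with the not in operator, which states the negative directly. The two forms are equivalent for substring checks (message below is illustrative). Note also that the newly parenthesized form with (patch(...), patch(...)): uses a grammar that CPython only documents from 3.10, so it assumes the suite no longer runs on older interpreters.

message = "Entity SCENARIO_xyz is not deletable."
assert message.find("is not deletable.") != -1  # index-based spelling
assert "is not deletable." in message           # idiomatic membership test
assert "is not readable." not in message        # negative form used in the tests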
diff --git a/tests/gui_core/test_context_is_promotable.py b/tests/gui_core/test_context_is_promotable.py
index 48dd2e9330..1801a8460a 100644
--- a/tests/gui_core/test_context_is_promotable.py
+++ b/tests/gui_core/test_context_is_promotable.py
@@ -30,9 +30,7 @@ def mock_core_get(entity_id):
         return a_scenario
     if entity_id == a_job.id:
         return a_job
-    if entity_id == a_datanode.id:
-        return a_datanode
-    return a_task
+    return a_datanode if entity_id == a_datanode.id else a_task


 def mock_is_promotable_false(entity_id):
diff --git a/tests/gui_core/test_context_is_readable.py b/tests/gui_core/test_context_is_readable.py
index 7b450972e1..416d31deb1 100644
--- a/tests/gui_core/test_context_is_readable.py
+++ b/tests/gui_core/test_context_is_readable.py
@@ -39,9 +39,7 @@ def mock_core_get(entity_id):
         return a_scenario
     if entity_id == a_job.id:
         return a_job
-    if entity_id == a_datanode.id:
-        return a_datanode
-    return a_task
+    return a_datanode if entity_id == a_datanode.id else a_task


 class MockState:
@@ -143,12 +141,8 @@ def test_scenario_status_callback(self):
             gui_core_context = _GuiCoreContext(Mock())
             gui_core_context.scenario_status_callback(a_job.id)
             mockget.assert_called()
-            found = False
-            for call in mockget.call_args_list:
-                if call.args[0] == a_job.id:
-                    found = True
-                    break
-            assert found is True
+            found = any(call.args[0] == a_job.id for call in mockget.call_args_list)
+            assert found
             mockget.reset_mock()

             with patch("taipy.gui_core._context.is_readable", side_effect=mock_is_readable_false):
@@ -178,9 +172,9 @@ def test_job_adapter(self):
         assert outcome is None

     def test_act_on_jobs(self):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get), patch(
-            "taipy.gui_core._context.is_deletable", side_effect=mock_is_true
-        ):
+        with (patch("taipy.gui_core._context.core_get", side_effect=mock_core_get), patch(
+            "taipy.gui_core._context.is_deletable", side_effect=mock_is_true
+        )):
             gui_core_context = _GuiCoreContext(Mock())
             assign = Mock()
             gui_core_context.act_on_jobs(
@@ -194,7 +188,7 @@ def test_act_on_jobs(self):
             )
             assign.assert_called_once()
             assert assign.call_args.args[0] == "gui_core_js_error"
-            assert str(assign.call_args.args[1]).find("is not readable.") == -1
+            assert "is not readable." not in str(assign.call_args.args[1])
             assign.reset_mock()

             gui_core_context.act_on_jobs(
@@ -208,7 +202,7 @@ def test_act_on_jobs(self):
             )
             assign.assert_called_once()
             assert assign.call_args.args[0] == "gui_core_js_error"
-            assert str(assign.call_args.args[1]).find("is not readable.") == -1
+            assert "is not readable." not in str(assign.call_args.args[1])
             assign.reset_mock()

             with patch("taipy.gui_core._context.is_readable", side_effect=mock_is_readable_false):
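mock_core_get in each of these test modules now ends with a conditional expression instead of a trailing if/return pair. A minimal sketch of the same fall-through dispatch (sentinel objects stand in for the Taipy entities; ids are illustrative):

class Entity:
    def __init__(self, entity_id):
        self.id = entity_id

a_datanode = Entity("DATANODE_dn_id")
a_task = Entity("TASK_task_id")

def mock_core_get(entity_id):
    # The last if/return pair of the dispatcher, folded into one expression.
    return a_datanode if entity_id == a_datanode.id else a_task

assert mock_core_get("DATANODE_dn_id") is a_datanode
assert mock_core_get("anything-else") is a_task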
diff --git a/tests/gui_core/test_context_is_submitable.py b/tests/gui_core/test_context_is_submitable.py
index a924d99894..f14bc5d8a3 100644
--- a/tests/gui_core/test_context_is_submitable.py
+++ b/tests/gui_core/test_context_is_submitable.py
@@ -38,9 +38,7 @@ def mock_core_get(entity_id):
         return a_scenario
     if entity_id == a_job.id:
         return a_job
-    if entity_id == a_datanode.id:
-        return a_datanode
-    return a_task
+    return a_datanode if entity_id == a_datanode.id else a_task


 class MockState:
diff --git a/tests/rest/conftest.py b/tests/rest/conftest.py
index b5d69b7225..e8d1b641cb 100644
--- a/tests/rest/conftest.py
+++ b/tests/rest/conftest.py
@@ -151,10 +151,12 @@ def default_datanode_config():
 @pytest.fixture
 def default_datanode_config_list():
-    configs = []
-    for i in range(10):
-        configs.append(Config.configure_data_node(id=f"ds_{i}", storage_type="in_memory", scope=Scope.SCENARIO))
-    return configs
+    return [
+        Config.configure_data_node(
+            id=f"ds_{i}", storage_type="in_memory", scope=Scope.SCENARIO
+        )
+        for i in range(10)
+    ]


 def __default_task():
@@ -197,10 +199,7 @@ def default_task_config():
 @pytest.fixture
 def default_task_config_list():
-    configs = []
-    for i in range(10):
-        configs.append(Config.configure_task(f"task_{i}", print, [], []))
-    return configs
+    return [Config.configure_task(f"task_{i}", print, [], []) for i in range(10)]


 def __default_sequence():
@@ -260,12 +259,11 @@ def __create_cycle(name="foo"):
 @pytest.fixture
 def create_cycle_list():
-    cycles = []
     manager = _CycleManager
     for i in range(10):
         c = __create_cycle(f"cycle_{i}")
         manager._set(c)
-    return cycles
+    return []


 @pytest.fixture
@@ -300,12 +298,11 @@ def default_job():
 @pytest.fixture
 def create_job_list():
-    jobs = []
     manager = _JobManager
-    for i in range(10):
+    for _ in range(10):
         c = __create_job()
         manager._set(c)
-    return jobs
+    return []


 @pytest.fixture(scope="function", autouse=True)
diff --git a/tools/frontend/bundle_build.py b/tools/frontend/bundle_build.py
index 18f8d7f28d..6e140749ee 100644
--- a/tools/frontend/bundle_build.py
+++ b/tools/frontend/bundle_build.py
@@ -20,8 +20,9 @@ def build_gui(root_path: Path):
     print(f"Building taipy-gui frontend bundle in {root_path}.")
-    already_exists = (root_path / "taipy" / "gui" / "webapp" / "index.html").exists()
-    if already_exists:
+    if already_exists := (
+        root_path / "taipy" / "gui" / "webapp" / "index.html"
+    ).exists():
         print(f'Found taipy-gui frontend bundle in {root_path / "taipy" / "gui" / "webapp"}.')
     else:
         subprocess.run(["npm", "ci"], cwd=root_path / "frontend" / "taipy-gui" / "dom", check=True, shell=with_shell)
@@ -33,8 +34,9 @@ def build_gui(root_path: Path):

 def build_taipy(root_path: Path):
     print(f"Building taipy frontend bundle in {root_path}.")
-    already_exists = (root_path / "taipy" / "gui_core" / "lib" / "taipy-gui-core.js").exists()
-    if already_exists:
+    if already_exists := (
+        root_path / "taipy" / "gui_core" / "lib" / "taipy-gui-core.js"
+    ).exists():
         print(f'Found taipy frontend bundle in {root_path / "taipy" / "gui_core" / "lib"}.')
     else:
         # Specify the correct path to taipy-gui in gui/.env file
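The build scripts above fold the exists() check into the if header with an assignment expression. Two points worth keeping in mind: the walrus operator needs Python 3.8+, and the bound name stays usable after the if block. A small sketch under those assumptions (the README.md path is illustrative):

from pathlib import Path

root_path = Path(".")
# Bind and test in one step; already_exists remains in scope afterwards.
if already_exists := (root_path / "README.md").exists():
    print("found an existing bundle marker")
else:
    print("marker missing, a build would be triggered here")
print(f"already_exists is still bound: {already_exists}")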
diff --git a/tools/gui/generate_pyi.py b/tools/gui/generate_pyi.py
index 45fe3ccb53..7942ad8ab9 100644
--- a/tools/gui/generate_pyi.py
+++ b/tools/gui/generate_pyi.py
@@ -22,7 +22,7 @@
 # Generate gui pyi file (gui/gui.pyi) #
 ############################################################
 gui_py_file = "./taipy/gui/gui.py"
-gui_pyi_file = gui_py_file + "i"
+gui_pyi_file = f"{gui_py_file}i"

 os.system(f"pipenv run stubgen {gui_py_file} --no-import --parse-only --export-less -o ./")

@@ -50,7 +50,7 @@
 # Generate Page Builder pyi file (gui/builder/__init__.pyi) #
 ############################################################
 builder_py_file = "./taipy/gui/builder/__init__.py"
-builder_pyi_file = builder_py_file + "i"
+builder_pyi_file = f"{builder_py_file}i"

 with open("./taipy/gui/viselements.json", "r") as file:
     viselements = json.load(file)
 with open("./tools/builder/block.txt", "r") as file:
diff --git a/tools/packages/taipy-gui/setup.py b/tools/packages/taipy-gui/setup.py
index 843b081eb6..2db17d7ff1 100644
--- a/tools/packages/taipy-gui/setup.py
+++ b/tools/packages/taipy-gui/setup.py
@@ -49,8 +49,9 @@ class NPMInstall(build_py):
     def run(self):
         with_shell = platform.system() == "Windows"
         print(f"Building taipy-gui frontend bundle in {root_folder}.")
-        already_exists = (root_folder / "taipy" / "gui" / "webapp" / "index.html").exists()
-        if already_exists:
+        if already_exists := (
+            root_folder / "taipy" / "gui" / "webapp" / "index.html"
+        ).exists():
             print(f'Found taipy-gui frontend bundle in {root_folder / "taipy" / "gui" / "webapp"}.')
         else:
             subprocess.run(
diff --git a/tools/packages/taipy/setup.py b/tools/packages/taipy/setup.py
index 04b8c22be5..3aee15042c 100644
--- a/tools/packages/taipy/setup.py
+++ b/tools/packages/taipy/setup.py
@@ -50,8 +50,9 @@ class NPMInstall(build_py):
     def run(self):
         with_shell = platform.system() == "Windows"
         print(f"Building taipy frontend bundle in {root_folder}.")
-        already_exists = (root_folder / "taipy" / "gui_core" / "lib" / "taipy-gui-core.js").exists()
-        if already_exists:
+        if already_exists := (
+            root_folder / "taipy" / "gui_core" / "lib" / "taipy-gui-core.js"
+        ).exists():
             print(f'Found taipy frontend bundle in {root_folder / "taipy" / "gui_core" / "lib"}.')
         else:
             # Specify the correct path to taipy-gui in gui/.env file