Merge pull request #42 from cthoyt/high-level
Add high level converter getter
cmungall authored Sep 15, 2023
2 parents 80f566a + 9bcdfc4 commit 589ded6
Showing 4 changed files with 25 additions and 18 deletions.
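In brief, this pull request replaces the two-step pattern of loading a context and then converting it with a single `load_converter` call. A minimal before/after sketch, using only the imports and calls that appear in the diff below:

```python
from curies import Converter

from prefixmaps import load_converter
from prefixmaps.io.parser import load_multi_context

# Before this change: load a merged context, then convert it explicitly
context = load_multi_context(["obo", "bioregistry.upper", "linked_data", "prefixcc"])
converter: Converter = context.as_converter()

# After this change: the new high-level getter does both steps in one call
converter: Converter = load_converter(["obo", "bioregistry.upper", "linked_data", "prefixcc"])

print(converter.expand("CHEBI:1"))  # http://purl.obolibrary.org/obo/CHEBI_1
```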
26 changes: 10 additions & 16 deletions README.md
@@ -35,11 +35,10 @@ pip install prefixmaps
To use in combination with [curies](https://github.com/cthoyt/curies) library:

```python
from prefixmaps.io.parser import load_multi_context
from prefixmaps import load_converter
from curies import Converter

context = load_multi_context(["obo", "bioregistry.upper", "linked_data", "prefixcc"])
converter: Converter = context.as_converter()
converter: Converter = load_converter(["obo", "bioregistry.upper", "linked_data", "prefixcc"])

>>> converter.expand("CHEBI:1")
'http://purl.obolibrary.org/obo/CHEBI_1'
@@ -60,21 +59,19 @@ converter: Converter = context.as_converter()
If we prioritize prefix.cc the OBO prefix is ignored:

```python
context = load_multi_context(["prefixcc", "obo"])
converter: Converter = context.as_converter()
converter = load_converter(["prefixcc", "obo"])

>>> converter.expand("GEO:1")
>>> converter.expand("geo:1")
'http://www.opengis.net/ont/geosparql#1'
```

Even though prefix expansion is case sensitive, we intentionally block conflicts that differ only in case.
Even though prefix expansion is case-sensitive, we intentionally block conflicts that differ only in case.

If we push `bioregistry` at the start of the list then GEOGEO can be used as the prefix for the OBO ontology:

```python
context = load_multi_context(["bioregistry", "prefixcc", "obo"])
converter: Converter = context.as_converter()
converter = load_converter(["bioregistry", "prefixcc", "obo"])

>>> converter.expand("geo:1")
'http://identifiers.org/geo/1'
@@ -88,8 +85,7 @@ Note that from the OBO perspective, GEOGEO is non-canonical.
We get similar results using the upper-normalized variant of `bioregistry`:

```python
context = load_multi_context(["bioregistry.upper", "prefixcc", "obo"])
converter: Converter = context.as_converter()
converter = load_converter(["bioregistry.upper", "prefixcc", "obo"])

>>> converter.expand("GEO:1")
'http://identifiers.org/geo/1'
@@ -101,8 +97,7 @@ converter: Converter = context.as_converter()
Users of OBO ontologies will want to place OBO at the start of the list:

```python
context = load_multi_context(["obo", "bioregistry.upper", "prefixcc"])
converter: Converter = context.as_converter()
converter = load_converter(["obo", "bioregistry.upper", "prefixcc"])

>>> converter.expand("geo:1")
>>> converter.expand("GEO:1")
@@ -117,8 +112,7 @@ GEO. This could be added in future with a unique OBO prefix.
You can use the ready-made "merged" prefix set, which prioritizes OBO:

```python
context = load_context("merged")
converter: Converter = context.as_converter()
converter = load_converter("merged")

>>> converter.expand("GEOGEO:1")
>>> converter.expand("GEO:1")
@@ -128,13 +122,13 @@ converter: Converter = context.as_converter()

### Network independence and requesting latest versions

By default this will make use of metadata distributed alongside the package. This has certain advantages in terms
By default, this will make use of metadata distributed alongside the package. This has certain advantages in terms
of reproducibility, but it means if a new ontology or prefix is added to an upstream source you won't see this.

To refresh and use the latest upstream:

```python
ctxt = load_context("obo", refresh=True)
converter = load_converter("obo", refresh=True)
```

This will perform a fetch from http://obofoundry.org/registry/obo_prefixes.ttl
3 changes: 2 additions & 1 deletion src/prefixmaps/__init__.py
@@ -1,12 +1,13 @@
from .datamodel.context import Context, PrefixExpansion, StatusType
from .io.parser import load_context, load_multi_context
from .io.parser import load_context, load_converter, load_multi_context

try:
from importlib.metadata import version
except ImportError: # for Python<3.8
from importlib_metadata import version

__all__ = [
"load_converter",
"load_context",
"load_multi_context",
"Context",
11 changes: 10 additions & 1 deletion src/prefixmaps/io/parser.py
@@ -1,15 +1,17 @@
from csv import DictReader
from pathlib import Path
from typing import List, TextIO
from typing import List, TextIO, Union

import yaml
from curies import Converter

from prefixmaps.data import data_path
from prefixmaps.datamodel.context import CONTEXT, Context, PrefixExpansion, StatusType

__all__ = [
"load_multi_context",
"load_context",
"load_converter",
]


@@ -23,6 +25,13 @@ def context_path(name: CONTEXT) -> Path:
return data_path / f"{name}.csv"


def load_converter(names: Union[CONTEXT, List[CONTEXT]], refresh: bool = False) -> Converter:
"""Get a converter."""
if isinstance(names, str):
return load_context(names, refresh=refresh).as_converter()
return load_multi_context(names, refresh=refresh).as_converter()


def load_multi_context(names: List[CONTEXT], refresh=False) -> Context:
"""
Merges multiple contexts
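As a usage note, the new `load_converter` accepts either a single context name or a list of names and returns an equivalent `curies.Converter` in both cases, which the added test below checks via the prefix maps. A small sketch of that equivalence:

```python
from prefixmaps import load_converter

# A single context name and a one-element list produce converters
# with the same prefix map
converter_from_str = load_converter("bioportal")
converter_from_list = load_converter(["bioportal"])
assert converter_from_str.prefix_map == converter_from_list.prefix_map
```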
3 changes: 3 additions & 0 deletions tests/test_core/test_curies.py
@@ -24,6 +24,9 @@ def test_load(self):
converter = context.as_converter()
self.assertIsInstance(converter, Converter)

self.assertEqual(converter.prefix_map, prefixmaps.load_converter("bioportal").prefix_map)
self.assertEqual(converter.prefix_map, prefixmaps.load_converter(["bioportal"]).prefix_map)

# prefix map checks
self.assertIn(prefix, converter.prefix_map)
self.assertEqual(uri_prefix_1, converter.prefix_map[prefix])
