suggestions from code review
brownj85 committed Jul 21, 2023
1 parent e489aa6 commit 9f5939e
Showing 9 changed files with 15 additions and 13 deletions.
2 changes: 1 addition & 1 deletion doc/introduction/data.rst
@@ -35,7 +35,7 @@ The :func:`~pennylane.data.load` function returns a ``list`` with the desired data
 >>> H2data = H2datasets[0]
 
 We can load datasets for multiple parameter values by providing a list of values instead of a single value.
-To load all possible values, use the special value :const:`~pennylane.data.FULL`.
+To load all possible values, use the special value :const:`~pennylane.data.FULL` or the string 'full':
 
 >>> H2datasets = qml.data.load("qchem", molname="H2", basis="full", bondlength=[0.5, 1.1])
 >>> print(H2datasets)
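A hedged sketch of the behaviour the reworded sentence describes: passing either the :const:`~pennylane.data.FULL` constant or the string 'full' for a parameter asks qml.data.load to return every available value of that parameter. The choice of the bondlength parameter and the "STO-3G" basis below are assumptions made for illustration, not part of this diff:

>>> import pennylane as qml
>>> # The two calls below are meant to be interchangeable:
>>> datasets = qml.data.load("qchem", molname="H2", basis="STO-3G", bondlength=qml.data.FULL)
>>> datasets = qml.data.load("qchem", molname="H2", basis="STO-3G", bondlength="full")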
2 changes: 1 addition & 1 deletion docker/pennylane.dockerfile
@@ -46,7 +46,7 @@ RUN pip install pytest pytest-cov pytest-mock flaky
 RUN pip install -i https://test.pypi.org/simple/ pennylane-lightning --pre --upgrade
 # hotfix, remove when pyscf 2.1 is released (currently no wheel for python3.10)
 RUN pip install openfermionpyscf || true
-RUN pip install hdf5 || true
+RUN pip install hdf5 fsspec aiohttp || true
 RUN make test && make coverage
 
 # create Second small build.
2 changes: 1 addition & 1 deletion pennylane/data/__init__.py
@@ -106,7 +106,7 @@
 >>> dataset = qml.data.Dataset(hamiltonian = qml.data.attribute(
     hamiltonian,
     doc="The hamiltonian of the system"))
->>> dataset.eigen = attribute(
+>>> dataset.eigen = qml.data.attribute(
     {"eigvals": eigvals, "eigvecs": eigvecs},
     doc="Eigenvalues and eigenvectors of the hamiltonain")
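For context, a self-contained version of the corrected docstring example could look like the sketch below. The concrete Hamiltonian used to build hamiltonian, eigvals, and eigvecs is a hypothetical stand-in added for illustration; only the two qml.data calls come from the docstring itself:

>>> import numpy as np
>>> import pennylane as qml
>>> # Hypothetical inputs; any observable would do here.
>>> hamiltonian = qml.Hamiltonian([1.0], [qml.PauliZ(0)])
>>> eigvals, eigvecs = np.linalg.eigh(qml.matrix(hamiltonian))
>>> # Corrected calls: both helpers are fully qualified as qml.data.attribute.
>>> dataset = qml.data.Dataset(hamiltonian=qml.data.attribute(
...     hamiltonian, doc="The hamiltonian of the system"))
>>> dataset.eigen = qml.data.attribute(
...     {"eigvals": eigvals, "eigvecs": eigvecs},
...     doc="Eigenvalues and eigenvectors of the hamiltonian")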
4 changes: 2 additions & 2 deletions pennylane/data/attributes/array.py
@@ -30,8 +30,8 @@ class DatasetArray(DatasetAttribute[HDF5Array, numpy.ndarray, ArrayLike]):
 
     type_id = "array"
 
-    def __post_init__(self, value: ArrayLike, info: Optional[AttributeInfo]) -> None:
-        super().__post_init__(value, info)
+    def __post_init__(self, value: ArrayLike) -> None:
+        super().__post_init__(value)
 
         array_interface = get_interface(value)
         if array_interface not in ("numpy", "autograd"):
3 changes: 2 additions & 1 deletion pennylane/data/attributes/dictionary.py
@@ -38,7 +38,8 @@ class DatasetDict( # pylint: disable=too-many-ancestors
 
     type_id = "dict"
 
-    def __post_init__(self, value: typing.Mapping[str, T], info):
+    def __post_init__(self, value: typing.Mapping[str, T]):
+        super().__post_init__(value)
         self.update(value)
 
     @classmethod
4 changes: 3 additions & 1 deletion pennylane/data/attributes/list.py
@@ -34,7 +34,9 @@ class DatasetList( # pylint: disable=too-many-ancestors
 
     type_id = "list"
 
-    def __post_init__(self, value: typing.Iterable[T], info):
+    def __post_init__(self, value: typing.Iterable[T]):
+        super().__post_init__(value)
+
         self.extend(value)
 
     @classmethod
4 changes: 2 additions & 2 deletions pennylane/data/attributes/sparse_array.py
@@ -51,8 +51,8 @@ class DatasetSparseArray(Generic[SparseT], DatasetAttribute[HDF5Group, SparseT,
 
     type_id = "sparse_array"
 
-    def __post_init__(self, value: SparseT, info) -> None:
-        super().__post_init__(value, info)
+    def __post_init__(self, value: SparseT) -> None:
+        super().__post_init__(value)
         self.info["sparse_array_class"] = type(value).__qualname__
 
     @property
4 changes: 2 additions & 2 deletions pennylane/data/base/attribute.py
@@ -282,7 +282,7 @@ def _value_init(
 
         self._bind = self._set_value(value, info, parent, key)
         self._check_bind()
-        self.__post_init__(value, self.info)
+        self.__post_init__(value)
 
     @property
     def info(self) -> AttributeInfo:
@@ -318,7 +318,7 @@ def consumes_types(cls) -> typing.Iterable[type]:
         """
         return ()
 
-    def __post_init__(self, value: InitValueType, info: Optional[AttributeInfo]) -> None:
+    def __post_init__(self, value: InitValueType) -> None:
         """Called after __init__(), only during value initialization. Can be implemented
         in subclasses that require additional initialization."""
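Together with the subclass updates above, this base-class change means __post_init__ now receives only the value: metadata that previously arrived through the removed info argument is still reachable via self.info. A hedged sketch of a custom attribute under the new hook signature — the class name, type_id, and stored key are hypothetical, and the serialization methods a real subclass also needs are omitted:

from pennylane.data.base.attribute import DatasetAttribute

class DatasetExample(DatasetAttribute):
    """Illustrative subclass only; value/HDF5 conversion methods are omitted."""

    type_id = "example"

    def __post_init__(self, value) -> None:
        # New signature: no info parameter; chain to the base hook first.
        super().__post_init__(value)
        # Read or write metadata through self.info instead.
        self.info["example_value_class"] = type(value).__qualname__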
3 changes: 1 addition & 2 deletions pennylane/data/base/dataset.py
@@ -66,7 +66,6 @@ def field( # pylint: disable=too-many-arguments, unused-argument
     attribute_type: Union[Type[DatasetAttribute[HDF5Any, T, Any]], Literal[UNSET]] = UNSET,
     doc: Optional[str] = None,
     py_type: Optional[Any] = None,
-    is_param: bool = False,
     **kwargs,
 ) -> Any:
     """Used to define fields on a declarative Dataset.
@@ -82,7 +81,7 @@
 
     return Field(
         cast(Type[DatasetAttribute[HDF5Any, T, T]], attribute_type),
-        AttributeInfo(doc=doc, py_type=py_type, is_param=is_param, **kwargs),
+        AttributeInfo(doc=doc, py_type=py_type, **kwargs),
     )
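With is_param removed, field() now forwards only doc, py_type, and any extra keyword metadata to AttributeInfo. A hedged sketch of declarative usage after this change — the QubitSystem class and its attributes are hypothetical, and it is assumed that Dataset and field are importable from pennylane.data:

from pennylane.data import Dataset, field

class QubitSystem(Dataset):
    """Hypothetical declarative dataset, shown only to illustrate field()."""

    num_wires: int = field(doc="Number of wires in the system", py_type=int)
    energy: float = field(doc="Ground state energy in Hartree")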
