
Cleanup #1406

Merged: 17 commits, May 4, 2023
4 changes: 0 additions & 4 deletions .github/workflows/main.yml
@@ -35,7 +35,6 @@ jobs:
filters: |
docker:
- 'docker/**'

- name: Pull Docker
if: steps.changes.outputs.docker == 'false'
run: |
@@ -81,7 +80,6 @@ jobs:
cat <<EOF | docker run --rm -i -v $(pwd):/code ${{ env.DOCKER_IMAGE }} bash -
pip install -e /code/tests/drivers/fail_drivers --no-deps
pip install -e /code/examples/io_plugin --no-deps

pytest -r a \
--cov datacube \
--cov-report=xml \
@@ -114,7 +112,6 @@ jobs:
ls -lh ./dist/
twine check ./dist/*
EOF

- name: Publish to PyPi
if: |
github.event_name == 'push'
@@ -135,7 +132,6 @@
else
echo "Skipping upload as 'PyPiToken' is not set"
fi

env:
TWINE_PASSWORD: ${{ secrets.PyPiToken }}

2 changes: 1 addition & 1 deletion datacube/api/core.py
@@ -295,7 +295,7 @@ def load(self, product=None, measurements=None, output_crs=None, resolution=None
If a list is specified, the measurements will be returned in the order requested.
By default all available measurements are included.

:param \*\*query:
:param **query:
Search parameters for products and dimension ranges as described above.
For example: ``'x', 'y', 'time', 'crs'``.

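The docstring touched above documents the extra keyword search parameters accepted by `Datacube.load`. As a purely illustrative sketch (the product name and extents are hypothetical, not taken from this PR), the `**query` arguments sit alongside the named parameters like this:

```python
# Illustrative only: how the **query kwargs documented above are passed through.
from datacube import Datacube

dc = Datacube(app="query-example")
data = dc.load(
    product="ls8_example",          # hypothetical product name
    measurements=["red", "nir"],    # returned in the order requested
    x=(149.0, 149.2),               # **query: spatial range
    y=(-35.4, -35.2),
    time=("2020-01-01", "2020-02-01"),
    output_crs="EPSG:3577",
    resolution=(-30, 30),
)
```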
2 changes: 1 addition & 1 deletion datacube/api/grid_workflow.py
@@ -70,7 +70,7 @@ def shape(self):
@property
def product(self):
"""
:rtype: datacube.model.DatasetType
:rtype: datacube.model.Product
"""
return self.sources.values[0][0].product

4 changes: 2 additions & 2 deletions datacube/drivers/postgis/_fields.py
@@ -22,7 +22,7 @@
from datacube import utils
from datacube.model.fields import Expression, Field
from datacube.model import Range
from datacube.utils import get_doc_offset_safe
from datacube.utils import get_doc_offset

from datacube.drivers.postgis._schema import Dataset, search_field_index_map
from datacube.utils import cached_property
@@ -197,7 +197,7 @@ def _extract_offset_value(self, doc, doc_offsets, agg_function):
# It's a single offset.
doc_offsets = [doc_offsets]

values = (get_doc_offset_safe(offset, doc) for offset in doc_offsets)
values = (get_doc_offset(offset, doc) for offset in doc_offsets)
values = [self.parse_value(v) for v in values if v is not None]

if not values:
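For context, a document offset here is a sequence of keys into a nested metadata document. The sketch below is a minimal, self-contained illustration of that lookup pattern with a default for missing keys; it is not the datacube implementation, and the function name is invented for illustration:

```python
# Illustrative sketch of offset-based lookup into a nested document.
from typing import Any, Optional, Sequence

def lookup_offset(offset: Sequence[str], doc: dict, default: Optional[Any] = None) -> Any:
    """Walk a nested dict by a sequence of keys; return `default` if any key is missing."""
    current: Any = doc
    for key in offset:
        if not isinstance(current, dict) or key not in current:
            return default
        current = current[key]
    return current

doc = {"properties": {"eo:platform": "landsat-8"}}
print(lookup_offset(["properties", "eo:platform"], doc))  # landsat-8
print(lookup_offset(["properties", "missing"], doc))      # None
```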
4 changes: 2 additions & 2 deletions datacube/drivers/postgres/_fields.py
@@ -20,7 +20,7 @@
from datacube import utils
from datacube.model.fields import Expression, Field
from datacube.model import Range
from datacube.utils import get_doc_offset_safe
from datacube.utils import get_doc_offset
from .sql import FLOAT8RANGE

from datacube.utils.dates import tz_aware
@@ -167,7 +167,7 @@ def _extract_offset_value(self, doc, doc_offsets, agg_function):
# It's a single offset.
doc_offsets = [doc_offsets]

values = (get_doc_offset_safe(offset, doc) for offset in doc_offsets)
values = (get_doc_offset(offset, doc) for offset in doc_offsets)
values = [self.parse_value(v) for v in values if v is not None]

if not values:
2 changes: 1 addition & 1 deletion datacube/index/memory/_products.py
@@ -9,7 +9,7 @@
from datacube.index.fields import as_expression
from datacube.index.abstract import AbstractProductResource, QueryField
from datacube.index.memory._metadata_types import MetadataTypeResource
from datacube.model import DatasetType as Product
from datacube.model import Product
from datacube.utils import changes, jsonify_document, _readable_offset
from datacube.utils.changes import AllowPolicy, Change, Offset, check_doc_unchanged, get_doc_changes, classify_changes
from datacube.utils.documents import metadata_subset
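Several index modules in this PR switch from the old `DatasetType` name to `Product`. Downstream code that must run against both older and newer datacube releases could hedge the import; this is only a sketch, under the assumption that `DatasetType` remains the importable name on older versions:

```python
# Compatibility sketch only: prefer the new name, fall back to the old alias.
try:
    from datacube.model import Product
except ImportError:
    # Older datacube releases expose the class as DatasetType (assumption).
    from datacube.model import DatasetType as Product
```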
4 changes: 2 additions & 2 deletions datacube/index/null/_datasets.py
@@ -4,7 +4,7 @@
# SPDX-License-Identifier: Apache-2.0

from datacube.index.abstract import AbstractDatasetResource, DSID
from datacube.model import Dataset, DatasetType
from datacube.model import Dataset, Product
from typing import Iterable


@@ -31,7 +31,7 @@ def add(self, dataset: Dataset,
with_lineage: bool = True) -> Dataset:
raise NotImplementedError()

def search_product_duplicates(self, product: DatasetType, *args):
def search_product_duplicates(self, product: Product, *args):
return []

def can_update(self, dataset, updates_allowed=None):
6 changes: 3 additions & 3 deletions datacube/index/null/_products.py
@@ -5,7 +5,7 @@
import logging

from datacube.index.abstract import AbstractProductResource
from datacube.model import DatasetType
from datacube.model import Product

from typing import Iterable

@@ -22,7 +22,7 @@ def add(self, product, allow_table_lock=False):
def can_update(self, product, allow_unsafe_updates=False, allow_table_lock=False):
raise NotImplementedError()

def update(self, product: DatasetType, allow_unsafe_updates=False, allow_table_lock=False):
def update(self, product: Product, allow_unsafe_updates=False, allow_table_lock=False):
raise NotImplementedError()

def get_unsafe(self, id_):
@@ -40,5 +40,5 @@ def search_robust(self, **query):
def search_by_metadata(self, metadata):
return []

def get_all(self) -> Iterable[DatasetType]:
def get_all(self) -> Iterable[Product]:
return []
2 changes: 1 addition & 1 deletion datacube/index/postgres/index.py
@@ -32,7 +32,7 @@ class Index(AbstractIndex):
other connections are active. Or else use a separate instance of this class in each process.

:ivar datacube.index._datasets.DatasetResource datasets: store and retrieve :class:`datacube.model.Dataset`
:ivar datacube.index._products.ProductResource products: store and retrieve :class:`datacube.model.DatasetType`\
:ivar datacube.index._products.ProductResource products: store and retrieve :class:`datacube.model.Product`\
(should really be called Product)
:ivar datacube.index._metadata_types.MetadataTypeResource metadata_types: store and retrieve \
:class:`datacube.model.MetadataType`
8 changes: 8 additions & 0 deletions datacube/model/__init__.py
@@ -34,6 +34,8 @@
"ExtraDimensions", "IngestorConfig"
]

from deprecat import deprecat

_LOG = logging.getLogger(__name__)

DEFAULT_SPATIAL_DIMS = ('y', 'x') # Used when product lacks grid_spec
@@ -93,6 +95,9 @@ def __init__(self,
self.archived_time = archived_time

@property
@deprecat(
reason="The 'type' attribute has been deprecated. Please use the 'product' attribute instead.",
version='1.9.0')
def type(self) -> "Product":
# For compatibility
return self.product
@@ -231,6 +236,9 @@ def is_archived(self) -> bool:
return self.archived_time is not None

@property
@deprecat(
reason="The 'is_active' attribute has been deprecated. Please use 'is_archived' instead.",
version="1.9.0")
def is_active(self) -> bool:
"""
Is this dataset active?
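The `@deprecat` decorator added above wraps the legacy accessors so that reading `dataset.type` or `dataset.is_active` still works but emits a deprecation warning pointing at the replacement. A standalone sketch of the pattern (not the datacube class itself, and assuming deprecat's default DeprecationWarning category):

```python
import warnings
from deprecat import deprecat

class Record:
    def __init__(self, product):
        self.product = product

    @property
    @deprecat(reason="Use 'product' instead.", version="1.9.0")
    def type(self):
        # Kept only for backwards compatibility.
        return self.product

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    Record("ls8_example").type          # emits a deprecation warning
    print(caught[0].category.__name__)  # expected: DeprecationWarning
```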
4 changes: 1 addition & 3 deletions datacube/model/properties.py
@@ -63,7 +63,7 @@ def datetime_type(value):


def of_enum_type(
vals: Union[EnumMeta, Tuple[str, ...]] = None, lower=False, upper=False, strict=True
vals: Union[EnumMeta, Tuple[str, ...]] = None, lower=False, strict=True
) -> Callable[[str], str]:
if isinstance(vals, EnumMeta):
vals = tuple(vals.__members__.keys())
@@ -72,8 +72,6 @@ def normalise(v: str):
if isinstance(v, Enum):
v = v.name

if upper:
v = v.upper()
if lower:
v = v.lower()

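The change above drops the unused `upper` option from the `of_enum_type` normaliser factory, leaving only lower-casing. As a self-contained sketch of the same validate-and-normalise pattern (not the datacube code; names and the strict check are invented for illustration):

```python
# Sketch of an enum-backed normaliser factory, loosely mirroring of_enum_type.
from enum import Enum
from typing import Callable, Tuple, Union

def enum_normaliser(vals: Union[type, Tuple[str, ...]], lower: bool = False,
                    strict: bool = True) -> Callable[[str], str]:
    """Return a callable that checks a value against allowed names, optionally lower-casing it."""
    if isinstance(vals, type) and issubclass(vals, Enum):
        vals = tuple(vals.__members__.keys())
    allowed = {v.lower() if lower else v for v in vals}

    def normalise(v):
        if isinstance(v, Enum):
            v = v.name
        if lower:
            v = v.lower()
        if strict and v not in allowed:
            raise ValueError(f"{v!r} is not one of {sorted(allowed)}")
        return v

    return normalise

class Platform(Enum):
    LANDSAT_8 = "landsat-8"
    SENTINEL_2 = "sentinel-2"

norm = enum_normaliser(Platform, lower=True)
print(norm("LANDSAT_8"))  # landsat_8
```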
2 changes: 1 addition & 1 deletion datacube/model/utils.py
@@ -197,7 +197,7 @@ def make_dataset(product, sources, extent, center_time, valid_data=None, uri=Non
"""
Create :class:`datacube.model.Dataset` for the data

:param DatasetType product: Product the dataset is part of
:param Product product: Product the dataset is part of
:param list[:class:`Dataset`] sources: datasets used to produce the dataset
:param Geometry extent: extent of the dataset
:param Geometry valid_data: extent of the valid data
10 changes: 5 additions & 5 deletions datacube/scripts/ingest.py
@@ -17,7 +17,7 @@
import datacube
from datacube.api.core import Datacube
from datacube.index import Index
from datacube.model import DatasetType, Range, Measurement, IngestorConfig
from datacube.model import Product, Range, Measurement, IngestorConfig
from datacube.utils import geometry
from datacube.model.utils import make_dataset, xr_apply, datasets_to_doc
from datacube.ui import click as ui
@@ -57,7 +57,7 @@ def morph_dataset_type(source_type, config, index, storage_format):
if 'metadata_type' in config:
output_metadata_type = index.metadata_types.get_by_name(config['metadata_type'])

output_type = DatasetType(output_metadata_type, deepcopy(source_type.definition))
output_type = Product(output_metadata_type, deepcopy(source_type.definition))
output_type.definition['name'] = config['output_type']
output_type.definition['managed'] = True
output_type.definition['description'] = config['description']
@@ -151,7 +151,7 @@ def get_resampling(config):
def ensure_output_type(index: Index,
config: dict,
storage_format: str,
allow_product_changes: bool = False) -> Tuple[DatasetType, DatasetType]:
allow_product_changes: bool = False) -> Tuple[Product, Product]:
"""
Create the output product for the given ingest config if it doesn't already exist.

@@ -160,11 +160,11 @@ def ensure_output_type(index: Index,
"""
source_type = index.products.get_by_name(config['source_type'])
if not source_type:
click.echo("Source DatasetType %s does not exist" % config['source_type'])
click.echo("Source Product %s does not exist" % config['source_type'])
click.get_current_context().exit(1)

output_type = morph_dataset_type(source_type, config, index, storage_format)
_LOG.info('Created DatasetType %s', output_type.name)
_LOG.info('Created Product %s', output_type.name)

existing = index.products.get_by_name(output_type.name)
if existing:
4 changes: 2 additions & 2 deletions datacube/testutils/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
from datacube.model import Measurement
from datacube.utils.dates import mk_time_coord
from datacube.utils.documents import parse_yaml
from datacube.model import Dataset, DatasetType, MetadataType
from datacube.model import Dataset, Product, MetadataType
from datacube.ui.common import get_metadata_path
from datacube.utils import read_documents, SimpleDocNav
from datacube.utils.geometry import GeoBox, CRS
@@ -206,7 +206,7 @@ def mk_measurement(m):
if load is not None:
definition['load'] = load

return DatasetType(metadata_type, definition)
return Product(metadata_type, definition)


def mk_sample_dataset(bands,
2 changes: 0 additions & 2 deletions datacube/utils/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
validate_document,
NoDatesSafeLoader,
get_doc_offset,
get_doc_offset_safe,
netcdf_extract_string,
without_lineage_sources,
schema_validated,
@@ -61,7 +60,6 @@
"validate_document",
"NoDatesSafeLoader",
"get_doc_offset",
"get_doc_offset_safe",
"netcdf_extract_string",
"without_lineage_sources",
"unsqueeze_data_array",