diff --git a/TM1py/Services/CellService.py b/TM1py/Services/CellService.py index b093dec0..71444d45 100644 --- a/TM1py/Services/CellService.py +++ b/TM1py/Services/CellService.py @@ -11,7 +11,7 @@ from concurrent.futures.thread import ThreadPoolExecutor from contextlib import suppress from io import StringIO -from typing import List, Union, Dict, Iterable, Tuple, Optional +from typing import List, Union, Dict, Iterable, Tuple, Optional, Any import ijson from mdxpy import MdxHierarchySet, MdxBuilder, Member, MdxTuple @@ -2240,21 +2240,24 @@ def execute_mdx_csv(self, mdx: Union[str, MdxBuilder], top: int = None, skip: in cellset_id=cellset_id, top=top, skip=skip, skip_zeros=skip_zeros, skip_rule_derived_cells=skip_rule_derived_cells, skip_consolidated_cells=skip_consolidated_cells, csv_dialect=csv_dialect, line_separator=line_separator, value_separator=value_separator, - sandbox_name=sandbox_name, include_attributes=include_attributes, mdx_headers=mdx_headers, **kwargs) + sandbox_name=sandbox_name, include_attributes=include_attributes, mdx_headers=mdx_headers, + **kwargs) return self.extract_cellset_csv( cellset_id=cellset_id, top=top, skip=skip, skip_zeros=skip_zeros, skip_rule_derived_cells=skip_rule_derived_cells, skip_consolidated_cells=skip_consolidated_cells, csv_dialect=csv_dialect, line_separator=line_separator, value_separator=value_separator, sandbox_name=sandbox_name, include_attributes=include_attributes, - use_compact_json=use_compact_json, mdx_headers=mdx_headers, **kwargs) + use_compact_json=use_compact_json, mdx_headers=mdx_headers, + **kwargs) def execute_view_csv(self, cube_name: str, view_name: str, private: bool = False, top: int = None, skip: int = None, skip_zeros: bool = True, skip_consolidated_cells: bool = False, skip_rule_derived_cells: bool = False, csv_dialect: 'csv.Dialect' = None, line_separator: str = "\r\n", value_separator: str = ",", sandbox_name: str = None, use_iterative_json: bool = False, use_compact_json: bool = False, 
use_blob: bool = False, - arranged_axes: Tuple[List, List, List] = None, mdx_headers: bool = False, **kwargs) -> str: + arranged_axes: Tuple[List, List, List] = None, mdx_headers: bool = False, + **kwargs) -> str: """ Optimized for performance. Get csv string of coordinates and values. :param cube_name: String, name of the cube @@ -2278,7 +2281,7 @@ def execute_view_csv(self, cube_name: str, view_name: str, private: bool = False Allows function to skip retrieval of cellset composition. E.g.: arranged_axes=(["Year"], ["Region","Product"], ["Period", "Version"]) :param mdx_headers: boolean, fully qualified hierarchy name as header instead of simple dimension name - :return: String + :return: dict, String """ if use_blob: if use_iterative_json: @@ -2360,6 +2363,10 @@ def execute_mdx_dataframe(self, mdx: Union[str, MdxBuilder], top: int = None, sk sandbox_name: str = None, include_attributes: bool = False, use_iterative_json: bool = False, use_compact_json: bool = False, use_blob: bool = False, shaped: bool = False, mdx_headers: bool = False, + fillna_numeric_attributes: bool = False, + fillna_numeric_attributes_value: Any = 0, + fillna_string_attributes: bool = False, + fillna_string_attributes_value: Any = '', **kwargs) -> 'pd.DataFrame': """ Optimized for performance. Get Pandas DataFrame from MDX Query. @@ -2383,13 +2390,28 @@ def execute_mdx_dataframe(self, mdx: Union[str, MdxBuilder], top: int = None, sk :param use_blob: Has better performance on datasets > 1M cells and lower memory footprint in any case. 
:param shaped: preserve shape of view/mdx in data frame :param mdx_headers: boolean, fully qualified hierarchy name as header instead of simple dimension name + :param fillna_numeric_attributes: boolean, fills empty numerical attributes with fillna_numeric_attributes_value + :param fillna_string_attributes: boolean, fills empty string attributes with fillna_string_attributes_value + :param fillna_numeric_attributes_value: Any, value with which to replace na if fillna_numeric_attributes is True + :param fillna_string_attributes_value: Any, value with which to replace na if fillna_string_attributes is True :return: Pandas Dataframe """ + if (fillna_numeric_attributes or fillna_string_attributes) and not include_attributes: + raise ValueError('Include attributes must be True if fillna_numeric or fillna_string is True.') + # necessary to assure column order in line with cube view if shaped: skip_zeros = False if use_blob: + if any([ + fillna_numeric_attributes, + fillna_numeric_attributes_value, + fillna_string_attributes, + fillna_string_attributes_value] + ): + raise ValueError("fillna attributes' feature must not be used with use_blob as True") + raw_csv = self.execute_mdx_csv( mdx=mdx, top=top, @@ -2412,7 +2434,12 @@ def execute_mdx_dataframe(self, mdx: Union[str, MdxBuilder], top: int = None, sk skip_rule_derived_cells=skip_rule_derived_cells, sandbox_name=sandbox_name, include_attributes=include_attributes, use_iterative_json=use_iterative_json, use_compact_json=use_compact_json, - shaped=shaped, mdx_headers=mdx_headers, **kwargs) + shaped=shaped, mdx_headers=mdx_headers, + fillna_numeric_attributes=fillna_numeric_attributes, + fillna_numeric_attributes_value=fillna_numeric_attributes_value, + fillna_string_attributes=fillna_string_attributes, + fillna_string_attributes_value=fillna_string_attributes_value, + **kwargs) @require_pandas def execute_mdx_dataframe_async(self, mdx_list: List[Union[str, MdxBuilder]], max_workers: int = 8, @@ -3158,7 +3185,7 @@ def 
extract_cellset_raw( skip_contexts=skip_contexts, include_hierarchies=include_hierarchies, sandbox_name=sandbox_name, - **kwargs) + **{**kwargs, 'delete_cellset': False}) cells = self.extract_cellset_cells_raw(cellset_id=cellset_id, cell_properties=cell_properties, top=top, @@ -3173,6 +3200,7 @@ def extract_cellset_raw( # Combine metadata and cells back into a single object return {**metadata, **cells} + @tidy_cellset def extract_cellset_metadata_raw( self, cellset_id: str, @@ -3183,6 +3211,7 @@ def extract_cellset_metadata_raw( skip_contexts: bool = False, include_hierarchies: bool = False, sandbox_name: str = None, + delete_cellset: bool = False, **kwargs): # select Name property if member_properties is None or empty. @@ -3618,6 +3647,7 @@ def extract_cellset_composition( url = "/Cellsets('{}')?$expand=" \ "Cube($select=Name)," \ "Axes($expand=Hierarchies($select=UniqueName))".format(cellset_id) + url = add_url_parameters(url, **{"!sandbox": sandbox_name}) response = self._rest.GET(url=url, **kwargs) response_json = response.json() @@ -3686,15 +3716,13 @@ def extract_cellset_csv( :param mdx_headers: boolean. Fully qualified hierarchy name as header instead of simple dimension name :return: Raw format from TM1. 
""" - if 'delete_cellset' in kwargs: - delete_cellset = kwargs.pop('delete_cellset') - else: - delete_cellset = True + delete_cellset = kwargs.pop('delete_cellset', True) - _, _, rows, columns = self.extract_cellset_composition( + cube, _, rows, columns = self.extract_cellset_composition( cellset_id, delete_cellset=False, - sandbox_name=sandbox_name, **kwargs) + sandbox_name=sandbox_name, + **kwargs) cellset_dict = self.extract_cellset_raw( cellset_id, @@ -3804,6 +3832,7 @@ def extract_cellset_csv_iter_json( 'Axes.item.Ordinal'] attributes_prefixes = set() + attributes_by_dimension = None if include_attributes: attributes_by_dimension = self._get_attributes_by_dimension(cube) for _, attributes in attributes_by_dimension.items(): @@ -3903,6 +3932,10 @@ def extract_cellset_dataframe( use_compact_json: bool = False, shaped: bool = False, mdx_headers: bool = False, + fillna_numeric_attributes: bool = False, + fillna_numeric_attributes_value: Any = 0, + fillna_string_attributes: bool = False, + fillna_string_attributes_value: Any = '', **kwargs) -> 'pd.DataFrame': """ Build pandas data frame from cellset_id @@ -3934,9 +3967,61 @@ def extract_cellset_dataframe( cellset_id=cellset_id, top=top, skip=skip, skip_zeros=skip_zeros, skip_rule_derived_cells=skip_rule_derived_cells, skip_consolidated_cells=skip_consolidated_cells, value_separator='~', sandbox_name=sandbox_name, include_attributes=include_attributes, - use_compact_json=use_compact_json, mdx_headers=mdx_headers, **kwargs) + use_compact_json=use_compact_json, mdx_headers=mdx_headers, + # dont delete cellset if attribute types must be retrieved later + delete_cellset=not any([fillna_string_attributes, fillna_string_attributes]), **kwargs) + + attribute_types_by_dimension = None + if fillna_string_attributes or fillna_string_attributes: + attribute_types_by_dimension = self._extract_attribute_types_by_dimension( + cellset_id=cellset_id, + sandbox_name=sandbox_name, + delete_cellset=True, + **kwargs) + + return 
build_dataframe_from_csv(raw_csv, sep="~", shaped=shaped, + fillna_numeric_attributes=fillna_numeric_attributes, + fillna_string_attributes=fillna_string_attributes, + fillna_numeric_attributes_value=fillna_numeric_attributes_value, + fillna_string_attributes_value=fillna_string_attributes_value, + attribute_types_by_dimension=attribute_types_by_dimension, **kwargs) + + def _extract_attribute_types_by_dimension(self, cellset_id: str, sandbox_name: str, delete_cellset: bool, **kwargs): + attribute_types_by_dimension = {} - return build_dataframe_from_csv(raw_csv, sep="~", shaped=shaped, **kwargs) + _, _, rows, columns = self.extract_cellset_composition( + cellset_id, + delete_cellset=False, + sandbox_name=sandbox_name, **kwargs) + + metadata = self.extract_cellset_metadata_raw( + cellset_id=cellset_id, + elem_properties=['Name'], + member_properties=['Name', 'Attributes'], + top=1, + skip=0, + skip_contexts=True, + include_hierarchies=False, + sandbox_name=sandbox_name, + delete_cellset=delete_cellset, + **kwargs) + # gets the attribute names from the first member from the first tuple of each axis. 
+ attributes_by_dimension = dict(zip( + rows + columns, + [list(member['Attributes'].keys()) for axes in metadata['Axes'][::-1] for member in + axes['Tuples'][0]['Members']])) + element_service = self.get_element_service() + for dimension in rows + columns: + attribute_types_by_dimension[dimension] = element_service.get_element_types( + '}ElementAttributes_' + dimension.split('].[')[0][1:], + '}ElementAttributes_' + dimension.split('].[')[0][1:]) + + attribute_types_by_dimension[dimension] = { + attribute_name: attribute_type for attribute_name, attribute_type in + attribute_types_by_dimension[dimension].items() + if attribute_name in attributes_by_dimension[dimension]} + + return attribute_types_by_dimension @tidy_cellset @require_pandas diff --git a/TM1py/Utils/Utils.py b/TM1py/Utils/Utils.py index 08e91b51..ddf55aa3 100644 --- a/TM1py/Utils/Utils.py +++ b/TM1py/Utils/Utils.py @@ -533,7 +533,13 @@ def build_csv_from_cellset_dict( return csv_content.getvalue().strip() -def build_dataframe_from_csv(raw_csv, sep='~', shaped: bool = False, **kwargs) -> 'pd.DataFrame': +def build_dataframe_from_csv(raw_csv, sep='~', shaped: bool = False, + fillna_numeric_attributes: bool = False, + fillna_numeric_attributes_value: Any = 0, + fillna_string_attributes: bool = False, + fillna_string_attributes_value: Any = '', + attribute_types_by_dimension: Dict[str, Dict[str, str]] = None, + **kwargs) -> 'pd.DataFrame': if not raw_csv: return pd.DataFrame() @@ -542,11 +548,34 @@ def build_dataframe_from_csv(raw_csv, sep='~', shaped: bool = False, **kwargs) - kwargs['dtype'] = {'Value': None, **{col: str for col in range(999)}} try: df = pd.read_csv(StringIO(raw_csv), sep=sep, na_values=["", None], keep_default_na=False, **kwargs) + except ValueError: # retry with dtype 'str' for results with a mixed value column kwargs['dtype'] = {'Value': str, **{col: str for col in range(999)}} df = pd.read_csv(StringIO(raw_csv), sep=sep, na_values=["", None], keep_default_na=False, **kwargs) 
+ if fillna_numeric_attributes: + fill_numeric_bool_list = [attr_type.lower() == 'numeric' for dimension, attributes in + attribute_types_by_dimension.items() + for attr_type in [dimension] + list(attributes.values())] + fill_numeric_bool_list += [False] # for the value column + df = df.apply( + lambda col: + col.fillna(fillna_numeric_attributes_value) if fill_numeric_bool_list[ + list(df.columns.values).index(col.name)] else col, + axis=0) + + if fillna_string_attributes: + fill_string_bool_list = [attr_type.lower() == 'string' for dimension, attributes in + attribute_types_by_dimension.items() + for attr_type in [dimension] + list(attributes.values())] + fill_string_bool_list += [False] # for the value column + df = df.apply( + lambda col: + col.fillna(fillna_string_attributes_value) if fill_string_bool_list[ + list(df.columns.values).index(col.name)] else col, + axis=0) + if not shaped: return df diff --git a/Tests/CellService_test.py b/Tests/CellService_test.py index 2d8620a1..022ba6a7 100644 --- a/Tests/CellService_test.py +++ b/Tests/CellService_test.py @@ -2,11 +2,11 @@ import unittest from pathlib import Path -from mdxpy import CalculatedMember, MdxBuilder, MdxHierarchySet, Member +from mdxpy import CalculatedMember, MdxBuilder, MdxHierarchySet, Member, DimensionProperty from TM1py import Sandbox from TM1py.Exceptions.Exceptions import TM1pyException, TM1pyVersionException, TM1pyWritePartialFailureException, \ - TM1pyWriteFailureException + TM1pyWriteFailureException, TM1pyRestException from TM1py.Objects import (AnonymousSubset, Cube, Dimension, Element, ElementAttribute, Hierarchy, MDXView, NativeView) from TM1py.Services import TM1Service @@ -104,15 +104,8 @@ def setUpClass(cls): cls.tm1.dimensions.update(dimension) else: cls.tm1.dimensions.update_or_create(dimension) - attribute_cube = "}ElementAttributes_" + dimension_name - attribute_values = {} - for element in elements: - attribute_values[(element.name, "Attr1")] = "TM1py" - 
attribute_values[(element.name, "Attr2")] = "2" - attribute_values[(element.name, "Attr3")] = "3" - attribute_values[(element.name, "NA")] = "4" - cls.tm1.cells.write_values(attribute_cube, attribute_values) + cls._write_attribute_values() # Build Cube cube = Cube(cls.cube_name, cls.dimension_names) @@ -181,6 +174,22 @@ def setUpClass(cls): cls.create_cube_with_five_dimensions() + @classmethod + def _write_attribute_values(cls): + for dimension_name in cls.dimension_names: + elements = [ + Element('Element {}'.format(str(j)), 'Numeric') + for j + in range(1, 1001)] + attribute_cube = "}ElementAttributes_" + dimension_name + attribute_values = {} + for element in elements: + attribute_values[(element.name, "Attr1")] = "TM1py" if element.name != 'Element 2' else '' + attribute_values[(element.name, "Attr2")] = "2" + attribute_values[(element.name, "Attr3")] = "3" + attribute_values[(element.name, "NA")] = "4" + cls.tm1.cells.write(attribute_cube, attribute_values, use_blob=True) + @classmethod def setUp(cls): """ @@ -208,6 +217,9 @@ def setUp(cls): if not cls.tm1.sandboxes.exists(cls.sandbox_name): cls.tm1.sandboxes.create(Sandbox(cls.sandbox_name, True)) + cls._write_attribute_values() + + @classmethod def tearDown(cls): """ @@ -399,8 +411,9 @@ def test_write_and_get_value_hierarchy(self): original_value = self.tm1.cells.get_value(self.cube_name, 'Element1,EleMent2,ELEMENT 3') response = self.tm1.cells.write_value(4, self.cube_name, ('element1', 'ELEMENT 2', 'EleMent 3')) self.assertTrue(response.ok) - value = self.tm1.cells.get_value(self.cube_name, - f'{self.dimension_names[0]}::Element1,EleMent2,{self.dimension_names[2]}::ELEMENT 3') + value = self.tm1.cells.get_value( + self.cube_name, + f'{self.dimension_names[0]}::Element1,EleMent2,{self.dimension_names[2]}::ELEMENT 3') self.assertEqual(value, 4) self.tm1.cells.write_value(original_value, self.cube_name, ('element1', 'ELEMENT 2', 'EleMent 3')) @@ -1965,7 +1978,11 @@ def 
test_execute_mdx_raw_with_member_properties_with_elem_properties(self): self.assertIn("Attr1", element["Attributes"]) self.assertIn("Attr2", element["Attributes"]) self.assertNotIn("Attr3", element["Attributes"]) - self.assertEqual(element["Attributes"]["Attr1"], "TM1py") + # Element 2 is special (see setUp function) + if element["Name"] == "Element 2": + self.assertEqual(element["Attributes"]["Attr1"], None) + else: + self.assertEqual(element["Attributes"]["Attr1"], "TM1py") self.assertEqual(element["Attributes"]["Attr2"], 2) def test_execute_mdx_raw_with_member_properties_without_elem_properties(self): @@ -2759,7 +2776,7 @@ def test_execute_mdx_dataframe_include_attributes(self): 'Attr2': {0: 2}, 'TM1py_Tests_Cell_Dimension1': {0: 'Element 1'}, 'Attr1': {0: 'TM1py'}, - 'Value': {0: 1.0}} + 'Value': {0: 1}} self.assertEqual(expected, df.to_dict()) @@ -3414,7 +3431,10 @@ def test_execute_view_raw_with_member_properties_without_elem_properties(self): self.assertIn("Attr1", member["Attributes"]) self.assertIn("Attr2", member["Attributes"]) self.assertNotIn("Attr3", member["Attributes"]) - self.assertEqual(member["Attributes"]["Attr1"], "TM1py") + if member["Name"] == "Element 2": + self.assertEqual(member["Attributes"]["Attr1"], None) + else: + self.assertEqual(member["Attributes"]["Attr1"], "TM1py") self.assertEqual(member["Attributes"]["Attr2"], 2) def test_execute_view_raw_with_elem_properties_without_member_properties(self): @@ -3447,7 +3467,10 @@ def test_execute_view_raw_with_elem_properties_without_member_properties(self): self.assertIn("Attr1", element["Attributes"]) self.assertIn("Attr2", element["Attributes"]) self.assertNotIn("Attr3", element["Attributes"]) - self.assertEqual(element["Attributes"]["Attr1"], "TM1py") + if element["Name"] == "Element 2": + self.assertEqual(element["Attributes"]["Attr1"], None) + else: + self.assertEqual(element["Attributes"]["Attr1"], "TM1py") self.assertEqual(element["Attributes"]["Attr2"], 2) self.assertNotIn("Type", 
member) self.assertNotIn("UniqueName", member) @@ -3483,7 +3506,10 @@ def test_execute_view_with_elem_properties_with_member_properties(self): self.assertIn("Attr1", member["Attributes"]) self.assertIn("Attr2", member["Attributes"]) self.assertNotIn("Attr3", member["Attributes"]) - self.assertEqual(member["Attributes"]["Attr1"], "TM1py") + if member["Name"] == "Element 2": + self.assertEqual(member["Attributes"]["Attr1"], None) + else: + self.assertEqual(member["Attributes"]["Attr1"], "TM1py") self.assertEqual(member["Attributes"]["Attr2"], 2) element = member["Element"] self.assertIn("Name", element) @@ -3491,7 +3517,10 @@ def test_execute_view_with_elem_properties_with_member_properties(self): self.assertIn("Attr1", element["Attributes"]) self.assertIn("Attr2", element["Attributes"]) self.assertNotIn("Attr3", element["Attributes"]) - self.assertEqual(element["Attributes"]["Attr1"], "TM1py") + if element["Name"] == "Element 2": + self.assertEqual(element["Attributes"]["Attr1"], None) + else: + self.assertEqual(element["Attributes"]["Attr1"], "TM1py") self.assertEqual(element["Attributes"]["Attr2"], 2) def test_execute_view_raw_with_top(self): @@ -3507,7 +3536,7 @@ def test_execute_view_raw_with_top(self): def test_execute_view_values(self): cell_values = self.tm1.cells.execute_view_values(cube_name=self.cube_name, view_name=self.view_name, - private=False) + private=False) # check type self.assertIsInstance(cell_values, list) @@ -3992,8 +4021,8 @@ def test_write_values_through_cellset_deactivate_transaction_log_reactivate_tran @skip_if_deprecated_in_version(version='12') def test_deactivate_transaction_log(self): self.tm1.cells.write_value(value="YES", - cube_name="}CubeProperties", - element_tuple=(self.cube_name, "Logging")) + cube_name="}CubeProperties", + element_tuple=(self.cube_name, "Logging")) self.tm1.cells.deactivate_transactionlog(self.cube_name) value = self.tm1.cells.get_value("}CubeProperties", "{},LOGGING".format(self.cube_name)) 
self.assertEqual("NO", value.upper()) @@ -4001,8 +4030,8 @@ def test_deactivate_transaction_log(self): @skip_if_deprecated_in_version(version='12') def test_activate_transaction_log(self): self.tm1.cells.write_value(value="NO", - cube_name="}CubeProperties", - element_tuple=(self.cube_name, "Logging")) + cube_name="}CubeProperties", + element_tuple=(self.cube_name, "Logging")) self.tm1.cells.activate_transactionlog(self.cube_name) value = self.tm1.cells.get_value("}CubeProperties", "{},LOGGING".format(self.cube_name)) self.assertEqual("YES", value.upper()) @@ -4010,7 +4039,7 @@ def test_activate_transaction_log(self): def test_read_write_with_custom_encoding(self): coordinates = ("d1e1", "d2e2", "d3e3") self.tm1.cells.write_values(self.string_cube_name, {coordinates: self.latin_1_encoded_text}, - encoding="latin-1") + encoding="latin-1") mdx = MdxBuilder.from_cube(self.string_cube_name) \ .add_hierarchy_set_to_column_axis( @@ -4027,7 +4056,7 @@ def test_read_write_with_custom_encoding(self): def test_read_write_with_custom_encoding_fail_response_encoding(self): coordinates = ("d1e1", "d2e2", "d3e3") self.tm1.cells.write_values(self.string_cube_name, {coordinates: self.latin_1_encoded_text}, - encoding="latin-1") + encoding="latin-1") mdx = MdxBuilder.from_cube(self.string_cube_name) \ .add_hierarchy_set_to_column_axis( @@ -4728,9 +4757,9 @@ def test_extract_cellset_axes_raw_async_without_rows(self): cellset_id = self.tm1.cells.create_cellset(mdx=mdx) data_async0 = self.tm1.cells.extract_cellset_axes_raw_async(cellset_id=cellset_id, async_axis=0) - data = self.tm1.cells.extract_cellset_metadata_raw(cellset_id=cellset_id) - self.assertEqual( - data['Axes'], data_async0['Axes']) + data = self.tm1.cells.extract_cellset_metadata_raw(cellset_id=cellset_id, delete_cellset=False) + self.assertEqual(data['Axes'], data_async0['Axes']) + print('axes empty row', len(data['Axes'])) with self.assertRaises(ValueError) as _: 
self.tm1.cells.extract_cellset_axes_raw_async(cellset_id=cellset_id) @@ -4755,6 +4784,10 @@ def test_extract_cellset_axes_raw_async_with_empty_columns(self): self.assertEqual( data['Axes'], data_async1['Axes']) + # verify cellset deletion + with self.assertRaises(TM1pyRestException): + self.tm1.cells.extract_cellset_cellcount(cellset_id) + def test_extract_cellset_axes_raw_async_with_member_properties_with_elem_properties(self): mdx = MdxBuilder.from_cube(self.cube_name) \ .rows_non_empty() \ @@ -4770,19 +4803,23 @@ def test_extract_cellset_axes_raw_async_with_member_properties_with_elem_propert elem_properties = ["Name", "UniqueName", "Attributes/Attr1", "Attributes/Attr2"] member_properties = ["Name", "Ordinal", "Weight"] data_async0 = self.tm1.cells.extract_cellset_axes_raw_async(cellset_id=cellset_id, async_axis=0, - elem_properties=elem_properties, - member_properties=member_properties) + elem_properties=elem_properties, + member_properties=member_properties) data_async1 = self.tm1.cells.extract_cellset_axes_raw_async(cellset_id=cellset_id, - elem_properties=elem_properties, - member_properties=member_properties) + elem_properties=elem_properties, + member_properties=member_properties) data = self.tm1.cells.extract_cellset_metadata_raw(cellset_id=cellset_id, - elem_properties=elem_properties, - member_properties=member_properties) + elem_properties=elem_properties, + member_properties=member_properties) self.assertEqual( data['Axes'], data_async0['Axes']) self.assertEqual( data['Axes'], data_async1['Axes']) + # verify cellset deletion + with self.assertRaises(TM1pyRestException): + self.tm1.cells.extract_cellset_cellcount(cellset_id) + def test_extract_cellset_cells_raw_async(self): mdx = MdxBuilder.from_cube(self.cube_name) \ .rows_non_empty() \ @@ -4814,9 +4851,9 @@ def test_extract_cellset_cells_raw_async_with_cell_properties(self): cellset_id = self.tm1.cells.create_cellset(mdx=mdx) cell_properties = ['Value', 'Updateable', 'Consolidated', 'RuleDerived'] 
data_async = self.tm1.cells.extract_cellset_cells_raw_async(cellset_id=cellset_id, - cell_properties=cell_properties) + cell_properties=cell_properties) data = self.tm1.cells.extract_cellset_cells_raw(cellset_id=cellset_id, - cell_properties=cell_properties) + cell_properties=cell_properties) self.assertEqual( data['Cells'], data_async['Cells']) @@ -4841,6 +4878,56 @@ def test_extract_cellset_cells_raw_async_skip_consolidated(self): self.assertEqual( data['Cells'], data_async['Cells']) + def test_empty_dimension_attribute_as_string(self): + + mdx = MdxBuilder.from_cube(self.cube_name).rows_non_empty() + + for dim in self.dimension_names[:-1]: + mdx.add_hierarchy_set_to_row_axis( + MdxHierarchySet.members([Member.of(dim, dim, "Element 8"), Member.of(dim, dim, "Element 9")])) + mdx.add_properties_to_row_axis(DimensionProperty(dim, dim, 'Attr1')) + mdx.add_properties_to_row_axis(DimensionProperty(dim, dim, 'Attr2')) + mdx.add_properties_to_row_axis(DimensionProperty(dim, dim, 'Attr3')) + mdx.add_properties_to_row_axis(DimensionProperty(dim, dim, 'NA')) + + mdx.add_hierarchy_set_to_column_axis(MdxHierarchySet.all_members( + self.dimension_names[-1], + self.dimension_names[-1])) + mdx.add_properties_to_column_axis( + DimensionProperty(self.dimension_names[-1], self.dimension_names[-1], 'Attr1')) + mdx.add_properties_to_column_axis( + DimensionProperty(self.dimension_names[-1], self.dimension_names[-1], 'Attr2')) + mdx.add_properties_to_column_axis( + DimensionProperty(self.dimension_names[-1], self.dimension_names[-1], 'Attr3')) + mdx.add_properties_to_column_axis(DimensionProperty(self.dimension_names[-1], self.dimension_names[-1], 'NA')) + + self.tm1.cells.write( + cube_name='}ElementAttributes_' + self.dimension_names[0], + cellset_as_dict={('Element 8', 'Attr1'): ''}) + self.tm1.cells.write( + cube_name='}ElementAttributes_' + self.dimension_names[0], + cellset_as_dict={('Element 8', 'Attr2'): 0}) + self.tm1.cells.write( + cube_name='}ElementAttributes_' + 
self.dimension_names[0], + cellset_as_dict={('Element 9', 'Attr1'): 'TM1py'}) + self.tm1.cells.write( + cube_name='}ElementAttributes_' + self.dimension_names[0], + cellset_as_dict={('Element 9', 'Attr2'): 123}) + + df = self.tm1.cells.execute_mdx_dataframe( + mdx=mdx.to_mdx(), + fillna_numeric_attributes=True, + fillna_numeric_attributes_value=888, + fillna_string_attributes=True, + fillna_string_attributes_value='Nothing', + include_attributes=True) + + self.assertEqual('Nothing', df.loc[0, 'Attr1']) + self.assertEqual('TM1py', df.loc[1, 'Attr1']) + + self.assertEqual(888, df.loc[0, 'Attr2']) + self.assertEqual('123', df.loc[1, 'Attr2']) + # Delete Cube and Dimensions @classmethod def tearDownClass(cls):