From 3e743a32a41791b9daa2eca64aa14665e717e1ef Mon Sep 17 00:00:00 2001 From: MichaelTiemann <72577720+MichaelTiemannOSC@users.noreply.github.com> Date: Thu, 13 Oct 2022 21:14:26 -0400 Subject: [PATCH 01/24] Parse uncertain numbers e.g. (1.0+/-0.2)e+03 Enable Pint to consume uncertain quantities. Signed-off-by: 72577720+MichaelTiemannOSC@users.noreply.github.com --- pint/pint_eval.py | 118 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 106 insertions(+), 12 deletions(-) diff --git a/pint/pint_eval.py b/pint/pint_eval.py index 2054260b4..c48f4f66c 100644 --- a/pint/pint_eval.py +++ b/pint/pint_eval.py @@ -12,11 +12,13 @@ import operator import token as tokenlib import tokenize +from uncertainties import ufloat from .errors import DefinitionSyntaxError # For controlling order of operations _OP_PRIORITY = { + "+/-": 4, "**": 3, "^": 3, "unary": 2, @@ -30,6 +32,10 @@ } +def _ufloat(left, right): + return ufloat(left, right) + + def _power(left, right): from . import Quantity from .compat import is_duck_array @@ -46,6 +52,7 @@ def _power(left, right): _BINARY_OPERATOR_MAP = { + "+/-": _ufloat, "**": _power, "*": operator.mul, "": operator.mul, # operator for implicit ops @@ -117,6 +124,7 @@ def evaluate(self, define_op, bin_op=None, un_op=None): # unary operator op_text = self.operator[1] if op_text not in un_op: + breakpoint() raise DefinitionSyntaxError('missing unary operator "%s"' % op_text) return un_op[op_text](self.left.evaluate(define_op, bin_op, un_op)) else: @@ -163,6 +171,12 @@ def build_eval_tree( tokens = list(tokens) result = None + + def _number_or_nan(token): + if (token.type==tokenlib.NUMBER + or (token.type==tokenlib.NAME and token.string=='nan')): + return True + return False while True: current_token = tokens[index] @@ -182,18 +196,72 @@ def build_eval_tree( # parenthetical group ending, but we need to close sub-operations within group return result, index - 1 elif token_text == "(": - # gather parenthetical group - right, index = build_eval_tree( - tokens, op_priority, index + 1, 0, token_text - ) - if not tokens[index][1] == ")": - raise DefinitionSyntaxError("weird exit from parentheses") - if result: - # implicit op with a parenthetical group, i.e. "3 (kg ** 2)" - result = EvalTreeNode(left=result, right=right) + # a ufloat is of the form `( nominal_value + / - std ) possible_e_notation` and parses as a NUMBER + # alas, we cannot simply consume the nominal_value and then see the +/- operator, because naive + # parsing on the nominal_value thinks it needs to eval the + as part of the nominal_value. 
+ if (index+6 < len(tokens) + and _number_or_nan(tokens[index+1]) + and tokens[index+2].string=='+' + and tokens[index+3].string=='/' + and tokens[index+4].string=='-' + and _number_or_nan(tokens[index+5])): + # breakpoint() + # get nominal_value + left, _ = build_eval_tree( + # This should feed the parser only a single token--the number representing the nominal_value + [tokens[index+1], tokens[-1]], op_priority, 0, 0, tokens[index+1].string + ) + plus_minus_line = tokens[index].line[tokens[index].start[1]:tokens[index+6].end[1]] + plus_minus_start = tokens[index+2].start + plus_minus_end = tokens[index+4].end + plus_minus_operator = tokenize.TokenInfo(type=tokenlib.OP, string='+/-', start=plus_minus_start, end=plus_minus_end, line=plus_minus_line) + remaining_line = tokens[index].line[tokens[index+6].end[1]:] + + right, _ = build_eval_tree( + [tokens[index+5], tokens[-1]], op_priority, 0, 0, tokens[index+5].string + ) + if tokens[index+6].string==')': + # consume the uncertainty number seen thus far + index += 6 + else: + raise DefinitionSyntaxError("weird exit from ufloat construction") + # now look for possible scientific e-notation + if (index+4 < len(tokens) + and tokens[index+1].string=='e' + and tokens[index+2].string in ['+', '-'] + and tokens[index+3].type==tokenlib.NUMBER): + # There may be more NUMBERS that follow because the tokenizer is lost. + # So pick them all up + for exp_number_end in range(index+4, len(tokens)): + if tokens[exp_number_end].type != tokenlib.NUMBER: + break + e_notation_line = remaining_line[:tokens[exp_number_end].start[1]-tokens[index+1].start[1]] + exp_number = '1.0e' + ''.join([digit.string for digit in tokens[index+3:exp_number_end]]) + exp_number_token = tokenize.TokenInfo(type=tokenlib.NUMBER, string=exp_number, start=(1, 0), end=(1, len(exp_number)), line=exp_number) + e_notation_operator = tokenize.TokenInfo(type=tokenlib.OP, string='*', start=(1, 0), end=(1, 1), line='*') + e_notation_scale, _ = build_eval_tree([exp_number_token, tokens[-1]], op_priority, 0, 0, tokens[exp_number_end].string) + scaled_left = EvalTreeNode(left, e_notation_operator, e_notation_scale) + scaled_right = EvalTreeNode(right, e_notation_operator, e_notation_scale) + result = EvalTreeNode(scaled_left, plus_minus_operator, scaled_right) + index = exp_number_end + # We know we are not at an ENDMARKER here + continue + else: + result = EvalTreeNode(left, plus_minus_operator, right) + # We can fall through...index+=1 operation will consume ')' else: - # get first token - result = right + # gather parenthetical group + right, index = build_eval_tree( + tokens, op_priority, index + 1, 0, token_text + ) + if not tokens[index][1] == ")": + raise DefinitionSyntaxError("weird exit from parentheses") + if result: + # implicit op with a parenthetical group, i.e. "3 (kg ** 2)" + result = EvalTreeNode(left=result, right=right) + else: + # get first token + result = right elif token_text in op_priority: if result: # equal-priority operators are grouped in a left-to-right order, @@ -221,7 +289,33 @@ def build_eval_tree( ) result = EvalTreeNode(left=right, operator=current_token) elif token_type == tokenlib.NUMBER or token_type == tokenlib.NAME: - if result: + # a ufloat could be naked, meaning `nominal_value + / - std` and parses as a NUMBER + # alas, we cannot simply consume the nominal_value and then see the +/- operator, because naive + # parsing on the nominal_value thinks it needs to eval the + as part of the nominal_value. 
+ if (index+4 < len(tokens) + and _number_or_nan(tokens[index]) + and tokens[index+1].string=='+' + and tokens[index+2].string=='/' + and tokens[index+3].string=='-' + and _number_or_nan(tokens[index+4])): + # The +/- operator binds tightest, so we don't need to end a previous binop + if tokens[index+5].type==tokenlib.NUMBER: + breakpoint() + # get nominal_value + left = EvalTreeNode(left=current_token) + plus_minus_line = tokens[index].line[tokens[index].start[1]:tokens[index+4].end[1]] + plus_minus_start = tokens[index+1].start + plus_minus_end = tokens[index+3].end + plus_minus_operator = tokenize.TokenInfo(type=tokenlib.OP, string='+/-', start=plus_minus_start, end=plus_minus_end, line=plus_minus_line) + remaining_line = tokens[index].line[tokens[index+4].end[1]:] + + right, _ = build_eval_tree( + [tokens[index+4], tokens[-1]], op_priority, 0, 0, tokens[index+4].string + ) + result = EvalTreeNode(left, plus_minus_operator, right) + index += 4 + continue + elif result: # tokens with an implicit operation i.e. "1 kg" if op_priority[""] <= op_priority.get(prev_op, -1): # previous operator is higher priority than implicit, so end From a54c5972b92b7bce46c40f248946a0e4444fe385 Mon Sep 17 00:00:00 2001 From: MichaelTiemann <72577720+MichaelTiemannOSC@users.noreply.github.com> Date: Sat, 15 Oct 2022 05:13:24 -0400 Subject: [PATCH 02/24] Fix problems identified by python -m pre_commit run --all-files Signed-off-by: MichaelTiemann <72577720+MichaelTiemannOSC@users.noreply.github.com> --- pint/pint_eval.py | 152 +++++++++++++++++++++++++++++++++------------- 1 file changed, 109 insertions(+), 43 deletions(-) diff --git a/pint/pint_eval.py b/pint/pint_eval.py index c48f4f66c..634a901ac 100644 --- a/pint/pint_eval.py +++ b/pint/pint_eval.py @@ -12,6 +12,7 @@ import operator import token as tokenlib import tokenize + from uncertainties import ufloat from .errors import DefinitionSyntaxError @@ -171,10 +172,11 @@ def build_eval_tree( tokens = list(tokens) result = None - + def _number_or_nan(token): - if (token.type==tokenlib.NUMBER - or (token.type==tokenlib.NAME and token.string=='nan')): + if token.type == tokenlib.NUMBER or ( + token.type == tokenlib.NAME and token.string == "nan" + ): return True return False @@ -199,50 +201,100 @@ def _number_or_nan(token): # a ufloat is of the form `( nominal_value + / - std ) possible_e_notation` and parses as a NUMBER # alas, we cannot simply consume the nominal_value and then see the +/- operator, because naive # parsing on the nominal_value thinks it needs to eval the + as part of the nominal_value. 
- if (index+6 < len(tokens) - and _number_or_nan(tokens[index+1]) - and tokens[index+2].string=='+' - and tokens[index+3].string=='/' - and tokens[index+4].string=='-' - and _number_or_nan(tokens[index+5])): + if ( + index + 6 < len(tokens) + and _number_or_nan(tokens[index + 1]) + and tokens[index + 2].string == "+" + and tokens[index + 3].string == "/" + and tokens[index + 4].string == "-" + and _number_or_nan(tokens[index + 5]) + ): # breakpoint() # get nominal_value left, _ = build_eval_tree( # This should feed the parser only a single token--the number representing the nominal_value - [tokens[index+1], tokens[-1]], op_priority, 0, 0, tokens[index+1].string + [tokens[index + 1], tokens[-1]], + op_priority, + 0, + 0, + tokens[index + 1].string, ) - plus_minus_line = tokens[index].line[tokens[index].start[1]:tokens[index+6].end[1]] - plus_minus_start = tokens[index+2].start - plus_minus_end = tokens[index+4].end - plus_minus_operator = tokenize.TokenInfo(type=tokenlib.OP, string='+/-', start=plus_minus_start, end=plus_minus_end, line=plus_minus_line) - remaining_line = tokens[index].line[tokens[index+6].end[1]:] + plus_minus_line = tokens[index].line[ + tokens[index].start[1] : tokens[index + 6].end[1] + ] + plus_minus_start = tokens[index + 2].start + plus_minus_end = tokens[index + 4].end + plus_minus_operator = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=plus_minus_start, + end=plus_minus_end, + line=plus_minus_line, + ) + # remaining_line = tokens[index].line[tokens[index + 6].end[1] :] right, _ = build_eval_tree( - [tokens[index+5], tokens[-1]], op_priority, 0, 0, tokens[index+5].string + [tokens[index + 5], tokens[-1]], + op_priority, + 0, + 0, + tokens[index + 5].string, ) - if tokens[index+6].string==')': + if tokens[index + 6].string == ")": # consume the uncertainty number seen thus far index += 6 else: - raise DefinitionSyntaxError("weird exit from ufloat construction") + raise DefinitionSyntaxError( + "weird exit from ufloat construction" + ) # now look for possible scientific e-notation - if (index+4 < len(tokens) - and tokens[index+1].string=='e' - and tokens[index+2].string in ['+', '-'] - and tokens[index+3].type==tokenlib.NUMBER): + if ( + index + 4 < len(tokens) + and tokens[index + 1].string == "e" + and tokens[index + 2].string in ["+", "-"] + and tokens[index + 3].type == tokenlib.NUMBER + ): # There may be more NUMBERS that follow because the tokenizer is lost. 
# So pick them all up - for exp_number_end in range(index+4, len(tokens)): + for exp_number_end in range(index + 4, len(tokens)): if tokens[exp_number_end].type != tokenlib.NUMBER: break - e_notation_line = remaining_line[:tokens[exp_number_end].start[1]-tokens[index+1].start[1]] - exp_number = '1.0e' + ''.join([digit.string for digit in tokens[index+3:exp_number_end]]) - exp_number_token = tokenize.TokenInfo(type=tokenlib.NUMBER, string=exp_number, start=(1, 0), end=(1, len(exp_number)), line=exp_number) - e_notation_operator = tokenize.TokenInfo(type=tokenlib.OP, string='*', start=(1, 0), end=(1, 1), line='*') - e_notation_scale, _ = build_eval_tree([exp_number_token, tokens[-1]], op_priority, 0, 0, tokens[exp_number_end].string) - scaled_left = EvalTreeNode(left, e_notation_operator, e_notation_scale) - scaled_right = EvalTreeNode(right, e_notation_operator, e_notation_scale) - result = EvalTreeNode(scaled_left, plus_minus_operator, scaled_right) + exp_number = "1.0e" + "".join( + [ + digit.string + for digit in tokens[index + 3 : exp_number_end] + ] + ) + exp_number_token = tokenize.TokenInfo( + type=tokenlib.NUMBER, + string=exp_number, + start=(1, 0), + end=(1, len(exp_number)), + line=exp_number, + ) + e_notation_operator = tokenize.TokenInfo( + type=tokenlib.OP, + string="*", + start=(1, 0), + end=(1, 1), + line="*", + ) + e_notation_scale, _ = build_eval_tree( + [exp_number_token, tokens[-1]], + op_priority, + 0, + 0, + tokens[exp_number_end].string, + ) + scaled_left = EvalTreeNode( + left, e_notation_operator, e_notation_scale + ) + scaled_right = EvalTreeNode( + right, e_notation_operator, e_notation_scale + ) + result = EvalTreeNode( + scaled_left, plus_minus_operator, scaled_right + ) index = exp_number_end # We know we are not at an ENDMARKER here continue @@ -292,25 +344,39 @@ def _number_or_nan(token): # a ufloat could be naked, meaning `nominal_value + / - std` and parses as a NUMBER # alas, we cannot simply consume the nominal_value and then see the +/- operator, because naive # parsing on the nominal_value thinks it needs to eval the + as part of the nominal_value. 
- if (index+4 < len(tokens) + if ( + index + 4 < len(tokens) and _number_or_nan(tokens[index]) - and tokens[index+1].string=='+' - and tokens[index+2].string=='/' - and tokens[index+3].string=='-' - and _number_or_nan(tokens[index+4])): + and tokens[index + 1].string == "+" + and tokens[index + 2].string == "/" + and tokens[index + 3].string == "-" + and _number_or_nan(tokens[index + 4]) + ): # The +/- operator binds tightest, so we don't need to end a previous binop - if tokens[index+5].type==tokenlib.NUMBER: + if tokens[index + 5].type == tokenlib.NUMBER: breakpoint() # get nominal_value left = EvalTreeNode(left=current_token) - plus_minus_line = tokens[index].line[tokens[index].start[1]:tokens[index+4].end[1]] - plus_minus_start = tokens[index+1].start - plus_minus_end = tokens[index+3].end - plus_minus_operator = tokenize.TokenInfo(type=tokenlib.OP, string='+/-', start=plus_minus_start, end=plus_minus_end, line=plus_minus_line) - remaining_line = tokens[index].line[tokens[index+4].end[1]:] + plus_minus_line = tokens[index].line[ + tokens[index].start[1] : tokens[index + 4].end[1] + ] + plus_minus_start = tokens[index + 1].start + plus_minus_end = tokens[index + 3].end + plus_minus_operator = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=plus_minus_start, + end=plus_minus_end, + line=plus_minus_line, + ) + # remaining_line = tokens[index].line[tokens[index + 4].end[1] :] right, _ = build_eval_tree( - [tokens[index+4], tokens[-1]], op_priority, 0, 0, tokens[index+4].string + [tokens[index + 4], tokens[-1]], + op_priority, + 0, + 0, + tokens[index + 4].string, ) result = EvalTreeNode(left, plus_minus_operator, right) index += 4 From 7d2fada5a9f2f58eed5fa7e8e4b718553e41a47f Mon Sep 17 00:00:00 2001 From: MichaelTiemann <72577720+MichaelTiemannOSC@users.noreply.github.com> Date: Sun, 16 Oct 2022 11:29:22 -0400 Subject: [PATCH 03/24] Enhance support for `uncertainties`. See #1611, #1614. Signed-off-by: MichaelTiemann <72577720+MichaelTiemannOSC@users.noreply.github.com> --- CHANGES | 1 + pint/compat.py | 150 ++++++++++++++-- pint/facets/measurement/objects.py | 19 +- pint/pint_eval.py | 275 ++++++++++++----------------- pint/testsuite/test_issues.py | 47 +++++ 5 files changed, 308 insertions(+), 184 deletions(-) diff --git a/CHANGES b/CHANGES index f3a85a337..896d0bf25 100644 --- a/CHANGES +++ b/CHANGES @@ -14,6 +14,7 @@ Pint Changelog - Fix a recursion error that would be raised when passing quantities to `cond` and `x`. (Issue #1510, #1530) - Update test_non_int tests for pytest. 
+- Better support for uncertainties (See #1611, #1614) 0.19.2 (2022-04-23) ------------------- diff --git a/pint/compat.py b/pint/compat.py index f5b03e352..a67da9a04 100644 --- a/pint/compat.py +++ b/pint/compat.py @@ -11,11 +11,21 @@ from __future__ import annotations import math +import token as tokenlib import tokenize from decimal import Decimal from io import BytesIO from numbers import Number +try: + from uncertainties import UFloat, ufloat + from uncertainties import unumpy as unp + + HAS_UNCERTAINTIES = True +except ImportError: + UFloat = ufloat = unp = None + HAS_UNCERTAINTIES = False + def missing_dependency(package, display_name=None): display_name = display_name or package @@ -29,10 +39,121 @@ def _inner(*args, **kwargs): return _inner +# https://stackoverflow.com/a/1517965/1291237 +class tokens_with_lookahead: + def __init__(self, iter): + self.iter = iter + self.buffer = [] + + def __iter__(self): + return self + + def __next__(self): + if self.buffer: + return self.buffer.pop(0) + else: + return self.iter.__next__() + + def lookahead(self, n): + """Return an item n entries ahead in the iteration.""" + while n >= len(self.buffer): + try: + self.buffer.append(self.iter.__next__()) + except StopIteration: + return None + return self.buffer[n] + + def tokenizer(input_string): - for tokinfo in tokenize.tokenize(BytesIO(input_string.encode("utf-8")).readline): + def _number_or_nan(token): + if token.type == tokenlib.NUMBER or ( + token.type == tokenlib.NAME and token.string == "nan" + ): + return True + return False + + gen = tokenize.tokenize(BytesIO(input_string.encode("utf-8")).readline) + toklist = tokens_with_lookahead(gen) + for tokinfo in toklist: if tokinfo.type != tokenize.ENCODING: - yield tokinfo + if ( + tokinfo.string == "+" + and toklist.lookahead(0).string == "/" + and toklist.lookahead(1).string == "-" + ): + line = tokinfo.line + start = tokinfo.start + for i in range(-1, 1): + next(toklist) + end = tokinfo.end + tokinfo = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=start, + end=end, + line=line, + ) + yield tokinfo + elif ( + tokinfo.string == "(" + and _number_or_nan(toklist.lookahead(0)) + and toklist.lookahead(1).string == "+" + and toklist.lookahead(2).string == "/" + and toklist.lookahead(3).string == "-" + and _number_or_nan(toklist.lookahead(4)) + and toklist.lookahead(5).string == ")" + ): + # ( NUM_OR_NAN +/- NUM_OR_NAN ) + start = tokinfo.start + end = toklist.lookahead(5).end + line = tokinfo.line[start[1] : end[1]] + nominal_value = toklist.lookahead(0) + std_dev = toklist.lookahead(4) + plus_minus_op = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=toklist.lookahead(1).start, + end=toklist.lookahead(3).end, + line=line, + ) + # Strip parentheses and let tight binding of +/- do its work + for i in range(-1, 5): + next(toklist) + yield nominal_value + yield plus_minus_op + yield std_dev + elif ( + tokinfo.type == tokenlib.NUMBER + and toklist.lookahead(0).string == "(" + and toklist.lookahead(1).type == tokenlib.NUMBER + and toklist.lookahead(2).string == ")" + ): + line = tokinfo.line + start = tokinfo.start + nominal_value = tokinfo + std_dev = toklist.lookahead(1) + plus_minus_op = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=toklist.lookahead(0).start, + end=toklist.lookahead(2).end, + line=line, + ) + for i in range(-1, 2): + next(toklist) + yield nominal_value + yield plus_minus_op + if "." not in std_dev.string: + std_dev = tokenize.TokenInfo( + type=std_dev.type, + string="0." 
+ std_dev.string, + start=std_dev.start, + end=std_dev.end, + line=line, + ) + yield std_dev + else: + yield tokinfo # TODO: remove this warning after v0.10 @@ -47,7 +168,10 @@ class BehaviorChangeWarning(UserWarning): HAS_NUMPY = True NUMPY_VER = np.__version__ - NUMERIC_TYPES = (Number, Decimal, ndarray, np.number) + if HAS_UNCERTAINTIES: + NUMERIC_TYPES = (Number, Decimal, ndarray, np.number, UFloat) + else: + NUMERIC_TYPES = (Number, Decimal, ndarray, np.number) def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False): if isinstance(value, (dict, bool)) or value is None: @@ -56,6 +180,11 @@ def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False): raise ValueError("Quantity magnitude cannot be an empty string.") elif isinstance(value, (list, tuple)): return np.asarray(value) + elif HAS_UNCERTAINTIES: + from pint.facets.measurement.objects import Measurement + + if isinstance(value, Measurement): + return ufloat(value.value, value.error) if force_ndarray or ( force_ndarray_like and not is_duck_array_type(type(value)) ): @@ -109,16 +238,13 @@ def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False): "lists and tuples are valid magnitudes for " "Quantity only when NumPy is present." ) - return value + elif HAS_UNCERTAINTIES: + from pint.facets.measurement.objects import Measurement + if isinstance(value, Measurement): + return ufloat(value.value, value.error) + return value -try: - from uncertainties import ufloat - - HAS_UNCERTAINTIES = True -except ImportError: - ufloat = None - HAS_UNCERTAINTIES = False try: from babel import Locale as Loc @@ -271,6 +397,8 @@ def isnan(obj, check_all: bool): try: return math.isnan(obj) except TypeError: + if HAS_UNCERTAINTIES: + return unp.isnan(obj) return False diff --git a/pint/facets/measurement/objects.py b/pint/facets/measurement/objects.py index 88fad0a73..aad2d3436 100644 --- a/pint/facets/measurement/objects.py +++ b/pint/facets/measurement/objects.py @@ -48,7 +48,7 @@ class Measurement(PlainQuantity): """ - def __new__(cls, value, error, units=MISSING): + def __new__(cls, value, error=MISSING, units=MISSING): if units is MISSING: try: value, units = value.magnitude, value.units @@ -60,17 +60,18 @@ def __new__(cls, value, error, units=MISSING): error = MISSING # used for check below else: units = "" - try: - error = error.to(units).magnitude - except AttributeError: - pass - if error is MISSING: + # We've already extracted the units from the Quantity above mag = value - elif error < 0: - raise ValueError("The magnitude of the error cannot be negative") else: - mag = ufloat(value, error) + try: + error = error.to(units).magnitude + except AttributeError: + pass + if error < 0: + raise ValueError("The magnitude of the error cannot be negative") + else: + mag = ufloat(value, error) inst = super().__new__(cls, mag, units) return inst diff --git a/pint/pint_eval.py b/pint/pint_eval.py index 634a901ac..b752fddd6 100644 --- a/pint/pint_eval.py +++ b/pint/pint_eval.py @@ -19,6 +19,7 @@ # For controlling order of operations _OP_PRIORITY = { + "±": 4, "+/-": 4, "**": 3, "^": 3, @@ -53,6 +54,7 @@ def _power(left, right): _BINARY_OPERATOR_MAP = { + "±": _ufloat, "+/-": _ufloat, "**": _power, "*": operator.mul, @@ -125,7 +127,6 @@ def evaluate(self, define_op, bin_op=None, un_op=None): # unary operator op_text = self.operator[1] if op_text not in un_op: - breakpoint() raise DefinitionSyntaxError('missing unary operator "%s"' % op_text) return un_op[op_text](self.left.evaluate(define_op, bin_op, un_op)) else: @@ 
-136,6 +137,89 @@ def evaluate(self, define_op, bin_op=None, un_op=None): from typing import Iterable +def peek_exp_number(tokens, index): + exp_number = None + exp_number_end = index + exp_is_negative = False + if ( + index + 2 < len(tokens) + and tokens[index + 1].string == "10" + and tokens[index + 2].string in "⁻⁰¹²³⁴⁵⁶⁷⁸⁹" + ): + if tokens[index + 2].string == "⁻": + exp_is_negative = True + for exp_number_end in range(index + 3, len(tokens)): + if tokens[exp_number_end].string not in "⁰¹²³⁴⁵⁶⁷⁸⁹": + break + exp_number = "".join( + [ + digit.string[0] - "⁰" + for digit in tokens[index + exp_is_negative + 2 : exp_number_end] + ] + ) + else: + if ( + index + 2 < len(tokens) + and tokens[index + 1].string == "e" + # No sign on the exponent, treat as + + and tokens[index + 2].type == tokenlib.NUMBER + ): + # Don't know why tokenizer doesn't bundle all these numbers together + for exp_number_end in range(index + 3, len(tokens)): + if tokens[exp_number_end].type != tokenlib.NUMBER: + break + elif ( + index + 3 < len(tokens) + and tokens[index + 1].string == "e" + and tokens[index + 2].string in ["+", "-"] + and tokens[index + 3].type == tokenlib.NUMBER + ): + if tokens[index + 2].string == "-": + exp_is_negative = True + # Don't know why tokenizer doesn't bundle all these numbers together + for exp_number_end in range(index + 4, len(tokens)): + if tokens[exp_number_end].type != tokenlib.NUMBER: + break + if exp_number_end > index: + exp_number = "".join( + [digit.string for digit in tokens[index + 3 : exp_number_end]] + ) + else: + return None, index + exp_number = "1.0e" + ("-" if exp_is_negative else "") + exp_number + assert exp_number_end != index + return exp_number, exp_number_end + + +def finish_exp_number(tokens, exp_number, exp_number_end, plus_minus_op, left, right): + exp_number_token = tokenize.TokenInfo( + type=tokenlib.NUMBER, + string=exp_number, + start=(1, 0), + end=(1, len(exp_number)), + line=exp_number, + ) + e_notation_operator = tokenize.TokenInfo( + type=tokenlib.OP, + string="*", + start=(1, 0), + end=(1, 1), + line="*", + ) + e_notation_scale, _ = build_eval_tree( + [exp_number_token, tokens[-1]], + None, + 0, + 0, + tokens[exp_number_end].string, + ) + scaled_left = EvalTreeNode(left, e_notation_operator, e_notation_scale) + scaled_right = EvalTreeNode(right, e_notation_operator, e_notation_scale) + result = EvalTreeNode(scaled_left, plus_minus_op, scaled_right) + index = exp_number_end + return result, index + + def build_eval_tree( tokens: Iterable[tokenize.TokenInfo], op_priority=None, @@ -173,13 +257,6 @@ def build_eval_tree( result = None - def _number_or_nan(token): - if token.type == tokenlib.NUMBER or ( - token.type == tokenlib.NAME and token.string == "nan" - ): - return True - return False - while True: current_token = tokens[index] token_type = current_token.type @@ -198,122 +275,18 @@ def _number_or_nan(token): # parenthetical group ending, but we need to close sub-operations within group return result, index - 1 elif token_text == "(": - # a ufloat is of the form `( nominal_value + / - std ) possible_e_notation` and parses as a NUMBER - # alas, we cannot simply consume the nominal_value and then see the +/- operator, because naive - # parsing on the nominal_value thinks it needs to eval the + as part of the nominal_value. 
- if ( - index + 6 < len(tokens) - and _number_or_nan(tokens[index + 1]) - and tokens[index + 2].string == "+" - and tokens[index + 3].string == "/" - and tokens[index + 4].string == "-" - and _number_or_nan(tokens[index + 5]) - ): - # breakpoint() - # get nominal_value - left, _ = build_eval_tree( - # This should feed the parser only a single token--the number representing the nominal_value - [tokens[index + 1], tokens[-1]], - op_priority, - 0, - 0, - tokens[index + 1].string, - ) - plus_minus_line = tokens[index].line[ - tokens[index].start[1] : tokens[index + 6].end[1] - ] - plus_minus_start = tokens[index + 2].start - plus_minus_end = tokens[index + 4].end - plus_minus_operator = tokenize.TokenInfo( - type=tokenlib.OP, - string="+/-", - start=plus_minus_start, - end=plus_minus_end, - line=plus_minus_line, - ) - # remaining_line = tokens[index].line[tokens[index + 6].end[1] :] - - right, _ = build_eval_tree( - [tokens[index + 5], tokens[-1]], - op_priority, - 0, - 0, - tokens[index + 5].string, - ) - if tokens[index + 6].string == ")": - # consume the uncertainty number seen thus far - index += 6 - else: - raise DefinitionSyntaxError( - "weird exit from ufloat construction" - ) - # now look for possible scientific e-notation - if ( - index + 4 < len(tokens) - and tokens[index + 1].string == "e" - and tokens[index + 2].string in ["+", "-"] - and tokens[index + 3].type == tokenlib.NUMBER - ): - # There may be more NUMBERS that follow because the tokenizer is lost. - # So pick them all up - for exp_number_end in range(index + 4, len(tokens)): - if tokens[exp_number_end].type != tokenlib.NUMBER: - break - exp_number = "1.0e" + "".join( - [ - digit.string - for digit in tokens[index + 3 : exp_number_end] - ] - ) - exp_number_token = tokenize.TokenInfo( - type=tokenlib.NUMBER, - string=exp_number, - start=(1, 0), - end=(1, len(exp_number)), - line=exp_number, - ) - e_notation_operator = tokenize.TokenInfo( - type=tokenlib.OP, - string="*", - start=(1, 0), - end=(1, 1), - line="*", - ) - e_notation_scale, _ = build_eval_tree( - [exp_number_token, tokens[-1]], - op_priority, - 0, - 0, - tokens[exp_number_end].string, - ) - scaled_left = EvalTreeNode( - left, e_notation_operator, e_notation_scale - ) - scaled_right = EvalTreeNode( - right, e_notation_operator, e_notation_scale - ) - result = EvalTreeNode( - scaled_left, plus_minus_operator, scaled_right - ) - index = exp_number_end - # We know we are not at an ENDMARKER here - continue - else: - result = EvalTreeNode(left, plus_minus_operator, right) - # We can fall through...index+=1 operation will consume ')' + # gather parenthetical group + right, index = build_eval_tree( + tokens, op_priority, index + 1, 0, token_text + ) + if not tokens[index][1] == ")": + raise DefinitionSyntaxError("weird exit from parentheses") + if result: + # implicit op with a parenthetical group, i.e. "3 (kg ** 2)" + result = EvalTreeNode(left=result, right=right) else: - # gather parenthetical group - right, index = build_eval_tree( - tokens, op_priority, index + 1, 0, token_text - ) - if not tokens[index][1] == ")": - raise DefinitionSyntaxError("weird exit from parentheses") - if result: - # implicit op with a parenthetical group, i.e. 
"3 (kg ** 2)" - result = EvalTreeNode(left=result, right=right) - else: - # get first token - result = right + # get first token + result = right elif token_text in op_priority: if result: # equal-priority operators are grouped in a left-to-right order, @@ -331,6 +304,20 @@ def _number_or_nan(token): right, index = build_eval_tree( tokens, op_priority, index + 1, depth + 1, token_text ) + if token_text in ["±", "+/-"]: + # See if we need to scale the nominal_value and std_dev terms by an eponent + exp_number, exp_number_end = peek_exp_number(tokens, index) + if exp_number: + result, index = finish_exp_number( + tokens, + exp_number, + exp_number_end, + current_token, + result, + right, + ) + # We know we are not at an ENDMARKER here + continue result = EvalTreeNode( left=result, operator=current_token, right=right ) @@ -341,47 +328,7 @@ def _number_or_nan(token): ) result = EvalTreeNode(left=right, operator=current_token) elif token_type == tokenlib.NUMBER or token_type == tokenlib.NAME: - # a ufloat could be naked, meaning `nominal_value + / - std` and parses as a NUMBER - # alas, we cannot simply consume the nominal_value and then see the +/- operator, because naive - # parsing on the nominal_value thinks it needs to eval the + as part of the nominal_value. - if ( - index + 4 < len(tokens) - and _number_or_nan(tokens[index]) - and tokens[index + 1].string == "+" - and tokens[index + 2].string == "/" - and tokens[index + 3].string == "-" - and _number_or_nan(tokens[index + 4]) - ): - # The +/- operator binds tightest, so we don't need to end a previous binop - if tokens[index + 5].type == tokenlib.NUMBER: - breakpoint() - # get nominal_value - left = EvalTreeNode(left=current_token) - plus_minus_line = tokens[index].line[ - tokens[index].start[1] : tokens[index + 4].end[1] - ] - plus_minus_start = tokens[index + 1].start - plus_minus_end = tokens[index + 3].end - plus_minus_operator = tokenize.TokenInfo( - type=tokenlib.OP, - string="+/-", - start=plus_minus_start, - end=plus_minus_end, - line=plus_minus_line, - ) - # remaining_line = tokens[index].line[tokens[index + 4].end[1] :] - - right, _ = build_eval_tree( - [tokens[index + 4], tokens[-1]], - op_priority, - 0, - 0, - tokens[index + 4].string, - ) - result = EvalTreeNode(left, plus_minus_operator, right) - index += 4 - continue - elif result: + if result: # tokens with an implicit operation i.e. "1 kg" if op_priority[""] <= op_priority.get(prev_op, -1): # previous operator is higher priority than implicit, so end diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py index 51d33b178..d7dcc6496 100644 --- a/pint/testsuite/test_issues.py +++ b/pint/testsuite/test_issues.py @@ -862,6 +862,53 @@ def test_issue_1300(self): m = module_registry.Measurement(1, 0.1, "meter") assert m.default_format == "~P" + @helpers.requires_uncertainties() + def test_issue1611(self, module_registry): + from numpy.testing import assert_almost_equal + from uncertainties import ufloat + + u1 = ufloat(1.2, 0.34) + u2 = ufloat(5.6, 0.78) + q1_u = module_registry.Quantity(u2 - u1, "m") + q1_str = str(q1_u) + q1_str = "{:.4uS}".format(q1_u) + q1_m = q1_u.magnitude + q2_u = module_registry.Quantity(q1_str) + # Not equal because the uncertainties are differently random! 
+        assert q1_u != q2_u
+        q2_m = q2_u.magnitude
+
+        assert_almost_equal(q2_m.nominal_value, q1_m.nominal_value, decimal=9)
+        assert_almost_equal(q2_m.std_dev, q1_m.std_dev, decimal=4)
+
+        q3_str = "12.34(5678)e-066 m"
+        q3_u = module_registry.Quantity(q3_str)
+        q3_m = q3_u.magnitude
+        assert q3_m < 1
+
+    @helpers.requires_uncertainties()
+    def test_issue1614(self, module_registry):
+        from uncertainties import UFloat, ufloat
+
+        q = module_registry.Quantity(1.0, "m")
+        assert isinstance(q, module_registry.Quantity)
+        m = module_registry.Measurement(2.0, 0.3, "m")
+        assert isinstance(m, module_registry.Measurement)
+
+        u1 = ufloat(1.2, 3.4)
+        u2 = ufloat(5.6, 7.8)
+        q1_u = module_registry.Quantity(u1, "m")
+        m1 = module_registry.Measurement(q1_u)
+        assert m1.value.magnitude == u1.nominal_value
+        assert m1.error.magnitude == u1.std_dev
+        m2 = module_registry.Measurement(5.6, 7.8)  # dimensionless
+        q2_u = module_registry.Quantity(m2)
+        assert isinstance(q2_u.magnitude, UFloat)
+        assert q2_u.magnitude.nominal_value == m2.value
+        assert q2_u.magnitude.nominal_value == u2.nominal_value
+        assert q2_u.magnitude.std_dev == m2.error
+        assert q2_u.magnitude.std_dev == u2.std_dev
+

 if np is not None:

From fc8564b86e07288b9e0f1d17734290090c63307d Mon Sep 17 00:00:00 2001
From: MichaelTiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Wed, 19 Oct 2022 08:00:24 -0400
Subject: [PATCH 04/24] Fix up failures and errors found by test suite.

Signed-off-by: MichaelTiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/facets/numpy/quantity.py | 12 ++++++++++++
 pint/facets/plain/quantity.py | 16 +++++++++++++++-
 pint/pint_eval.py             | 11 +++++++++--
 3 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/pint/facets/numpy/quantity.py b/pint/facets/numpy/quantity.py
index 243610033..429c58424 100644
--- a/pint/facets/numpy/quantity.py
+++ b/pint/facets/numpy/quantity.py
@@ -27,6 +27,15 @@
     set_units_ufuncs,
 )

+try:
+    import uncertainties.unumpy as unp
+    from uncertainties import ufloat, UFloat
+    HAS_UNCERTAINTIES = True
+except ImportError:
+    unp = np
+    ufloat = UFloat = None
+    HAS_UNCERTAINTIES = False
+

 def method_wraps(numpy_func):
     if isinstance(numpy_func, str):
@@ -223,6 +232,9 @@ def __getattr__(self, item) -> Any:
                 )
             else:
                 raise exc
+        elif HAS_UNCERTAINTIES and item=="ndim" and isinstance(self._magnitude, UFloat):
+            # Dimensionality of a single UFloat is 0, like any other scalar
+            return 0

         try:
             return getattr(self._magnitude, item)
diff --git a/pint/facets/plain/quantity.py b/pint/facets/plain/quantity.py
index d4c1a55ed..791bb2fce 100644
--- a/pint/facets/plain/quantity.py
+++ b/pint/facets/plain/quantity.py
@@ -62,6 +62,15 @@
 if HAS_NUMPY:
     import numpy as np  # noqa

+try:
+    import uncertainties.unumpy as unp
+    from uncertainties import ufloat, UFloat
+    HAS_UNCERTAINTIES = True
+except ImportError:
+    unp = np
+    ufloat = UFloat = None
+    HAS_UNCERTAINTIES = False
+

 def reduce_dimensions(f):
     def wrapped(self, *args, **kwargs):
@@ -267,7 +276,12 @@ def __bytes__(self) -> bytes:
         return str(self).encode(locale.getpreferredencoding())

     def __repr__(self) -> str:
-        if isinstance(self._magnitude, float):
+        if HAS_UNCERTAINTIES:
+            if isinstance(self._magnitude, UFloat):
+                return f"<Quantity({self._magnitude:.6}, '{self._units}')>"
+            else:
+                return f"<Quantity({self._magnitude}, '{self._units}')>"
+        elif isinstance(self._magnitude, float):
             return f"<Quantity({self._magnitude:.9}, '{self._units}')>"
         else:
             return f"<Quantity({self._magnitude}, '{self._units}')>"
diff --git a/pint/pint_eval.py b/pint/pint_eval.py
index b752fddd6..d8b46eb09 100644
--- a/pint/pint_eval.py
+++ b/pint/pint_eval.py
@@ -13,7 +13,12 @@
 import token as tokenlib
 import tokenize

-from uncertainties import ufloat
+try:
+    from uncertainties import ufloat
+    HAS_UNCERTAINTIES = True
+except ImportError:
+    HAS_UNCERTAINTIES = False
+    ufloat = None

 from .errors import DefinitionSyntaxError

@@ -35,7 +40,9 @@


 def _ufloat(left, right):
-    return ufloat(left, right)
+    if HAS_UNCERTAINTIES:
+        return ufloat(left, right)
+    raise TypeError('Could not import support for uncertainties')


 def _power(left, right):

From c8fe27ff2160d2dd62fc4fc5cba145a4e2c5bda2 Mon Sep 17 00:00:00 2001
From: MichaelTiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Fri, 21 Oct 2022 05:07:03 -0400
Subject: [PATCH 05/24] Copy in changes from PR1596

Signed-off-by: 72577720+MichaelTiemannOSC@users.noreply.github.com
---
 CHANGES                       |  2 ++
 pint/compat.py                |  4 ++--
 pint/facets/plain/quantity.py |  6 ++++++
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/CHANGES b/CHANGES
index 896d0bf25..efe65cba2 100644
--- a/CHANGES
+++ b/CHANGES
@@ -11,6 +11,8 @@ Pint Changelog
   (Issue #1030, #574)
 - Added angular frequency documentation page.
 - Move ASV benchmarks to dedicated folder. (Issue #1542)
+- An ndim attribute has been added to Quantity and DataFrame has been added to upcast
+  types for pint-pandas compatibility. (#1596)
 - Fix a recursion error that would be raised when passing quantities to `cond` and `x`.
   (Issue #1510, #1530)
 - Update test_non_int tests for pytest.
diff --git a/pint/compat.py b/pint/compat.py
index a67da9a04..6d5b905fa 100644
--- a/pint/compat.py
+++ b/pint/compat.py
@@ -281,9 +281,9 @@ def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False):

 # Pandas (Series)
 try:
-    from pandas import Series
+    from pandas import DataFrame, Series

-    upcast_types.append(Series)
+    upcast_types += [DataFrame, Series]
 except ImportError:
     pass

diff --git a/pint/facets/plain/quantity.py b/pint/facets/plain/quantity.py
index 791bb2fce..40a963ae8 100644
--- a/pint/facets/plain/quantity.py
+++ b/pint/facets/plain/quantity.py
@@ -152,6 +152,12 @@ class PlainQuantity(PrettyIPython, SharedRegistryObject, Generic[_MagnitudeType]
     default_format: str = ""
     _magnitude: _MagnitudeType

+    @property
+    def ndim(self) -> int:
+        if isinstance(self.magnitude, numbers.Number):
+            return 0
+        return self.magnitude.ndim
+
     @property
     def force_ndarray(self) -> bool:
         return self._REGISTRY.force_ndarray

From 126a859a5670a10812efcf5ad1a18c7c8ad579d3 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Fri, 18 Nov 2022 16:40:11 +1300
Subject: [PATCH 06/24] Create modular uncertainty parser layer

Based on feedback, tokenize uncertainties on top of default tokenizer, not
instead of default tokenizer.
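For illustration only (not part of this commit): the layered tokenizer is
opt-in, mirroring what the updated tests do. A minimal sketch, assuming the
`uncertainties` package is installed:

    from pint import UnitRegistry, pint_eval

    # Rebind the module-level hook; _plain_tokenizer stays the default
    # for everyone who does not opt in.
    pint_eval.tokenizer = pint_eval.uncertainty_tokenizer

    ureg = UnitRegistry()
    q = ureg.Quantity("(1.0 +/- 0.2) m")
    print(q.magnitude.nominal_value, q.magnitude.std_dev)  # 1.0 0.2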
Signed-off-by: MichaelTiemann <72577720+MichaelTiemannOSC@users.noreply.github.com> --- pint/compat.py | 120 ------------ pint/facets/plain/registry.py | 5 +- pint/pint_eval.py | 292 +++++++++++++++++++---------- pint/testsuite/test_issues.py | 3 + pint/testsuite/test_measurement.py | 10 +- pint/testsuite/test_pint_eval.py | 6 +- pint/testsuite/test_util.py | 4 +- pint/util.py | 5 +- 8 files changed, 219 insertions(+), 226 deletions(-) diff --git a/pint/compat.py b/pint/compat.py index 6d5b905fa..3dd65029c 100644 --- a/pint/compat.py +++ b/pint/compat.py @@ -11,10 +11,7 @@ from __future__ import annotations import math -import token as tokenlib -import tokenize from decimal import Decimal -from io import BytesIO from numbers import Number try: @@ -39,123 +36,6 @@ def _inner(*args, **kwargs): return _inner -# https://stackoverflow.com/a/1517965/1291237 -class tokens_with_lookahead: - def __init__(self, iter): - self.iter = iter - self.buffer = [] - - def __iter__(self): - return self - - def __next__(self): - if self.buffer: - return self.buffer.pop(0) - else: - return self.iter.__next__() - - def lookahead(self, n): - """Return an item n entries ahead in the iteration.""" - while n >= len(self.buffer): - try: - self.buffer.append(self.iter.__next__()) - except StopIteration: - return None - return self.buffer[n] - - -def tokenizer(input_string): - def _number_or_nan(token): - if token.type == tokenlib.NUMBER or ( - token.type == tokenlib.NAME and token.string == "nan" - ): - return True - return False - - gen = tokenize.tokenize(BytesIO(input_string.encode("utf-8")).readline) - toklist = tokens_with_lookahead(gen) - for tokinfo in toklist: - if tokinfo.type != tokenize.ENCODING: - if ( - tokinfo.string == "+" - and toklist.lookahead(0).string == "/" - and toklist.lookahead(1).string == "-" - ): - line = tokinfo.line - start = tokinfo.start - for i in range(-1, 1): - next(toklist) - end = tokinfo.end - tokinfo = tokenize.TokenInfo( - type=tokenlib.OP, - string="+/-", - start=start, - end=end, - line=line, - ) - yield tokinfo - elif ( - tokinfo.string == "(" - and _number_or_nan(toklist.lookahead(0)) - and toklist.lookahead(1).string == "+" - and toklist.lookahead(2).string == "/" - and toklist.lookahead(3).string == "-" - and _number_or_nan(toklist.lookahead(4)) - and toklist.lookahead(5).string == ")" - ): - # ( NUM_OR_NAN +/- NUM_OR_NAN ) - start = tokinfo.start - end = toklist.lookahead(5).end - line = tokinfo.line[start[1] : end[1]] - nominal_value = toklist.lookahead(0) - std_dev = toklist.lookahead(4) - plus_minus_op = tokenize.TokenInfo( - type=tokenlib.OP, - string="+/-", - start=toklist.lookahead(1).start, - end=toklist.lookahead(3).end, - line=line, - ) - # Strip parentheses and let tight binding of +/- do its work - for i in range(-1, 5): - next(toklist) - yield nominal_value - yield plus_minus_op - yield std_dev - elif ( - tokinfo.type == tokenlib.NUMBER - and toklist.lookahead(0).string == "(" - and toklist.lookahead(1).type == tokenlib.NUMBER - and toklist.lookahead(2).string == ")" - ): - line = tokinfo.line - start = tokinfo.start - nominal_value = tokinfo - std_dev = toklist.lookahead(1) - plus_minus_op = tokenize.TokenInfo( - type=tokenlib.OP, - string="+/-", - start=toklist.lookahead(0).start, - end=toklist.lookahead(2).end, - line=line, - ) - for i in range(-1, 2): - next(toklist) - yield nominal_value - yield plus_minus_op - if "." not in std_dev.string: - std_dev = tokenize.TokenInfo( - type=std_dev.type, - string="0." 
+ std_dev.string, - start=std_dev.start, - end=std_dev.end, - line=line, - ) - yield std_dev - else: - yield tokinfo - - # TODO: remove this warning after v0.10 class BehaviorChangeWarning(UserWarning): pass diff --git a/pint/facets/plain/registry.py b/pint/facets/plain/registry.py index 8572fece9..e7cbb79a5 100644 --- a/pint/facets/plain/registry.py +++ b/pint/facets/plain/registry.py @@ -42,9 +42,10 @@ from pint import Quantity, Unit from ... import parser +from ... import pint_eval from ..._typing import QuantityOrUnitLike, UnitLike from ..._vendor import appdirs -from ...compat import HAS_BABEL, babel_parse, tokenizer +from ...compat import HAS_BABEL, babel_parse from ...definitions import Definition from ...errors import ( DefinitionSyntaxError, @@ -1346,7 +1347,7 @@ def parse_expression( for p in self.preprocessors: input_string = p(input_string) input_string = string_preprocessor(input_string) - gen = tokenizer(input_string) + gen = pint_eval.tokenizer(input_string) return build_eval_tree(gen).evaluate( lambda x: self._eval_token(x, case_sensitive=case_sensitive, **values) diff --git a/pint/pint_eval.py b/pint/pint_eval.py index d8b46eb09..84f21d46b 100644 --- a/pint/pint_eval.py +++ b/pint/pint_eval.py @@ -9,6 +9,7 @@ """ from __future__ import annotations +from io import BytesIO import operator import token as tokenlib import tokenize @@ -60,6 +61,200 @@ def _power(left, right): return operator.pow(left, right) +# https://stackoverflow.com/a/1517965/1291237 +class tokens_with_lookahead: + def __init__(self, iter): + self.iter = iter + self.buffer = [] + + def __iter__(self): + return self + + def __next__(self): + if self.buffer: + return self.buffer.pop(0) + else: + return self.iter.__next__() + + def lookahead(self, n): + """Return an item n entries ahead in the iteration.""" + while n >= len(self.buffer): + try: + self.buffer.append(self.iter.__next__()) + except StopIteration: + return None + return self.buffer[n] + + +def _plain_tokenizer(input_string): + for tokinfo in tokenize.tokenize(BytesIO(input_string.encode("utf-8")).readline): + if tokinfo.type != tokenlib.ENCODING: + yield tokinfo + +def uncertainty_tokenizer(input_string): + def _number_or_nan(token): + if token.type == tokenlib.NUMBER or ( + token.type == tokenlib.NAME and token.string == "nan" + ): + return True + return False + + def _get_possible_e(toklist, e_index): + possible_e_token = toklist.lookahead(e_index) + if (possible_e_token.string[0]=="e" + and len(possible_e_token.string)>1 + and possible_e_token.string[1].isdigit()): + end = possible_e_token.end + possible_e = tokenize.TokenInfo( + type=tokenlib.STRING, + string=possible_e_token.string, + start=possible_e_token.start, + end=end, + line=possible_e_token.line) + elif (possible_e_token.string[0] in ["e", "E"] + and toklist.lookahead(e_index+1).string in ["+", "-"] + and toklist.lookahead(e_index+2).type==tokenlib.NUMBER): + # Special case: Python allows a leading zero for exponents (i.e., 042) but not for numbers + if toklist.lookahead(e_index+2).string == "0" and toklist.lookahead(e_index+3).type==tokenlib.NUMBER: + exp_number = toklist.lookahead(e_index+3).string + end = toklist.lookahead(e_index+3).end + else: + exp_number = toklist.lookahead(e_index+2).string + end = toklist.lookahead(e_index+2).end + possible_e = tokenize.TokenInfo( + type=tokenlib.STRING, + string=f"e{toklist.lookahead(e_index+1).string}{exp_number}", + start=possible_e_token.start, + end=end, + line=possible_e_token.line) + else: + possible_e = None + return possible_e + + 
def _apply_e_notation(mantissa, exponent): + if mantissa.string == 'nan': + return mantissa + if float(mantissa.string)==0.0: + return mantissa + return tokenize.TokenInfo( + type=tokenlib.NUMBER, + string=f"{mantissa.string}{exponent.string}", + start=mantissa.start, + end=exponent.end, + line=exponent.line + ) + + def _finalize_e(nominal_value, std_dev, toklist, possible_e): + nominal_value = _apply_e_notation(nominal_value, possible_e) + std_dev = _apply_e_notation(std_dev, possible_e) + next(toklist) # consume 'e' and positive exponent value + if possible_e.string[1]=='-': + next(toklist) # consume '+' or '-' in exponent + exp_number = next(toklist) # consume exponent value + if exp_number.end < end: + exp_number = next(toklist) + assert(exp_number.end==end) + return nominal_value, std_dev + + # when tokenize encounters whitespace followed by an unknown character, + # (such as ±) it proceeds to mark every character of the whitespace as ERRORTOKEN, + # in addition to marking the unknown character as ERRORTOKEN. Rather than + # wading through all that vomit, just eliminate the problem + # in the input by rewriting ± as +/-. + input_string = input_string.replace('±', '+/-') + toklist = tokens_with_lookahead(_plain_tokenizer(input_string)) + for tokinfo in toklist: + line = tokinfo.line + start = tokinfo.start + if ( + tokinfo.string == "+" + and toklist.lookahead(0).string == "/" + and toklist.lookahead(1).string == "-" + ): + plus_minus_op = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=start, + end=toklist.lookahead(1).end, + line=line, + ) + for i in range(-1, 1): + next(toklist) + yield plus_minus_op + elif ( + tokinfo.string == "(" + and _number_or_nan(toklist.lookahead(0)) + and toklist.lookahead(1).string == "+" + and toklist.lookahead(2).string == "/" + and toklist.lookahead(3).string == "-" + and _number_or_nan(toklist.lookahead(4)) + and toklist.lookahead(5).string == ")" + ): + # ( NUM_OR_NAN +/- NUM_OR_NAN ) POSSIBLE_E_NOTATION + possible_e = _get_possible_e (toklist, 6) + if possible_e: + end = possible_e.end + else: + end = toklist.lookahead(5).end + nominal_value = next(toklist) + tokinfo = next(toklist) # consume '+' + next(toklist) # consume '/' + plus_minus_op = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=tokinfo.start, + end=next(toklist).end, # consume '-' + line=line, + ) + std_dev = next(toklist) + next(toklist) # consume final ')' + if possible_e: + nominal_value, std_dev = _finalize_e(nominal_value, std_dev, toklist, possible_e) + yield nominal_value + yield plus_minus_op + yield std_dev + elif ( + tokinfo.type == tokenlib.NUMBER + and toklist.lookahead(0).string == "(" + and toklist.lookahead(1).type == tokenlib.NUMBER + and toklist.lookahead(2).string == ")" + ): + # NUM_OR_NAN ( NUM_OR_NAN ) POSSIBLE_E_NOTATION + possible_e = _get_possible_e (toklist, 3) + if possible_e: + end = possible_e.end + else: + end = toklist.lookahead(2).end + nominal_value = tokinfo + tokinfo = next(toklist) # consume '(' + plus_minus_op = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=tokinfo.start, + end=tokinfo.end, # this is funky because there's no "+/-" in nominal(std_dev) notation + line=line, + ) + std_dev = next(toklist) + if "." not in std_dev.string: + std_dev = tokenize.TokenInfo( + type=std_dev.type, + string="0." 
+ std_dev.string, + start=std_dev.start, + end=std_dev.end, + line=line, + ) + next(toklist) # consume final ')' + if possible_e: + nominal_value, std_dev = _finalize_e(nominal_value, std_dev, toklist, possible_e) + yield nominal_value + yield plus_minus_op + yield std_dev + else: + yield tokinfo + + +tokenizer = _plain_tokenizer + _BINARY_OPERATOR_MAP = { "±": _ufloat, "+/-": _ufloat, @@ -144,89 +339,6 @@ def evaluate(self, define_op, bin_op=None, un_op=None): from typing import Iterable -def peek_exp_number(tokens, index): - exp_number = None - exp_number_end = index - exp_is_negative = False - if ( - index + 2 < len(tokens) - and tokens[index + 1].string == "10" - and tokens[index + 2].string in "⁻⁰¹²³⁴⁵⁶⁷⁸⁹" - ): - if tokens[index + 2].string == "⁻": - exp_is_negative = True - for exp_number_end in range(index + 3, len(tokens)): - if tokens[exp_number_end].string not in "⁰¹²³⁴⁵⁶⁷⁸⁹": - break - exp_number = "".join( - [ - digit.string[0] - "⁰" - for digit in tokens[index + exp_is_negative + 2 : exp_number_end] - ] - ) - else: - if ( - index + 2 < len(tokens) - and tokens[index + 1].string == "e" - # No sign on the exponent, treat as + - and tokens[index + 2].type == tokenlib.NUMBER - ): - # Don't know why tokenizer doesn't bundle all these numbers together - for exp_number_end in range(index + 3, len(tokens)): - if tokens[exp_number_end].type != tokenlib.NUMBER: - break - elif ( - index + 3 < len(tokens) - and tokens[index + 1].string == "e" - and tokens[index + 2].string in ["+", "-"] - and tokens[index + 3].type == tokenlib.NUMBER - ): - if tokens[index + 2].string == "-": - exp_is_negative = True - # Don't know why tokenizer doesn't bundle all these numbers together - for exp_number_end in range(index + 4, len(tokens)): - if tokens[exp_number_end].type != tokenlib.NUMBER: - break - if exp_number_end > index: - exp_number = "".join( - [digit.string for digit in tokens[index + 3 : exp_number_end]] - ) - else: - return None, index - exp_number = "1.0e" + ("-" if exp_is_negative else "") + exp_number - assert exp_number_end != index - return exp_number, exp_number_end - - -def finish_exp_number(tokens, exp_number, exp_number_end, plus_minus_op, left, right): - exp_number_token = tokenize.TokenInfo( - type=tokenlib.NUMBER, - string=exp_number, - start=(1, 0), - end=(1, len(exp_number)), - line=exp_number, - ) - e_notation_operator = tokenize.TokenInfo( - type=tokenlib.OP, - string="*", - start=(1, 0), - end=(1, 1), - line="*", - ) - e_notation_scale, _ = build_eval_tree( - [exp_number_token, tokens[-1]], - None, - 0, - 0, - tokens[exp_number_end].string, - ) - scaled_left = EvalTreeNode(left, e_notation_operator, e_notation_scale) - scaled_right = EvalTreeNode(right, e_notation_operator, e_notation_scale) - result = EvalTreeNode(scaled_left, plus_minus_op, scaled_right) - index = exp_number_end - return result, index - - def build_eval_tree( tokens: Iterable[tokenize.TokenInfo], op_priority=None, @@ -311,20 +423,6 @@ def build_eval_tree( right, index = build_eval_tree( tokens, op_priority, index + 1, depth + 1, token_text ) - if token_text in ["±", "+/-"]: - # See if we need to scale the nominal_value and std_dev terms by an eponent - exp_number, exp_number_end = peek_exp_number(tokens, index) - if exp_number: - result, index = finish_exp_number( - tokens, - exp_number, - exp_number_end, - current_token, - result, - right, - ) - # We know we are not at an ENDMARKER here - continue result = EvalTreeNode( left=result, operator=current_token, right=right ) diff --git 
a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py index d7dcc6496..bd9fe20ec 100644 --- a/pint/testsuite/test_issues.py +++ b/pint/testsuite/test_issues.py @@ -867,6 +867,9 @@ def test_issue1611(self, module_registry): from numpy.testing import assert_almost_equal from uncertainties import ufloat + from pint import pint_eval + pint_eval.tokenizer = pint_eval.uncertainty_tokenizer + u1 = ufloat(1.2, 0.34) u2 = ufloat(5.6, 0.78) q1_u = module_registry.Quantity(u2 - u1, "m") diff --git a/pint/testsuite/test_measurement.py b/pint/testsuite/test_measurement.py index 926b4d6a6..b61aed882 100644 --- a/pint/testsuite/test_measurement.py +++ b/pint/testsuite/test_measurement.py @@ -2,7 +2,7 @@ from pint import DimensionalityError from pint.testsuite import QuantityTestCase, helpers - +from pint import pint_eval # TODO: do not subclass from QuantityTestCase @helpers.requires_not_uncertainties() @@ -272,3 +272,11 @@ def test_measurement_comparison(self): y = self.Q_(5.0, "meter").plus_minus(0.1) assert x <= y assert not (x >= y) + + def test_tokenization(self): + from pint import pint_eval + + pint_eval.tokenizer = pint_eval.uncertainty_tokenizer + for p in pint_eval.tokenizer("8 + / - 4"): + print(p) + assert True diff --git a/pint/testsuite/test_pint_eval.py b/pint/testsuite/test_pint_eval.py index bed81057d..10ae988c8 100644 --- a/pint/testsuite/test_pint_eval.py +++ b/pint/testsuite/test_pint_eval.py @@ -1,12 +1,14 @@ import pytest -from pint.compat import tokenizer +from pint import pint_eval from pint.pint_eval import build_eval_tree +# This is how we enable the parsing of uncertainties +# pint_eval.tokenizer = pint_eval.uncertainty_tokenizer class TestPintEval: def _test_one(self, input_text, parsed): - assert build_eval_tree(tokenizer(input_text)).to_string() == parsed + assert build_eval_tree(pint_eval.tokenizer(input_text)).to_string() == parsed @pytest.mark.parametrize( ("input_text", "parsed"), diff --git a/pint/testsuite/test_util.py b/pint/testsuite/test_util.py index d2eebe59a..053d20238 100644 --- a/pint/testsuite/test_util.py +++ b/pint/testsuite/test_util.py @@ -5,6 +5,7 @@ import pytest +from pint import pint_eval from pint.util import ( ParserHelper, UnitsContainer, @@ -15,7 +16,6 @@ sized, string_preprocessor, to_units_container, - tokenizer, transpose, ) @@ -194,7 +194,7 @@ def test_calculate(self): assert dict(seconds=1) / z() == ParserHelper(0.5, seconds=1, meter=-2) def _test_eval_token(self, expected, expression, use_decimal=False): - token = next(tokenizer(expression)) + token = next(pint_eval.tokenizer(expression)) actual = ParserHelper.eval_token(token, use_decimal=use_decimal) assert expected == actual assert type(expected) == type(actual) diff --git a/pint/util.py b/pint/util.py index 54a7755d3..2236efe79 100644 --- a/pint/util.py +++ b/pint/util.py @@ -23,10 +23,11 @@ from token import NAME, NUMBER from typing import TYPE_CHECKING, ClassVar, Optional, Type, Union -from .compat import NUMERIC_TYPES, tokenizer +from .compat import NUMERIC_TYPES from .errors import DefinitionSyntaxError from .formatting import format_unit from .pint_eval import build_eval_tree +from . 
import pint_eval

 if TYPE_CHECKING:
     from pint import Quantity, UnitRegistry
@@ -620,7 +621,7 @@ def from_string(cls, input_string, non_int_type=float):
         else:
             reps = False

-        gen = tokenizer(input_string)
+        gen = pint_eval.tokenizer(input_string)
         ret = build_eval_tree(gen).evaluate(
             partial(cls.eval_token, non_int_type=non_int_type)
         )

From f89e183e1b39a4e44bcced708932211dbd34bb26 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Thu, 29 Dec 2022 15:29:16 +1300
Subject: [PATCH 07/24] Fix merge conflict error

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/facets/plain/registry.py | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/pint/facets/plain/registry.py b/pint/facets/plain/registry.py
index da6a3c4d6..8d0f74aa3 100644
--- a/pint/facets/plain/registry.py
+++ b/pint/facets/plain/registry.py
@@ -41,18 +41,11 @@
 from ..context import Context
 from pint import Quantity, Unit

-from ... import parser
 from ... import pint_eval
 from ..._typing import QuantityOrUnitLike, UnitLike
 from ..._vendor import appdirs
 from ...compat import HAS_BABEL, babel_parse
-from ...definitions import Definition
-from ...errors import (
-    DefinitionSyntaxError,
-    DimensionalityError,
-    RedefinitionError,
-    UndefinedUnitError,
-)
+from ...errors import DimensionalityError, RedefinitionError, UndefinedUnitError
 from ...pint_eval import build_eval_tree
 from ...util import ParserHelper
 from ...util import UnitsContainer

From 7198cf01e6a3dd48d899acc4b455c50e34f59d26 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Tue, 3 Jan 2023 18:58:05 +1300
Subject: [PATCH 08/24] Update util.py

Fixes problems parsing currency symbols that also show up when dealing
with uncertainties.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/util.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pint/util.py b/pint/util.py
index 440ac5ee3..9cc6d2d89 100644
--- a/pint/util.py
+++ b/pint/util.py
@@ -899,6 +899,8 @@ def to_units_container(
         return unit_like._units
     elif str in mro:
         if registry:
+            for p in registry.preprocessors:
+                unit_like = p(unit_like)
             return registry._parse_units(unit_like)
         else:
             return ParserHelper.from_string(unit_like)

From e5004a545b2100dae852926dcc6f5ce7d363d348 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Tue, 17 Jan 2023 16:42:44 +1300
Subject: [PATCH 09/24] Update pint_eval.py

Handle negative numbers using uncertainty parenthesis notation.
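For example, a negative nominal value inside the parenthesized form is now
recognized by the ( nominal +/- std_dev ) lookahead. An illustrative sketch,
assuming the opt-in uncertainty tokenizer has been enabled as in the test
suite:

    from pint import UnitRegistry, pint_eval

    pint_eval.tokenizer = pint_eval.uncertainty_tokenizer
    ureg = UnitRegistry()

    # The leading minus no longer defeats the lookahead match.
    q = ureg.Quantity("(-1.2 +/- 0.34) m")
    print(q.magnitude.nominal_value)  # -1.2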
From e5004a545b2100dae852926dcc6f5ce7d363d348 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Tue, 17 Jan 2023 16:42:44 +1300
Subject: [PATCH 09/24] Update pint_eval.py

Handle negative numbers using uncertainty parenthesis notation.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/pint_eval.py | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/pint/pint_eval.py b/pint/pint_eval.py
index b8c63a85c..b4effe9eb 100644
--- a/pint/pint_eval.py
+++ b/pint/pint_eval.py
@@ -182,19 +182,23 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
             yield plus_minus_op
         elif (
             tokinfo.string == "("
-            and _number_or_nan(toklist.lookahead(0))
-            and toklist.lookahead(1).string == "+"
-            and toklist.lookahead(2).string == "/"
-            and toklist.lookahead(3).string == "-"
-            and _number_or_nan(toklist.lookahead(4))
-            and toklist.lookahead(5).string == ")"
+            and (seen_minus := 1 if toklist.lookahead(0).string == "-" else 0)
+            and _number_or_nan(toklist.lookahead(seen_minus))
+            and toklist.lookahead(seen_minus + 1).string == "+"
+            and toklist.lookahead(seen_minus + 2).string == "/"
+            and toklist.lookahead(seen_minus + 3).string == "-"
+            and _number_or_nan(toklist.lookahead(seen_minus + 4))
+            and toklist.lookahead(seen_minus + 5).string == ")"
         ):
             # ( NUM_OR_NAN +/- NUM_OR_NAN ) POSSIBLE_E_NOTATION
-            possible_e = _get_possible_e (toklist, 6)
+            possible_e = _get_possible_e (toklist, seen_minus + 6)
             if possible_e:
                 end = possible_e.end
             else:
-                end = toklist.lookahead(5).end
+                end = toklist.lookahead(seen_minus + 5).end
+            if seen_minus:
+                minus_op = next(toklist)
+                yield minus_op
             nominal_value = next(toklist)
             tokinfo = next(toklist) # consume '+'
             next(toklist) # consume '/'

From a4a1fa57a4236c5bf853fee198e484efb036a2b3 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Tue, 17 Jan 2023 17:02:43 +1300
Subject: [PATCH 10/24] Update pint_eval.py

Ahem... use walrus operator for side effect, not truth value.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/pint_eval.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pint/pint_eval.py b/pint/pint_eval.py
index b4effe9eb..08df874a0 100644
--- a/pint/pint_eval.py
+++ b/pint/pint_eval.py
@@ -182,7 +182,7 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
             yield plus_minus_op
         elif (
             tokinfo.string == "("
-            and (seen_minus := 1 if toklist.lookahead(0).string == "-" else 0)
+            and ((seen_minus := 1 if toklist.lookahead(0).string == "-" else 0) or True)
             and _number_or_nan(toklist.lookahead(seen_minus))
             and toklist.lookahead(seen_minus + 1).string == "+"
             and toklist.lookahead(seen_minus + 2).string == "/"
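Why the `or True`: the walrus assignment has to run for its binding even when there is no leading minus, in which case `seen_minus` is 0 (falsy) and would otherwise short-circuit the whole condition. A standalone sketch of the idiom, outside of Pint:

    tokens = ["3.5", "+", "/", "-", "0.2"]  # no leading minus

    # Bind seen_minus as a side effect; `or True` keeps this clause truthy so
    # the rest of the conjunction is still evaluated.
    if ((seen_minus := 1 if tokens[0] == "-" else 0) or True) and tokens[seen_minus + 1] == "+":
        print("plus/minus syntax; leading minus present:", bool(seen_minus))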
From 79380564ef83ab19a780602454f0a0a2afe475ba Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Sat, 18 Feb 2023 14:36:05 -0500
Subject: [PATCH 11/24] Fixed to work with both + and - e notation in the
 actual processing of the exponent, not just in the parsing of the exponent

i.e., (5.01+/-0.07)e+04

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/pint_eval.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/pint/pint_eval.py b/pint/pint_eval.py
index 08df874a0..7d4306308 100644
--- a/pint/pint_eval.py
+++ b/pint/pint_eval.py
@@ -147,12 +147,13 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
         nominal_value = _apply_e_notation(nominal_value, possible_e)
         std_dev = _apply_e_notation(std_dev, possible_e)
         next(toklist) # consume 'e' and positive exponent value
-        if possible_e.string[1]=='-':
-            next(toklist) # consume '+' or '-' in exponent
+        if possible_e.string[1] in ["+", "-"]:
+            next(toklist) # consume "+" or "-" in exponent
         exp_number = next(toklist) # consume exponent value
-        if exp_number.end < end:
+        if exp_number.string == "0" and toklist.lookahead(0).type==tokenlib.NUMBER:
             exp_number = next(toklist)
         assert(exp_number.end==end)
+        # We've already applied the number, we're just consuming all the tokens
         return nominal_value, std_dev
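What the fix has to preserve: the trailing e notation scales the nominal value and the standard deviation together. A quick check against the uncertainties package directly (output shown approximately):

    from uncertainties import ufloat

    q = ufloat(5.01e4, 0.07e4)  # what "(5.01+/-0.07)e+04" should mean
    print(q)                    # prints roughly (5.01+/-0.07)e+04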
From caa5a1ae862e7f683db6ff3c5ca04823c7734944 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Mon, 1 May 2023 17:06:43 -0400
Subject: [PATCH 12/24] Fix test suite failures

Manually fix test_issue_1400. Let other failures (which are not related to
uncertainties) fail.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/compat.py                |  2 +-
 pint/formatting.py            | 10 +++++++---
 pint/testsuite/test_issues.py |  8 ++++++++
 3 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/pint/compat.py b/pint/compat.py
index 919ee0545..a22899e72 100644
--- a/pint/compat.py
+++ b/pint/compat.py
@@ -14,7 +14,7 @@
 from decimal import Decimal
 from importlib import import_module
 from numbers import Number
-from typing import Mapping, Option
+from typing import Mapping, Optional
 
 try:
     from uncertainties import UFloat, ufloat
diff --git a/pint/formatting.py b/pint/formatting.py
index f450d5f51..5cae6b724 100644
--- a/pint/formatting.py
+++ b/pint/formatting.py
@@ -353,9 +353,13 @@ def formatter(
                     # Don't remove this positional! This is the format used in Babel
                     key = pat.replace("{0}", "").strip()
                     break
-        division_fmt = compound_unit_patterns.get("per", {}).get(
-            babel_length, division_fmt
-        )
+
+        tmp = compound_unit_patterns.get("per", {}).get(babel_length, division_fmt)
+
+        try:
+            division_fmt = tmp.get("compound", division_fmt)
+        except AttributeError:
+            division_fmt = tmp
         power_fmt = "{}{}"
         exp_call = _pretty_fmt_exponent
         if value == 1:
diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py
index d46f95cce..09e296a6c 100644
--- a/pint/testsuite/test_issues.py
+++ b/pint/testsuite/test_issues.py
@@ -881,6 +881,14 @@ def test_issue_1300(self):
         m = module_registry.Measurement(1, 0.1, "meter")
         assert m.default_format == "~P"
 
+    def test_issue_1400(self, sess_registry):
+        q1 = 3 * sess_registry.W
+        q2 = 3 * sess_registry.W / sess_registry.cm
+        assert q1.format_babel("~", locale="es_Ar") == "3 W"
+        assert q1.format_babel("", locale="es_Ar") == "3 vatios"
+        assert q2.format_babel("~", locale="es_Ar") == "3.0 W / cm"
+        assert q2.format_babel("", locale="es_Ar") == "3.0 vatios por centímetros"
+
     @helpers.requires_uncertainties()
     def test_issue1611(self, module_registry):
         from numpy.testing import assert_almost_equal
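The try/except in the formatting change above exists because the Babel compound-unit pattern can come back as either a plain format string or a dict of patterns keyed by style. A standalone sketch of that defensive lookup (the sample patterns are illustrative, not Babel's actual CLDR data):

    for tmp in ("{0}/{1}", {"compound": "{0} per {1}"}):
        try:
            # dict-shaped pattern: prefer its "compound" entry
            division_fmt = tmp.get("compound", "{0}/{1}")
        except AttributeError:
            # plain string pattern: use it as-is
            division_fmt = tmp
        print(division_fmt.format("3.0 vatios", "centímetros"))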
From b810af6c38985790a763817d0ad7f07d66d0ad10 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Sun, 25 Jun 2023 07:51:44 -0400
Subject: [PATCH 13/24] Fix tokenizer merge error in pint/util.py

When using pint_eval.tokenizer, don't try to import tokenizer from pint.compat.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/util.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pint/util.py b/pint/util.py
index c7520cea8..19b8420c3 100644
--- a/pint/util.py
+++ b/pint/util.py
@@ -32,7 +32,7 @@
 )
 from collections.abc import Hashable, Generator
 
-from .compat import NUMERIC_TYPES, tokenizer, Self
+from .compat import NUMERIC_TYPES, Self
 from .errors import DefinitionSyntaxError
 from .formatting import format_unit
 from .pint_eval import build_eval_tree

From 810a0925aa5f520698e3b22e208a61c5a31d282b Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Sun, 25 Jun 2023 10:56:51 -0400
Subject: [PATCH 14/24] Merge cleanup: pint_eval.py needs tokenize

Clean up merge import error.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/pint_eval.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pint/pint_eval.py b/pint/pint_eval.py
index 9dc71317b..0486cd4eb 100644
--- a/pint/pint_eval.py
+++ b/pint/pint_eval.py
@@ -12,6 +12,7 @@
 from io import BytesIO
 import operator
 import token as tokenlib
+import tokenize
 from tokenize import TokenInfo
 from typing import Any, Optional, Union

From 5a4eb10078e29bd31de05dff2813d27513b72a2d Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Sun, 25 Jun 2023 11:06:56 -0400
Subject: [PATCH 15/24] Make black happier

Run `black` with default arguments to try to match whatever `black` wants to
see in the CI/CD world.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/facets/numpy/quantity.py      |  5 +-
 pint/facets/plain/quantity.py      |  1 +
 pint/pint_eval.py                  | 86 ++++++++++++++++++------------
 pint/testsuite/test_issues.py      |  1 +
 pint/testsuite/test_measurement.py |  1 +
 pint/testsuite/test_pint_eval.py   |  1 +
 pint/toktest.py                    |  4 +-
 7 files changed, 62 insertions(+), 37 deletions(-)

diff --git a/pint/facets/numpy/quantity.py b/pint/facets/numpy/quantity.py
index 32ca74d57..5257766bc 100644
--- a/pint/facets/numpy/quantity.py
+++ b/pint/facets/numpy/quantity.py
@@ -32,6 +32,7 @@
 try:
     import uncertainties.unumpy as unp
     from uncertainties import ufloat, UFloat
+
     HAS_UNCERTAINTIES = True
 except ImportError:
     unp = np
@@ -233,7 +234,9 @@ def __getattr__(self, item) -> Any:
                 )
             else:
                 raise exc
-        elif HAS_UNCERTAINTIES and item=="ndim" and isinstance(self._magnitude, UFloat):
+        elif (
+            HAS_UNCERTAINTIES and item == "ndim" and isinstance(self._magnitude, UFloat)
+        ):
             # Dimensionality of a single UFloat is 0, like any other scalar
             return 0
 
diff --git a/pint/facets/plain/quantity.py b/pint/facets/plain/quantity.py
index e45139785..bd70532e2 100644
--- a/pint/facets/plain/quantity.py
+++ b/pint/facets/plain/quantity.py
@@ -58,6 +58,7 @@
 try:
     import uncertainties.unumpy as unp
     from uncertainties import ufloat, UFloat
+
     HAS_UNCERTAINTIES = True
 except ImportError:
     unp = np
diff --git a/pint/pint_eval.py b/pint/pint_eval.py
index 0486cd4eb..f020718ca 100644
--- a/pint/pint_eval.py
+++ b/pint/pint_eval.py
@@ -19,6 +19,7 @@
 
 try:
     from uncertainties import ufloat
+
     HAS_UNCERTAINTIES = True
 except ImportError:
     HAS_UNCERTAINTIES = False
@@ -45,7 +46,7 @@ def _ufloat(left, right):
     if HAS_UNCERTAINTIES:
         return ufloat(left, right)
-    raise TypeError ('Could not import support for uncertainties')
+    raise TypeError("Could not import support for uncertainties")
 
 
 def _power(left: Any, right: Any) -> Any:
@@ -93,6 +94,7 @@ def _plain_tokenizer(input_string):
         if tokinfo.type != tokenlib.ENCODING:
             yield tokinfo
 
+
 def uncertainty_tokenizer(input_string):
     def _number_or_nan(token):
         if token.type == tokenlib.NUMBER or (
@@ -103,59 +105,71 @@ def uncertainty_tokenizer(input_string):
 
     def _get_possible_e(toklist, e_index):
         possible_e_token = toklist.lookahead(e_index)
-        if (possible_e_token.string[0]=="e"
-            and len(possible_e_token.string)>1
-            and possible_e_token.string[1].isdigit()):
+        if (
+            possible_e_token.string[0] == "e"
+            and len(possible_e_token.string) > 1
+            and possible_e_token.string[1].isdigit()
+        ):
             end = possible_e_token.end
             possible_e = tokenize.TokenInfo(
                 type=tokenlib.STRING,
                 string=possible_e_token.string,
                 start=possible_e_token.start,
                 end=end,
-                line=possible_e_token.line)
-        elif (possible_e_token.string[0] in ["e", "E"]
-            and toklist.lookahead(e_index+1).string in ["+", "-"]
-            and toklist.lookahead(e_index+2).type==tokenlib.NUMBER):
+                line=possible_e_token.line,
+            )
+        elif (
+            possible_e_token.string[0] in ["e", "E"]
+            and toklist.lookahead(e_index + 1).string in ["+", "-"]
+            and toklist.lookahead(e_index + 2).type == tokenlib.NUMBER
+        ):
             # Special case: Python allows a leading zero for exponents (i.e., 042) but not for numbers
-            if toklist.lookahead(e_index+2).string == "0" and toklist.lookahead(e_index+3).type==tokenlib.NUMBER:
-                exp_number = toklist.lookahead(e_index+3).string
-                end = toklist.lookahead(e_index+3).end
+            if (
+                toklist.lookahead(e_index + 2).string == "0"
+                and toklist.lookahead(e_index + 3).type == tokenlib.NUMBER
+            ):
+                exp_number = toklist.lookahead(e_index + 3).string
+                end = toklist.lookahead(e_index + 3).end
             else:
-                exp_number = toklist.lookahead(e_index+2).string
-                end = toklist.lookahead(e_index+2).end
+                exp_number = toklist.lookahead(e_index + 2).string
+                end = toklist.lookahead(e_index + 2).end
             possible_e = tokenize.TokenInfo(
                 type=tokenlib.STRING,
                 string=f"e{toklist.lookahead(e_index+1).string}{exp_number}",
                 start=possible_e_token.start,
                 end=end,
-                line=possible_e_token.line)
+                line=possible_e_token.line,
+            )
         else:
             possible_e = None
         return possible_e
 
     def _apply_e_notation(mantissa, exponent):
-        if mantissa.string == 'nan':
+        if mantissa.string == "nan":
             return mantissa
-        if float(mantissa.string)==0.0:
+        if float(mantissa.string) == 0.0:
             return mantissa
         return tokenize.TokenInfo(
             type=tokenlib.NUMBER,
             string=f"{mantissa.string}{exponent.string}",
             start=mantissa.start,
             end=exponent.end,
-            line=exponent.line
+            line=exponent.line,
         )
 
     def _finalize_e(nominal_value, std_dev, toklist, possible_e):
         nominal_value = _apply_e_notation(nominal_value, possible_e)
         std_dev = _apply_e_notation(std_dev, possible_e)
-        next(toklist) # consume 'e' and positive exponent value
+        next(toklist)  # consume 'e' and positive exponent value
         if possible_e.string[1] in ["+", "-"]:
-            next(toklist) # consume "+" or "-" in exponent
-        exp_number = next(toklist) # consume exponent value
-        if exp_number.string == "0" and toklist.lookahead(0).type==tokenlib.NUMBER:
+            next(toklist)  # consume "+" or "-" in exponent
+        exp_number = next(toklist)  # consume exponent value
+        if (
+            exp_number.string == "0"
+            and toklist.lookahead(0).type == tokenlib.NUMBER
+        ):
             exp_number = next(toklist)
-        assert(exp_number.end==end)
+        assert exp_number.end == end
         # We've already applied the number, we're just consuming all the tokens
         return nominal_value, std_dev
 
     # when tokenize encounters whitespace followed by an unknown character,
     # in addition to marking the unknown character as ERRORTOKEN. Rather than
     # wading through all that vomit, just eliminate the problem
     # in the input by rewriting ± as +/-.
-    input_string = input_string.replace('±', '+/-')
+    input_string = input_string.replace("±", "+/-")
     toklist = tokens_with_lookahead(_plain_tokenizer(input_string))
     for tokinfo in toklist:
         line = tokinfo.line
@@ -195,7 +209,7 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
             and toklist.lookahead(seen_minus + 5).string == ")"
         ):
             # ( NUM_OR_NAN +/- NUM_OR_NAN ) POSSIBLE_E_NOTATION
-            possible_e = _get_possible_e (toklist, seen_minus + 6)
+            possible_e = _get_possible_e(toklist, seen_minus + 6)
             if possible_e:
                 end = possible_e.end
             else:
@@ -204,19 +218,21 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
                 end = toklist.lookahead(seen_minus + 5).end
             if seen_minus:
                 minus_op = next(toklist)
                 yield minus_op
             nominal_value = next(toklist)
-            tokinfo = next(toklist) # consume '+'
-            next(toklist) # consume '/'
+            tokinfo = next(toklist)  # consume '+'
+            next(toklist)  # consume '/'
             plus_minus_op = tokenize.TokenInfo(
                 type=tokenlib.OP,
                 string="+/-",
                 start=tokinfo.start,
-                end=next(toklist).end, # consume '-'
+                end=next(toklist).end,  # consume '-'
                 line=line,
             )
             std_dev = next(toklist)
-            next(toklist) # consume final ')'
+            next(toklist)  # consume final ')'
             if possible_e:
-                nominal_value, std_dev = _finalize_e(nominal_value, std_dev, toklist, possible_e)
+                nominal_value, std_dev = _finalize_e(
+                    nominal_value, std_dev, toklist, possible_e
+                )
             yield nominal_value
             yield plus_minus_op
             yield std_dev
@@ -227,18 +243,18 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
             and toklist.lookahead(2).string == ")"
         ):
             # NUM_OR_NAN ( NUM_OR_NAN ) POSSIBLE_E_NOTATION
-            possible_e = _get_possible_e (toklist, 3)
+            possible_e = _get_possible_e(toklist, 3)
             if possible_e:
                 end = possible_e.end
             else:
                 end = toklist.lookahead(2).end
             nominal_value = tokinfo
-            tokinfo = next(toklist) # consume '('
+            tokinfo = next(toklist)  # consume '('
             plus_minus_op = tokenize.TokenInfo(
                 type=tokenlib.OP,
                 string="+/-",
                 start=tokinfo.start,
-                end=tokinfo.end, # this is funky because there's no "+/-" in nominal(std_dev) notation
+                end=tokinfo.end,  # this is funky because there's no "+/-" in nominal(std_dev) notation
                 line=line,
             )
             std_dev = next(toklist)
@@ -250,9 +266,11 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
                 end=std_dev.end,
                 line=line,
             )
-            next(toklist) # consume final ')'
+            next(toklist)  # consume final ')'
             if possible_e:
-                nominal_value, std_dev = _finalize_e(nominal_value, std_dev, toklist, possible_e)
+                nominal_value, std_dev = _finalize_e(
+                    nominal_value, std_dev, toklist, possible_e
+                )
             yield nominal_value
             yield plus_minus_op
             yield std_dev
diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py
index f9b7d8fde..add5b4c01 100644
--- a/pint/testsuite/test_issues.py
+++ b/pint/testsuite/test_issues.py
@@ -895,6 +895,7 @@ def test_issue1611(self, module_registry):
         from uncertainties import ufloat
 
         from pint import pint_eval
+
         pint_eval.tokenizer = pint_eval.uncertainty_tokenizer
 
         u1 = ufloat(1.2, 0.34)
diff --git a/pint/testsuite/test_measurement.py b/pint/testsuite/test_measurement.py
index 73abe01cb..74fb8b8c3 100644
--- a/pint/testsuite/test_measurement.py
+++ b/pint/testsuite/test_measurement.py
@@ -4,6 +4,7 @@
 from pint.testsuite import QuantityTestCase, helpers
 from pint import pint_eval
 
+
 # TODO: do not subclass from QuantityTestCase
 @helpers.requires_not_uncertainties()
 class TestNotMeasurement(QuantityTestCase):
diff --git a/pint/testsuite/test_pint_eval.py b/pint/testsuite/test_pint_eval.py
index b7be9c4d8..fc0012e6d 100644
--- a/pint/testsuite/test_pint_eval.py
+++ b/pint/testsuite/test_pint_eval.py
@@ -6,6 +6,7 @@
 # This is how we enable the parsing of uncertainties
 # tokenizer = pint.pint_eval.uncertainty_tokenizer
+
 
 class TestPintEval:
     def _test_one(self, input_text, parsed, preprocess=False):
         if preprocess:
diff --git a/pint/toktest.py b/pint/toktest.py
index 36b5cd128..a370fe27f 100644
--- a/pint/toktest.py
+++ b/pint/toktest.py
@@ -21,8 +21,8 @@
     "8.0 ± 4.0 m",
     "8.0(4)m",
     "8.0(.4)m",
-    "8.0(-4)m", # error!
-    "pint == wonderfulness ^ N + - + / - * ± m J s"
+    "8.0(-4)m",  # error!
+    "pint == wonderfulness ^ N + - + / - * ± m J s",
 ]
 
 for line in input_lines:

From 945e93f4ebd8c0c154351956829269d4efa783c3 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Sun, 25 Jun 2023 11:13:56 -0400
Subject: [PATCH 16/24] Make ruff happy

Remove unused redefinition of tokenizer in toktest.py. Also remove the
unnecessary import of pint_eval from the top level (it's imported inside the
function definition that needs it).

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/testsuite/test_measurement.py | 1 -
 pint/toktest.py                    | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/pint/testsuite/test_measurement.py b/pint/testsuite/test_measurement.py
index 74fb8b8c3..f3716289e 100644
--- a/pint/testsuite/test_measurement.py
+++ b/pint/testsuite/test_measurement.py
@@ -2,7 +2,6 @@
 
 from pint import DimensionalityError
 from pint.testsuite import QuantityTestCase, helpers
-from pint import pint_eval
 
 
 # TODO: do not subclass from QuantityTestCase
diff --git a/pint/toktest.py b/pint/toktest.py
index a370fe27f..d400262da 100644
--- a/pint/toktest.py
+++ b/pint/toktest.py
@@ -6,7 +6,7 @@
 from tokenize import NUMBER, STRING, NAME, OP
 import token as tokenlib
 from io import BytesIO
-from pint.pint_eval import _plain_tokenizer, tokenizer, uncertainty_tokenizer
+from pint.pint_eval import _plain_tokenizer, uncertainty_tokenizer
 from pint.pint_eval import tokens_with_lookahead
 
 tokenizer = _plain_tokenizer
From 397969d02e67e4c9db09c4f93b6933ddfdd51642 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Sun, 25 Jun 2023 11:37:46 -0400
Subject: [PATCH 17/24] Make ruff happier

Fix ruff errors missed in the previous commit.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/compat.py  | 2 +-
 pint/toktest.py | 7 -------
 2 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/pint/compat.py b/pint/compat.py
index 71dbc264b..4f34a1843 100644
--- a/pint/compat.py
+++ b/pint/compat.py
@@ -17,7 +17,7 @@
 from numbers import Number
 from collections.abc import Mapping
 from typing import Any, NoReturn, Callable, Optional, Union
-from collections.abc import Generator, Iterable
+from collections.abc import Iterable
 
 try:
     from uncertainties import UFloat, ufloat
diff --git a/pint/toktest.py b/pint/toktest.py
index d400262da..dc0564ba7 100644
--- a/pint/toktest.py
+++ b/pint/toktest.py
@@ -1,13 +1,6 @@
-import pint
-from pint import Quantity as Q_
-import re
 import tokenize
-from tokenize import NUMBER, STRING, NAME, OP
-import token as tokenlib
-from io import BytesIO
 
 from pint.pint_eval import _plain_tokenizer, uncertainty_tokenizer
-from pint.pint_eval import tokens_with_lookahead
 
 tokenizer = _plain_tokenizer

From ec4123c77cb3aebcd18de577d735bbf47c7800c6 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Sun, 25 Jun 2023 11:49:17 -0400
Subject: [PATCH 18/24] Update toktest.py

Fix whitespace error created by `ruff --fix` that `black` didn't like.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/toktest.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/pint/toktest.py b/pint/toktest.py
index dc0564ba7..ef606d6a9 100644
--- a/pint/toktest.py
+++ b/pint/toktest.py
@@ -1,4 +1,3 @@
-
 import tokenize
 
 from pint.pint_eval import _plain_tokenizer, uncertainty_tokenizer

From 032d972795009264ba13ed9246bb4ea12539d8a5 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Sun, 25 Jun 2023 20:40:32 -0400
Subject: [PATCH 19/24] Update test_util.py

Follow the deprecation of use_decimal from pint/util.py.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/testsuite/test_util.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pint/testsuite/test_util.py b/pint/testsuite/test_util.py
index c16c7f33d..70136cf35 100644
--- a/pint/testsuite/test_util.py
+++ b/pint/testsuite/test_util.py
@@ -193,9 +193,9 @@ def test_calculate(self):
         assert "seconds" / z() == ParserHelper(0.5, seconds=1, meter=-2)
         assert dict(seconds=1) / z() == ParserHelper(0.5, seconds=1, meter=-2)
 
-    def _test_eval_token(self, expected, expression, use_decimal=False):
+    def _test_eval_token(self, expected, expression):
         token = next(pint_eval.tokenizer(expression))
-        actual = ParserHelper.eval_token(token, use_decimal=use_decimal)
+        actual = ParserHelper.eval_token(token)
         assert expected == actual
         assert type(expected) == type(actual)
From 772da53589751a5ed725d47049c692ccdccc705b Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Wed, 28 Jun 2023 16:29:22 -0400
Subject: [PATCH 20/24] Fix additional regressions in test suite

If we have the uncertainties library loaded, go ahead and use the
uncertainty_tokenizer by default. This fixes problems with standard Pandas
tests that expect the tokenizer to do the right thing without any special
setup.

Also, prevent an exception when a loop in consensus_name_attr
(pandas-dev/pandas/core/common.py(86)) tests equality with a None argument.
Otherwise the zero_or_nan test raises an exception.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/facets/plain/quantity.py | 3 +++
 pint/pint_eval.py             | 5 ++++-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/pint/facets/plain/quantity.py b/pint/facets/plain/quantity.py
index bd70532e2..d7cb2a9c2 100644
--- a/pint/facets/plain/quantity.py
+++ b/pint/facets/plain/quantity.py
@@ -1324,6 +1324,9 @@ def bool_result(value):
         # We compare to the plain class of PlainQuantity because
         # each PlainQuantity class is unique.
         if not isinstance(other, PlainQuantity):
+            if other is None:
+                # A loop in pandas-dev/pandas/core/common.py(86) consensus_name_attr() can result in OTHER being None
+                return bool_result(False)
             if zero_or_nan(other, True):
                 # Handle the special case in which we compare to zero or NaN
                 # (or an array of zeros or NaNs)
diff --git a/pint/pint_eval.py b/pint/pint_eval.py
index f020718ca..3f030505b 100644
--- a/pint/pint_eval.py
+++ b/pint/pint_eval.py
@@ -278,7 +278,10 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
         yield tokinfo
 
 
-tokenizer = _plain_tokenizer
+if HAS_UNCERTAINTIES:
+    tokenizer = uncertainty_tokenizer
+else:
+    tokenizer = _plain_tokenizer
 
 import typing

From 3c547477c71299d79d54bb371af9c2abf6c0dc03 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Sun, 2 Jul 2023 09:12:17 -0400
Subject: [PATCH 21/24] Update quantity.py

Teach Pint's PlainQuantity about the Pandas pd.NA value so that ndim works.
Otherwise, it naively delegates to NumpyQuantity, which is the road to
perdition for PintArrays.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/facets/plain/quantity.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pint/facets/plain/quantity.py b/pint/facets/plain/quantity.py
index d7cb2a9c2..ab70302d0 100644
--- a/pint/facets/plain/quantity.py
+++ b/pint/facets/plain/quantity.py
@@ -164,6 +164,8 @@ class PlainQuantity(Generic[MagnitudeT], PrettyIPython, SharedRegistryObject):
     def ndim(self) -> int:
         if isinstance(self.magnitude, numbers.Number):
             return 0
+        if str(self.magnitude) == "<NA>":
+            return 0
         return self.magnitude.ndim
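A sketch of the pd.NA case the patch above targets (assuming pandas is installed; str(pd.NA) renders as "<NA>", and pd.NA itself has no ndim attribute):

    import pandas as pd
    import pint

    ureg = pint.UnitRegistry()
    q = ureg.Quantity(pd.NA, "m")  # a scalar missing value, not an array
    print(q.ndim)                  # 0 with the patch, instead of delegating
                                   # to a magnitude that lacks ndim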
From f55b8deeb1c8809b34d0cdb5e74e1341bbbfc57c Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Fri, 15 Sep 2023 12:54:43 -0400
Subject: [PATCH 22/24] Make `babel` a dependency for testbase

Here's hoping this fixes the CI/CD problem with test_1400.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pyproject.toml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 4b6b7312d..cdec7775e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,7 +44,8 @@ testbase = [
   "pytest",
   "pytest-cov",
   "pytest-subtests",
-  "pytest-benchmark"
+  "pytest-benchmark",
+  "babel"
 ]
 test = [
   "pytest",

From f55b8deeb1c8809b34d0cdb5e74e1341bbbfc57c Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Fri, 15 Sep 2023 12:58:18 -0400
Subject: [PATCH 23/24] Update .readthedocs.yaml

Removing `system_packages: false` as suggested by @keewis

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 .readthedocs.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index d180754e6..7d72db2a1 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -11,4 +11,3 @@ python:
   - requirements: requirements_docs.txt
   - method: pip
     path: .
-    system_packages: false

From 00f08f3e6bdcf7b51895501382744e8b9f2e1037 Mon Sep 17 00:00:00 2001
From: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
Date: Fri, 15 Sep 2023 14:11:10 -0400
Subject: [PATCH 24/24] Fix failing tests

Fix isnan to use unp.isnan as appropriate for both duck_array_type and
objects of UFloat types.

Fix a minor typo in pint/facets/__init__.py comment.

In test_issue_1400, use decorators to ensure babel library is loaded when
needed.

pyproject.toml: revert change to testbase; we fixed with decorators instead.

Signed-off-by: Michael Tiemann <72577720+MichaelTiemannOSC@users.noreply.github.com>
---
 pint/compat.py                | 17 ++++++++++++-----
 pint/facets/__init__.py       |  2 +-
 pint/testsuite/test_issues.py |  1 +
 pyproject.toml                |  3 +--
 4 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/pint/compat.py b/pint/compat.py
index 4f34a1843..552ff3f7e 100644
--- a/pint/compat.py
+++ b/pint/compat.py
@@ -325,21 +325,28 @@ def isnan(obj: Any, check_all: bool) -> Union[bool, Iterable[bool]]:
     Always return False for non-numeric types.
     """
     if is_duck_array_type(type(obj)):
-        if obj.dtype.kind in "if":
+        if obj.dtype.kind in "ifc":
             out = np.isnan(obj)
         elif obj.dtype.kind in "Mm":
             out = np.isnat(obj)
         else:
-            # Not a numeric or datetime type
-            out = np.full(obj.shape, False)
+            if HAS_UNCERTAINTIES:
+                try:
+                    out = unp.isnan(obj)
+                except TypeError:
+                    # Not a numeric or UFloat type
+                    out = np.full(obj.shape, False)
+            else:
+                # Not a numeric or datetime type
+                out = np.full(obj.shape, False)
         return out.any() if check_all else out
     if isinstance(obj, np_datetime64):
         return np.isnat(obj)
+    elif HAS_UNCERTAINTIES and isinstance(obj, UFloat):
+        return unp.isnan(obj)
     try:
         return math.isnan(obj)
     except TypeError:
-        if HAS_UNCERTAINTIES:
-            return unp.isnan(obj)
         return False
diff --git a/pint/facets/__init__.py b/pint/facets/__init__.py
index 4fd1597a6..22fbc6ce1 100644
--- a/pint/facets/__init__.py
+++ b/pint/facets/__init__.py
@@ -7,7 +7,7 @@ keeping each part small enough to be hackable.
 
 Each facet contains one or more of the following modules:
-- definitions: classes describing an specific unit related definiton.
+- definitions: classes describing specific unit-related definitions.
   These objects must be immutable, pickable and not reference
   the registry (e.g. ContextDefinition)
 - objects: classes and functions that encapsulate behavior (e.g.
   Context)
 - registry: implements a subclass of PlainRegistry or class that can be
diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py
index add5b4c01..c98ac61bf 100644
--- a/pint/testsuite/test_issues.py
+++ b/pint/testsuite/test_issues.py
@@ -881,6 +881,7 @@ def test_issue_1300(self):
         m = module_registry.Measurement(1, 0.1, "meter")
         assert m.default_format == "~P"
 
+    @helpers.requires_babel()
     def test_issue_1400(self, sess_registry):
         q1 = 3 * sess_registry.W
         q2 = 3 * sess_registry.W / sess_registry.cm
diff --git a/pyproject.toml b/pyproject.toml
index cdec7775e..4b6b7312d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,8 +44,7 @@ testbase = [
   "pytest",
   "pytest-cov",
   "pytest-subtests",
-  "pytest-benchmark",
-  "babel"
+  "pytest-benchmark"
 ]
 test = [
   "pytest",
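A sketch of the isnan behavior the final patch is after (assuming numpy and uncertainties are installed; pint.compat.isnan is the helper patched above, taking the object and a check_all flag):

    import numpy as np
    from uncertainties import ufloat

    from pint.compat import isnan

    print(isnan(ufloat(float("nan"), 0.1), False))  # True: NaN nominal value
    print(isnan(ufloat(1.0, 0.2), False))           # False

    # Object-dtype arrays of UFloat now go through unp.isnan as well;
    # check_all=True collapses the elementwise result with any().
    arr = np.array([ufloat(1.0, 0.1), ufloat(float("nan"), 0.1)], dtype=object)
    print(isnan(arr, True))  # True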