diff --git a/python/paddle/pir/math_op_patch.py b/python/paddle/pir/math_op_patch.py
index f12603c56c5c7..8d43e53c95239 100644
--- a/python/paddle/pir/math_op_patch.py
+++ b/python/paddle/pir/math_op_patch.py
@@ -477,18 +477,53 @@ def _T_(self):
         return _C_ops.transpose(self, perm)
 
     def _int_(self):
-        raise TypeError(
-            "int(Value) is not supported in static graph mode. If you are using @to_static, you can try this:\n"
-            "1. If you want to get the value of Value, you can switch to non-fullgraph mode by setting @to_static(full_graph=True).\n"
-            "2. If you want to run it in full graph mode, you need use Value.astype(paddle.int32), and do not use int(Value)."
-        )
+        error_msg = """\
+            int(Tensor) is not supported in static graph mode. Because its value is not available during the static mode.
+            It's usually triggered by the logging implicitly, for example:
+                >>> logging.info(f"The value of x is: {int(x)}")
+                                                       ^ `x` is Tensor, `int(x)` triggers int(Tensor)
+
+            There are two common workarounds available:
+            If you are logging Tensor values, then consider logging only at dynamic graphs, for example:
+
+                Modify the following code
+                >>> logging.info(f"The value of x is: {int(x)}")
+                to
+                >>> if paddle.in_dynamic_mode():
+                ...     logging.info(f"The value of x is: {int(x)}")
+
+            If you need to convert the Tensor type, for example:
+                Modify the following code
+                >>> x = int(x)
+                to
+                >>> x = x.astype("int64")
+            """
+
+        raise TypeError(textwrap.dedent(error_msg))
 
     def _float_(self):
-        raise TypeError(
-            "float(Value) is not supported in static graph mode. If you are using @to_static, you can try this:\n"
-            "1. If you want to get the value of Value, you can switch to non-fullgraph mode by setting @to_static(full_graph=True).\n"
-            "2. If you want to run it in full graph mode, you need use Value directly, and do not use float(Value)."
-        )
+        error_msg = """\
+            float(Tensor) is not supported in static graph mode. Because its value is not available during the static mode.
+            It's usually triggered by the logging implicitly, for example:
+                >>> logging.info(f"The value of x is: {float(x)}")
+                                                       ^ `x` is Tensor, `float(x)` triggers float(Tensor)
+
+            There are two common workarounds available:
+            If you are logging Tensor values, then consider logging only at dynamic graphs, for example:
+
+                Modify the following code
+                >>> logging.info(f"The value of x is: {float(x)}")
+                to
+                >>> if paddle.in_dynamic_mode():
+                ...     logging.info(f"The value of x is: {float(x)}")
+
+            If you need to convert the Tensor type, for example:
+                Modify the following code
+                >>> x = float(x)
+                to
+                >>> x = x.astype("float64")
+            """
+        raise TypeError(textwrap.dedent(error_msg))
 
     def _bool_(self):
         error_msg = """\