From 7cd6ab4705b4ab1e223af23c29ace4028a53334b Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Thu, 26 Sep 2024 09:05:48 +0200 Subject: [PATCH] Async API (#598) * Refactor generating methods/props that are async * Better logic * Codegen + update _classes.py * fix issue, plus add tests * add comment * Add docs * apply codegenn to _api.py * Tweak for prop * Backwards compat * fix codegen test * Fix tests * Replace method usage, and disbale backwards compat * forgot one * fix * format * Logic to disable sync method for portability testing * format and enable backwards compat again * codegen --- codegen/README.md | 2 +- codegen/apipatcher.py | 168 ++++++++++++++++---- codegen/idlparser.py | 3 +- codegen/tests/test_codegen_apipatcher.py | 56 ++++++- codegen/tests/test_codegen_result.py | 18 +++ docs/backends.rst | 4 +- docs/guide.rst | 6 +- docs/start.rst | 2 +- docs/wgpu.rst | 25 ++- examples/compute_noop.py | 4 +- examples/compute_timestamps.py | 6 +- examples/cube.py | 6 +- examples/imgui_backend_sea.py | 4 +- examples/imgui_basic_example.py | 4 +- examples/imgui_cmap_picker.py | 4 +- examples/imgui_multi_canvas.py | 4 +- examples/imgui_renderer_sea.py | 4 +- examples/triangle.py | 4 +- examples/triangle_glsl.py | 4 +- tests/test_api.py | 11 +- tests/test_gui_glfw.py | 4 +- tests/test_set_constant.py | 4 +- tests/test_wgpu_native_basics.py | 24 +-- tests/test_wgpu_native_buffer.py | 36 ++--- tests/test_wgpu_native_query_set.py | 4 +- tests/test_wgpu_native_texture.py | 8 +- tests/test_wgpu_vertex_instance.py | 6 +- tests_mem/test_destroy.py | 4 +- tests_mem/test_objects.py | 6 +- wgpu/__init__.py | 2 +- wgpu/_classes.py | 192 +++++++++++++++++------ wgpu/backends/__init__.py | 2 +- wgpu/backends/js_webgpu/__init__.py | 2 +- wgpu/backends/rs.py | 2 +- wgpu/backends/wgpu_native/__init__.py | 3 +- wgpu/backends/wgpu_native/_api.py | 120 ++++++++++---- wgpu/backends/wgpu_native/extras.py | 12 +- wgpu/resources/codegen_report.md | 8 +- wgpu/utils/device.py | 4 +- 39 files 
changed, 567 insertions(+), 215 deletions(-) create mode 100644 codegen/tests/test_codegen_result.py diff --git a/codegen/README.md b/codegen/README.md index d997f57f..5c8e2684 100644 --- a/codegen/README.md +++ b/codegen/README.md @@ -69,7 +69,7 @@ In some cases we may want to deviate from the WebGPU API, because well ... Pytho Other changes include: * Where in JS the input args are provided via a dict, we use kwargs directly. Nevertheless, some input args have subdicts (and sub-sub-dicts) -* For methods that are async in IDL, we also provide sync methods. The Async method names have an "_async" suffix. +* For methods that are async in JavaScript (i.e return a `Promise`), we provide both an asynchronous and synchronous variant, indicated by an `_async` and `_sync` suffix. ### Codegen summary diff --git a/codegen/apipatcher.py b/codegen/apipatcher.py index 1c18a099..d6058a66 100644 --- a/codegen/apipatcher.py +++ b/codegen/apipatcher.py @@ -179,7 +179,7 @@ def patch_properties(self, classname, i1, i2): elif "@apidiff.hide" in pre_lines: pass # continue as normal old_line = self.lines[j1] - new_line = f" def {propname}(self):" + new_line = self.get_property_def(classname, propname) if old_line != new_line: fixme_line = " # FIXME: was " + old_line.split("def ", 1)[-1] lines = [fixme_line, new_line] @@ -241,7 +241,7 @@ def get_missing_properties(self, classname, seen_props): if propname not in seen_props: lines.append(" # FIXME: new prop to implement") lines.append(" @property") - lines.append(f" def {propname}(self):") + lines.append(self.get_property_def(classname, propname)) lines.append(" raise NotImplementedError()") lines.append("") return lines @@ -265,16 +265,105 @@ class IdlPatcherMixin: def __init__(self): super().__init__() self.idl = get_idl_parser() + self.detect_async_props_and_methods() + + def detect_async_props_and_methods(self): + + self.async_idl_names = async_idl_names = {} # (sync-name, async-name) + + for classname, interface in 
self.idl.classes.items(): + for namedict in [interface.attributes, interface.functions]: + for name_idl, idl_line in namedict.items(): + idl_result = idl_line.split(name_idl)[0] + if "Promise" in idl_result: + # We found an async property or method. + name_idl_base = name_idl + if name_idl.endswith("Async"): + name_idl_base = name_idl[:-5] + key = classname, name_idl_base + # Now we determine the kind + if name_idl_base != name_idl and name_idl_base in namedict: + # Has both + async_idl_names[key] = name_idl_base, name_idl + else: + # Only has async + async_idl_names[key] = None, name_idl + + def get_idl_name_variants(self, classname, base_name): + """Returns the names of an idl prop/method for its sync and async variant. + Either can be None. + """ + # Must be a base name, without the suffix + assert not base_name.lower().endswith(("sync", "async")) + + key = classname, base_name + default = base_name, None + return self.async_idl_names.get(key, default) + + def name2idl(self, classname, name_py): + """Map a python propname/methodname to the idl variant. + Take async into account. 
+ """ + if name_py == "__init__": + return "constructor" + + # Get idl base name + if name_py.endswith(("_sync", "_async")): + name_idl_base = to_camel_case(name_py.rsplit("_", 1)[0]) + else: + name_idl_base = to_camel_case(name_py) - def name2idl(self, name): - m = {"__init__": "constructor"} - name = m.get(name, name) - return to_camel_case(name) + # Get idl variant names + idl_sync, idl_async = self.get_idl_name_variants(classname, name_idl_base) - def name2py(self, name): - m = {"constructor": "__init__"} - name = m.get(name, name) - return to_snake_case(name) + # Triage + if idl_sync and idl_async: + if name_py.endswith("_async"): + return idl_async + elif name_py.endswith("_sync"): + return name_idl_base + "InvalidVariant" + else: + return idl_sync + elif idl_async: + if name_py.endswith("_async"): + return idl_async + elif name_py.endswith("_sync"): + return idl_async + else: + return name_idl_base + "InvalidVariant" + else: # idl_sync only + if name_py.endswith("_async"): + return name_idl_base + "InvalidVariant" + elif name_py.endswith("_sync"): + return name_idl_base + "InvalidVariant" + else: + return idl_sync + + def name2py_names(self, classname, name_idl): + """Map a idl propname/methodname to the python variants. + Take async into account. Returns a list with one or two names; + for async props/methods Python has the sync and the async variant. 
+ """ + + if name_idl == "constructor": + return ["__init__"] + + # Get idl base name + name_idl_base = name_idl + if name_idl.endswith("Async"): + name_idl_base = name_idl[:-5] + name_py_base = to_snake_case(name_idl_base) + + # Get idl variant names + idl_sync, idl_async = self.get_idl_name_variants(classname, name_idl_base) + + if idl_sync and idl_async: + return [to_snake_case(idl_sync), name_py_base + "_async"] + elif idl_async: + return [name_py_base + "_sync", name_py_base + "_async"] + else: + assert idl_sync == name_idl_base + return [name_py_base] def class_is_known(self, classname): return classname in self.idl.classes @@ -295,22 +384,28 @@ def get_class_def(self, classname): bases = "" if not bases else f"({', '.join(bases)})" return f"class {classname}{bases}:" + def get_property_def(self, classname, propname): + attributes = self.idl.classes[classname].attributes + name_idl = self.name2idl(classname, propname) + assert name_idl in attributes + + line = "def " + to_snake_case(propname) + "(self):" + if propname.endswith("_async"): + line = "async " + line + return " " + line + def get_method_def(self, classname, methodname): - # Get the corresponding IDL line functions = self.idl.classes[classname].functions - name_idl = self.name2idl(methodname) - if methodname.endswith("_async") and name_idl not in functions: - name_idl = self.name2idl(methodname.replace("_async", "")) - elif name_idl not in functions and name_idl + "Async" in functions: - name_idl += "Async" - idl_line = functions[name_idl] + name_idl = self.name2idl(classname, methodname) + assert name_idl in functions # Construct preamble preamble = "def " + to_snake_case(methodname) + "(" - if "async" in methodname: + if methodname.endswith("_async"): preamble = "async " + preamble # Get arg names and types + idl_line = functions[name_idl] args = idl_line.split("(", 1)[1].split(")", 1)[0].split(",") args = [arg.strip() for arg in args if arg.strip()] raw_defaults = [arg.partition("=")[2].strip() 
for arg in args] @@ -361,28 +456,31 @@ def _arg_from_struct_field(self, field): return result def prop_is_known(self, classname, propname): - propname_idl = self.name2idl(propname) - return propname_idl in self.idl.classes[classname].attributes + attributes = self.idl.classes[classname].attributes + propname_idl = self.name2idl(classname, propname) + return propname_idl if propname_idl in attributes else None def method_is_known(self, classname, methodname): functions = self.idl.classes[classname].functions - name_idl = self.name2idl(methodname) - if "_async" in methodname and name_idl not in functions: - name_idl = self.name2idl(methodname.replace("_async", "")) - elif name_idl not in functions and name_idl + "Async" in functions: - name_idl += "Async" - return name_idl if name_idl in functions else None + methodname_idl = self.name2idl(classname, methodname) + return methodname_idl if methodname_idl in functions else None def get_class_names(self): return list(self.idl.classes.keys()) def get_required_prop_names(self, classname): - propnames_idl = self.idl.classes[classname].attributes.keys() - return [self.name2py(x) for x in propnames_idl] + attributes = self.idl.classes[classname].attributes + names = [] + for name_idl in attributes.keys(): + names.extend(self.name2py_names(classname, name_idl)) + return names def get_required_method_names(self, classname): - methodnames_idl = self.idl.classes[classname].functions.keys() - return [self.name2py(x) for x in methodnames_idl] + functions = self.idl.classes[classname].functions + names = [] + for name_idl in functions.keys(): + names.extend(self.name2py_names(classname, name_idl)) + return names class BaseApiPatcher(IdlPatcherMixin, AbstractApiPatcher): @@ -398,14 +496,16 @@ def get_class_comment(self, classname): return None def get_prop_comment(self, classname, propname): - if self.prop_is_known(classname, propname): - propname_idl = self.name2idl(propname) - return " # IDL: " + 
self.idl.classes[classname].attributes[propname_idl] + attributes = self.idl.classes[classname].attributes + name_idl = self.prop_is_known(classname, propname) + if name_idl: + return " # IDL: " + attributes[name_idl] def get_method_comment(self, classname, methodname): + functions = self.idl.classes[classname].functions name_idl = self.method_is_known(classname, methodname) if name_idl: - return " # IDL: " + self.idl.classes[classname].functions[name_idl] + return " # IDL: " + functions[name_idl] class BackendApiPatcher(AbstractApiPatcher): diff --git a/codegen/idlparser.py b/codegen/idlparser.py index 5063c91d..6446a2b6 100644 --- a/codegen/idlparser.py +++ b/codegen/idlparser.py @@ -67,8 +67,7 @@ class IdlParser: * enums: a dict mapping the (Pythonic) enum name to a dict of field-value pairs. * structs: a dict mapping the (Pythonic) struct name to a dict of StructField objects. - * functions: a dict mapping the (normalized) func name to the line defining the - function. + * classes: a dict mapping the (normalized) class name to an Interface object.
""" diff --git a/codegen/tests/test_codegen_apipatcher.py b/codegen/tests/test_codegen_apipatcher.py index 6ef5bb13..31d66a89 100644 --- a/codegen/tests/test_codegen_apipatcher.py +++ b/codegen/tests/test_codegen_apipatcher.py @@ -2,7 +2,7 @@ """ from codegen.utils import blacken -from codegen.apipatcher import CommentRemover, AbstractCommentInjector +from codegen.apipatcher import CommentRemover, AbstractCommentInjector, IdlPatcherMixin def dedent(code): @@ -110,6 +110,60 @@ def eggs(self): assert code2 == code3 +def test_async_api_logic(): + + class Object(object): + pass + + class OtherIdlPatcherMixin(IdlPatcherMixin): + def __init__(self): + cls = Object() + cls.attributes = { + "prop1": "x prop1 bla", + "prop2": "Promise prop2 bla", + } + cls.functions = { + "method1": "x method1 bla", + "method2": "Promise method2 bla", + "method3Async": "Promise method3 bla", + "method3": "x method3 bla", + } + + self.idl = Object() + self.idl.classes = {"Foo": cls} + + patcher = OtherIdlPatcherMixin() + patcher.detect_async_props_and_methods() + + # Normal prop + assert patcher.name2idl("Foo", "prop1") == "prop1" + assert patcher.name2idl("Foo", "prop1_sync") == "prop1InvalidVariant" + assert patcher.name2idl("Foo", "prop1_async") == "prop1InvalidVariant" + + # Unknow prop, name still works + assert patcher.name2idl("Foo", "prop_unknown") == "propUnknown" + + # Async prop + assert patcher.name2idl("Foo", "prop2_async") == "prop2" + assert patcher.name2idl("Foo", "prop2_sync") == "prop2" + assert patcher.name2idl("Foo", "prop2") == "prop2InvalidVariant" + + # Normal method + assert patcher.name2idl("Foo", "method1") == "method1" + assert patcher.name2idl("Foo", "method1_sync") == "method1InvalidVariant" + assert patcher.name2idl("Foo", "method1_async") == "method1InvalidVariant" + + # Async method + assert patcher.name2idl("Foo", "method2_async") == "method2" + assert patcher.name2idl("Foo", "method2_sync") == "method2" + assert patcher.name2idl("Foo", "method2") == 
"method2InvalidVariant" + + # Async method that also has sync variant in JS + assert patcher.name2idl("Foo", "method3_async") == "method3Async" + assert patcher.name2idl("Foo", "method3") == "method3" + assert patcher.name2idl("Foo", "method3_sync") == "method3InvalidVariant" + + if __name__ == "__main__": for func in list(globals().values()): if callable(func) and func.__name__.startswith("test_"): diff --git a/codegen/tests/test_codegen_result.py b/codegen/tests/test_codegen_result.py new file mode 100644 index 00000000..44c3b830 --- /dev/null +++ b/codegen/tests/test_codegen_result.py @@ -0,0 +1,18 @@ +""" Test some aspects of the generated code. +""" + +from codegen.files import read_file + + +def test_async_methods_and_props(): + # Test that only and all aync methods are suffixed with '_async' + + for fname in ["_classes.py", "backends/wgpu_native/_api.py"]: + code = read_file(fname) + for line in code.splitlines(): + line = line.strip() + if line.startswith("def "): + assert not line.endswith("_async"), line + elif line.startswith("async def "): + name = line.split("def", 1)[1].split("(")[0].strip() + assert name.endswith("_async"), line diff --git a/docs/backends.rst b/docs/backends.rst index 8b985140..93a2d4fd 100644 --- a/docs/backends.rst +++ b/docs/backends.rst @@ -44,7 +44,7 @@ It also works out of the box, because the wgpu-native DLL is shipped with wgpu-p The wgpu_native backend provides a few extra functionalities: -.. py:function:: wgpu.backends.wgpu_native.request_device(adapter, trace_path, *, label="", required_features, required_limits, default_queue) +.. 
py:function:: wgpu.backends.wgpu_native.request_device_sync(adapter, trace_path, *, label="", required_features, required_limits, default_queue) An alternative to :func:`wgpu.GPUAdapter.request_adapter`, that streams a trace of all low level calls to disk, so the visualization can be replayed (also on other systems), @@ -88,7 +88,7 @@ You must tell the adapter to create a device that supports push constants, and you must tell it the number of bytes of push constants that you are using. Overestimating is okay:: - device = adapter.request_device( + device = adapter.request_device_sync( required_features=["push-constants"], required_limits={"max-push-constant-size": 256}, ) diff --git a/docs/guide.rst b/docs/guide.rst index 118443ba..5f03221b 100644 --- a/docs/guide.rst +++ b/docs/guide.rst @@ -43,8 +43,8 @@ you can obtain a device. .. code-block:: py - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - device = adapter.request_device() + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + device = adapter.request_device_sync() The ``wgpu.gpu`` object is the API entrypoint (:class:`wgpu.GPU`). It contains just a handful of functions, including ``request_adapter()``. The device is used to create most other GPU objects. @@ -232,7 +232,7 @@ You can run your application via RenderDoc, which is able to capture a frame, including all API calls, objects and the complete pipeline state, and display all of that information within a nice UI. -You can use ``adapter.request_device()`` to provide a directory path +You can use ``adapter.request_device_sync()`` to provide a directory path where a trace of all API calls will be written. This trace can then be used to re-play your use-case elsewhere (it's cross-platform). 
diff --git a/docs/start.rst b/docs/start.rst index 6ccc490b..218a9ea3 100644 --- a/docs/start.rst +++ b/docs/start.rst @@ -99,7 +99,7 @@ You can verify whether the `"DiscreteGPU"` adapters are found: import wgpu import pprint - for a in wgpu.gpu.enumerate_adapters(): + for a in wgpu.gpu.enumerate_adapters_sync(): pprint.pprint(a.info) If you are using a remote frame buffer via `jupyter-rfb `_ we also recommend installing the following for optimal performance: diff --git a/docs/wgpu.rst b/docs/wgpu.rst index 8df5cf63..04900403 100644 --- a/docs/wgpu.rst +++ b/docs/wgpu.rst @@ -34,10 +34,14 @@ Some arguments have a default value. Most do not. Differences from WebGPU ----------------------- -This API is derived from the WebGPU spec, but differs in a few ways. -For example, methods that in WebGPU accept a descriptor/struct/dict, -here accept the fields in that struct as keyword arguments. +This API is derived from the WebGPU spec, but differs in a few ways: +* Method names are snake_case (instead of camelCase). +* Enums and flags are represented as objects with snake_case field names. +* Methods that in WebGPU accept a single descriptor, will accept the fields of that descriptor as keyword arguments. +* Async methods have a different name, read more below. + +Further changes: .. autodata:: wgpu._classes.apidiff :annotation: Differences of base API: @@ -47,6 +51,21 @@ Each backend may implement extra functionality on top of the base API. This is listed in :doc:`backends `. +Async code +---------- + +Some methods and properties in the WebGPU API are asynchronous. In wgpu-py, these methods +are always suffixed with ``_async``. These methods also have a synchronous variant, which +come in two flavours: + +* If the method has the plain method name (no suffix), the synchronous method is + available in WebGPU as well. There's no problem to use this variant.
+* If the method ends with ``_sync``, this is a convenience method, added in + wgpu-py to fully support synchronous code. However, the synchronous variant is + not part of the WebGPU spec, and as a consequence, code that uses this method + is less portable (to e.g. pyodide/pyscript). + + Overview -------- diff --git a/examples/compute_noop.py b/examples/compute_noop.py index 8e9d08b5..9be2b906 100644 --- a/examples/compute_noop.py +++ b/examples/compute_noop.py @@ -62,7 +62,7 @@ device = wgpu.utils.get_default_device() # Show all available adapters -adapters = wgpu.gpu.enumerate_adapters() +adapters = wgpu.gpu.enumerate_adapters_sync() for a in adapters: print(a.summary) @@ -73,7 +73,7 @@ # adapter = a # break # assert adapter is not None -# device = adapter.request_device() +# device = adapter.request_device_sync() # %% cshader = device.create_shader_module(code=shader_source) diff --git a/examples/compute_timestamps.py b/examples/compute_timestamps.py index b22564d9..60afddc9 100644 --- a/examples/compute_timestamps.py +++ b/examples/compute_timestamps.py @@ -41,10 +41,12 @@ for i in range(n): data2[i] = i * 2 -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") # Request a device with the timestamp_query feature, so we can profile our computation -device = adapter.request_device(required_features=[wgpu.FeatureName.timestamp_query]) +device = adapter.request_device_sync( + required_features=[wgpu.FeatureName.timestamp_query] +) cshader = device.create_shader_module(code=shader_source) # Create buffer objects, input buffer is mapped. 
diff --git a/examples/cube.py b/examples/cube.py index c1b1a81c..09936fe9 100644 --- a/examples/cube.py +++ b/examples/cube.py @@ -12,7 +12,7 @@ print("Available adapters on this system:") -for a in wgpu.gpu.enumerate_adapters(): +for a in wgpu.gpu.enumerate_adapters_sync(): print(a.summary) @@ -22,8 +22,8 @@ canvas = WgpuCanvas(title="wgpu cube", size=(640, 480)) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() # Prepare present context present_context = canvas.get_context() diff --git a/examples/imgui_backend_sea.py b/examples/imgui_backend_sea.py index f21b7996..77d6af87 100644 --- a/examples/imgui_backend_sea.py +++ b/examples/imgui_backend_sea.py @@ -15,9 +15,9 @@ canvas = WgpuCanvas(title="imgui_sea", size=(800, 450), max_fps=60) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") -device = adapter.request_device() +device = adapter.request_device_sync() # Prepare present context present_context = canvas.get_context() diff --git a/examples/imgui_basic_example.py b/examples/imgui_basic_example.py index 0012a942..50873dd7 100644 --- a/examples/imgui_basic_example.py +++ b/examples/imgui_basic_example.py @@ -15,8 +15,8 @@ canvas = WgpuCanvas(title="imgui", size=(640, 480)) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() app_state = {"text": "Hello, World\nLorem ipsum, etc.\netc."} imgui_renderer = ImguiRenderer(device, canvas) diff --git a/examples/imgui_cmap_picker.py b/examples/imgui_cmap_picker.py index 2c71e58a..3c91d18f 100644 --- 
a/examples/imgui_cmap_picker.py +++ b/examples/imgui_cmap_picker.py @@ -26,8 +26,8 @@ canvas = WgpuCanvas(title="imgui", size=(512, 256)) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() imgui_renderer = ImguiRenderer(device, canvas) diff --git a/examples/imgui_multi_canvas.py b/examples/imgui_multi_canvas.py index 8495666c..37972cfe 100644 --- a/examples/imgui_multi_canvas.py +++ b/examples/imgui_multi_canvas.py @@ -17,8 +17,8 @@ canvases = [canvas1, canvas2, canvas3] # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() # create a imgui renderer for each canvas imgui_renderer1 = ImguiRenderer(device, canvas1) diff --git a/examples/imgui_renderer_sea.py b/examples/imgui_renderer_sea.py index 3fba3094..4ebba4a0 100644 --- a/examples/imgui_renderer_sea.py +++ b/examples/imgui_renderer_sea.py @@ -15,9 +15,9 @@ canvas = WgpuCanvas(title="imgui_sea", size=(800, 450), max_fps=60) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") -device = adapter.request_device() +device = adapter.request_device_sync() # Prepare present context present_context = canvas.get_context() diff --git a/examples/triangle.py b/examples/triangle.py index 43e82d89..11f43c62 100644 --- a/examples/triangle.py +++ b/examples/triangle.py @@ -62,8 +62,8 @@ def main(canvas, power_preference="high-performance", limits=None): """Regular function to setup a viz on the given canvas.""" - adapter = wgpu.gpu.request_adapter(power_preference=power_preference) - device = 
adapter.request_device(required_limits=limits) + adapter = wgpu.gpu.request_adapter_sync(power_preference=power_preference) + device = adapter.request_device_sync(required_limits=limits) return _main(canvas, device) diff --git a/examples/triangle_glsl.py b/examples/triangle_glsl.py index 146a525e..67b2638e 100644 --- a/examples/triangle_glsl.py +++ b/examples/triangle_glsl.py @@ -47,8 +47,8 @@ def main(canvas, power_preference="high-performance", limits=None): """Regular function to setup a viz on the given canvas.""" - adapter = wgpu.gpu.request_adapter(power_preference=power_preference) - device = adapter.request_device(required_limits=limits) + adapter = wgpu.gpu.request_adapter_sync(power_preference=power_preference) + device = adapter.request_device_sync(required_limits=limits) return _main(canvas, device) diff --git a/tests/test_api.py b/tests/test_api.py index 313ce5b6..52599f62 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -16,13 +16,13 @@ def test_basic_api(): assert isinstance(wgpu.gpu, wgpu.GPU) # Entrypoint funcs - assert wgpu.gpu.request_adapter + assert wgpu.gpu.request_adapter_sync assert wgpu.gpu.request_adapter_async - code1 = wgpu.GPU.request_adapter.__code__ + code1 = wgpu.GPU.request_adapter_sync.__code__ code2 = wgpu.GPU.request_adapter_async.__code__ - nargs1 = code1.co_argcount + code1.co_kwonlyargcount - assert code1.co_varnames[:nargs1] == code2.co_varnames + # nargs1 = code1.co_argcount + code1.co_kwonlyargcount + assert code1.co_varnames == code2.co_varnames assert repr(wgpu.classes.GPU()).startswith( " 0 # Check adapter summaries @@ -353,13 +353,13 @@ def test_enumerate_adapters(): # Check that we can get a device from each adapter for adapter in adapters: - d = adapter.request_device() + d = adapter.request_device_sync() assert isinstance(d, wgpu.backends.wgpu_native.GPUDevice) @mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") def test_adapter_destroy(): - adapter = 
wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") assert adapter._internal is not None adapter.__del__() assert adapter._internal is None @@ -401,9 +401,9 @@ def are_features_wgpu_legal(features): """Returns true if the list of features is legal. Determining whether a specific set of features is implemented on a particular device would make the tests fragile, so we only verify that the names are legal feature names.""" - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") try: - adapter.request_device(required_features=features) + adapter.request_device_sync(required_features=features) return True except RuntimeError as e: assert "Unsupported features were requested" in str(e) @@ -440,9 +440,9 @@ def are_limits_wgpu_legal(limits): """Returns true if the list of features is legal. Determining whether a specific set of features is implemented on a particular device would make the tests fragile, so we only verify that the names are legal feature names.""" - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") try: - adapter.request_device(required_limits=limits) + adapter.request_device_sync(required_limits=limits) return True except RuntimeError as e: assert "Unsupported features were requested" in str(e) diff --git a/tests/test_wgpu_native_buffer.py b/tests/test_wgpu_native_buffer.py index 31f9f5e3..22d25cf6 100644 --- a/tests/test_wgpu_native_buffer.py +++ b/tests/test_wgpu_native_buffer.py @@ -35,7 +35,7 @@ def test_buffer_init1(): ) # Download from buffer to CPU - buf.map(wgpu.MapMode.READ) + buf.map_sync(wgpu.MapMode.READ) wgpu.backends.wgpu_native._api.libf.wgpuDevicePoll( buf._device._internal, True, wgpu.backends.wgpu_native.ffi.NULL ) @@ -74,7 +74,7 @@ def 
test_buffer_init2(): buf.unmap() # Download from buffer to CPU - buf.map("read") + buf.map_sync("read") data2 = buf.read_mapped() buf.unmap() print(data2.tobytes()) @@ -108,7 +108,7 @@ def test_buffer_init3(): buf = device.create_buffer(size=len(data1), usage="MAP_WRITE | COPY_SRC") # Write data to it - buf.map("write") + buf.map_sync("write") buf.write_mapped(data1) buf.unmap() @@ -124,7 +124,7 @@ def test_buffer_init3(): device.queue.write_buffer(buf, 0, data1) # Download from buffer to CPU - buf.map("read") + buf.map_sync("read") data2 = buf.read_mapped() buf.unmap() assert data1 == data2 @@ -149,7 +149,7 @@ def test_consequitive_writes1(): # Write in parts for i in range(4): - buf.map("write") + buf.map_sync("write") buf.write_mapped(f"{i+1}".encode() * 8, i * 8) buf.unmap() @@ -175,7 +175,7 @@ def test_consequitive_writes2(): ) # Write in parts - buf.map("write") + buf.map_sync("write") for i in range(4): buf.write_mapped(f"{i+1}".encode() * 8, i * 8) buf.unmap() @@ -205,13 +205,13 @@ def test_consequitive_reads(): # Read in parts, the inefficient way for i in range(4): - buf.map("read") + buf.map_sync("read") data = buf.read_mapped(i * 8, 8) assert data == f"{i+1}".encode() * 8 buf.unmap() # Read in parts, the efficient way - buf.map("read") + buf.map_sync("read") for i in range(4): data = buf.read_mapped(i * 8, 8) assert data == f"{i+1}".encode() * 8 @@ -234,15 +234,15 @@ def test_buffer_mapping_fails(): buf.read_mapped() # Not mapped with raises(ValueError): - buf.map("boo") # Invalid map mode + buf.map_sync("boo") # Invalid map mode - buf.map("write", 0, 28) + buf.map_sync("write", 0, 28) with raises(RuntimeError): - buf.map("write") # Cannot map twice + buf.map_sync("write") # Cannot map twice with raises(RuntimeError): - buf.map("read") # Cannot map twice + buf.map_sync("read") # Cannot map twice with raises(RuntimeError): buf.read_mapped() # Not mapped in read mode @@ -296,13 +296,13 @@ def test_buffer_mapping_fails(): with raises(RuntimeError): 
buf.write_mapped(data) # not mapped - buf.map("read", 8, 20) + buf.map_sync("read", 8, 20) with raises(RuntimeError): - buf.map("read") # Cannot map twice + buf.map_sync("read") # Cannot map twice with raises(RuntimeError): - buf.map("write") # Cannot map twice + buf.map_sync("write") # Cannot map twice with raises(RuntimeError): buf.write_mapped(data) # not mapped in write mode @@ -334,7 +334,7 @@ def test_buffer_read_no_copy(): device.queue.write_buffer(buf, 0, data1) # Download from buffer to CPU - buf.map("read") + buf.map_sync("read") data2 = buf.read_mapped(copy=False) data3 = buf.read_mapped(0, 8, copy=False) data4 = buf.read_mapped(8, 8, copy=False) @@ -502,7 +502,7 @@ def test_buffer_map_read_and_write(): # Upload data1 = b"abcdefghijkl" - buf1.map("write") + buf1.map_sync("write") buf1.write_mapped(data1) buf1.unmap() @@ -512,7 +512,7 @@ def test_buffer_map_read_and_write(): device.queue.submit([command_encoder.finish()]) # Download - buf2.map("read") + buf2.map_sync("read") data2 = buf2.read_mapped() buf2.unmap() assert data1 == data2 diff --git a/tests/test_wgpu_native_query_set.py b/tests/test_wgpu_native_query_set.py index 805ebba6..00ed8fd8 100644 --- a/tests/test_wgpu_native_query_set.py +++ b/tests/test_wgpu_native_query_set.py @@ -30,8 +30,8 @@ def test_query_set(): for i in range(n): data1[i] = float(i) - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - device = adapter.request_device( + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + device = adapter.request_device_sync( required_features=[wgpu.FeatureName.timestamp_query] ) diff --git a/tests/test_wgpu_native_texture.py b/tests/test_wgpu_native_texture.py index 6bd300e0..0861fdb5 100644 --- a/tests/test_wgpu_native_texture.py +++ b/tests/test_wgpu_native_texture.py @@ -58,7 +58,7 @@ def test_do_a_copy_roundtrip(): # Upload from CPU to buffer # assert buf1.state == "unmapped" - # mapped_data = buf1.map(wgpu.MapMode.WRITE) + # mapped_data = 
buf1.map_sync(wgpu.MapMode.WRITE) # assert buf1.state == "mapped" # mapped_data.cast("f")[:] = data1 # buf1.unmap() @@ -97,7 +97,7 @@ def test_do_a_copy_roundtrip(): # Download from buffer to CPU # assert buf5.state == "unmapped" # assert buf5.map_mode == 0 - # result_data = buf5.map(wgpu.MapMode.READ) # a memoryview + # result_data = buf5.map_sync(wgpu.MapMode.READ) # a memoryview # assert buf5.state == "mapped" # assert buf5.map_mode == wgpu.MapMode.READ # buf5.unmap() @@ -115,7 +115,7 @@ def test_do_a_copy_roundtrip(): # Upload from CPU to buffer # assert buf1.state == "unmapped" # assert buf1.map_mode == 0 - # mapped_data = buf1.map(wgpu.MapMode.WRITE) + # mapped_data = buf1.map_sync(wgpu.MapMode.WRITE) # assert buf1.state == "mapped" # assert buf1.map_mode == wgpu.MapMode.WRITE # mapped_data.cast("f")[:] = data3 @@ -150,7 +150,7 @@ def test_do_a_copy_roundtrip(): # Download from buffer to CPU # assert buf5.state == "unmapped" - # result_data = buf5.map(wgpu.MapMode.READ) # always an uint8 array + # result_data = buf5.map_sync(wgpu.MapMode.READ) # always an uint8 array # assert buf5.state == "mapped" # buf5.unmap() # assert buf5.state == "unmapped" diff --git a/tests/test_wgpu_vertex_instance.py b/tests/test_wgpu_vertex_instance.py index ecda57dc..dfba3e42 100644 --- a/tests/test_wgpu_vertex_instance.py +++ b/tests/test_wgpu_vertex_instance.py @@ -72,16 +72,16 @@ class Runner: @classmethod def is_usable(cls): - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") return set(cls.REQUIRED_FEATURES) <= adapter.features def __init__(self): - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") features = [ *self.REQUIRED_FEATURES, *[x for x in self.OPTIONAL_FEATURES if x in adapter.features], ] - self.device = adapter.request_device(required_features=features) + 
self.device = adapter.request_device_sync(required_features=features) self.output_texture = self.device.create_texture( # Actual size is immaterial. Could just be 1x1 size=[128, 128], diff --git a/tests_mem/test_destroy.py b/tests_mem/test_destroy.py index 3736624b..3424cc1f 100644 --- a/tests_mem/test_destroy.py +++ b/tests_mem/test_destroy.py @@ -26,7 +26,7 @@ def test_destroy_device(n): adapter = DEVICE.adapter for i in range(n): - d = adapter.request_device() + d = adapter.request_device_sync() d.destroy() # NOTE: destroy is not yet implemented in wgpu-natice - this does not actually do anything yet yield d @@ -57,7 +57,7 @@ def test_destroy_buffer(n): # Uncomment the following lines to see. These are commented because it makes wgpu-core create a command-buffer. # try: - # b.map("READ") + # b.map_sync("READ") # except wgpu.GPUValidationError as err: # error = err # assert "destroyed" in error.message.lower() diff --git a/tests_mem/test_objects.py b/tests_mem/test_objects.py index 6aee8068..ce34a10a 100644 --- a/tests_mem/test_objects.py +++ b/tests_mem/test_objects.py @@ -20,7 +20,7 @@ def test_release_adapter(n): yield {} for i in range(n): - yield wgpu.gpu.request_adapter(power_preference="high-performance") + yield wgpu.gpu.request_adapter_sync(power_preference="high-performance") @create_and_release @@ -33,7 +33,7 @@ def test_release_device(n): } adapter = DEVICE.adapter for i in range(n): - d = adapter.request_device() + d = adapter.request_device_sync() yield d @@ -197,7 +197,7 @@ def test_release_queue(n): } adapter = DEVICE.adapter for i in range(n): - d = adapter.request_device() + d = adapter.request_device_sync() q = d.queue d._queue = None # detach yield q diff --git a/wgpu/__init__.py b/wgpu/__init__.py index 0c9ea7cd..646eef13 100644 --- a/wgpu/__init__.py +++ b/wgpu/__init__.py @@ -25,5 +25,5 @@ def request_adapter(*args, **kwargs): """Deprecated!""" raise DeprecationWarning( - "wgpu.request_adapter() is deprecated! 
Use wgpu.gpu.request_adapter() instead." + "wgpu.request_adapter() is deprecated! Use wgpu.gpu.request_adapter_sync() instead." ) diff --git a/wgpu/_classes.py b/wgpu/_classes.py index 3bd6ba23..2504554f 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -81,23 +81,17 @@ class GPU: # IDL: Promise requestAdapter(optional GPURequestAdapterOptions options = {}); @apidiff.change("arguments include canvas") - def request_adapter( + def request_adapter_sync( self, *, power_preference=None, force_fallback_adapter=False, canvas=None ): - """Create a `GPUAdapter`, the object that represents an abstract wgpu - implementation, from which one can request a `GPUDevice`. + """Sync version of `request_adapter_async()`. - Arguments: - power_preference (PowerPreference): "high-performance" or "low-power". - force_fallback_adapter (bool): whether to use a (probably CPU-based) - fallback adapter. - canvas (WgpuCanvasInterface): The canvas that the adapter should - be able to render to. This can typically be left to None. + Provided by wgpu-py, but not compatible with WebGPU. """ # If this method gets called, no backend has been loaded yet, let's do that now! from .backends.auto import gpu # noqa - return gpu.request_adapter( + return gpu.request_adapter_sync( power_preference=power_preference, force_fallback_adapter=force_fallback_adapter, canvas=canvas, @@ -108,15 +102,39 @@ def request_adapter( async def request_adapter_async( self, *, power_preference=None, force_fallback_adapter=False, canvas=None ): - """Async version of `request_adapter()`.""" - return self.request_adapter( + """Create a `GPUAdapter`, the object that represents an abstract wgpu + implementation, from which one can request a `GPUDevice`. + + Arguments: + power_preference (PowerPreference): "high-performance" or "low-power". + force_fallback_adapter (bool): whether to use a (probably CPU-based) + fallback adapter. + canvas (WgpuCanvasInterface): The canvas that the adapter should + be able to render to. 
This can typically be left to None. + """ + # If this method gets called, no backend has been loaded yet, let's do that now! + from .backends.auto import gpu # noqa + + return await gpu.request_adapter_async( power_preference=power_preference, force_fallback_adapter=force_fallback_adapter, canvas=canvas, ) @apidiff.add("Method useful for multi-gpu environments") - def enumerate_adapters(self): + def enumerate_adapters_sync(self): + """Sync version of `enumerate_adapters_async()`. + + Provided by wgpu-py, but not compatible with WebGPU. + """ + + # If this method gets called, no backend has been loaded yet, let's do that now! + from .backends.auto import gpu # noqa + + return gpu.enumerate_adapters_sync() + + @apidiff.add("Method useful for multi-gpu environments") + async def enumerate_adapters_async(self): """Get a list of adapter objects available on the current system. An adapter can then be selected (e.g. using it's summary), and a device @@ -143,12 +161,7 @@ def enumerate_adapters(self): # If this method gets called, no backend has been loaded yet, let's do that now! from .backends.auto import gpu # noqa - return gpu.enumerate_adapters() - - @apidiff.add("Method useful on desktop") - async def enumerate_adapters_async(self): - """Async version of enumerate_adapters.""" - return self.enumerate_adapters() + return await gpu.enumerate_adapters_async() # IDL: GPUTextureFormat getPreferredCanvasFormat(); @apidiff.change("Disabled because we put it on the canvas context") @@ -564,7 +577,7 @@ def limits(self): return self._limits # IDL: Promise requestDevice(optional GPUDeviceDescriptor descriptor = {}); - def request_device( + def request_device_sync( self, *, label="", @@ -572,13 +585,9 @@ def request_device( required_limits: "Dict[str, int]" = {}, default_queue: "structs.QueueDescriptor" = {}, ): - """Request a `GPUDevice` from the adapter. + """Sync version of `request_device_async()`. - Arguments: - label (str): A human readable label. Optional. 
- required_features (list of str): the features (extensions) that you need. Default []. - required_limits (dict): the various limits that you need. Default {}. - default_queue (structs.QueueDescriptor): Descriptor for the default queue. Optional. + Provided by wgpu-py, but not compatible with WebGPU. """ raise NotImplementedError() @@ -591,7 +600,14 @@ async def request_device_async( required_limits: "Dict[str, int]" = {}, default_queue: "structs.QueueDescriptor" = {}, ): - """Async version of `request_device()`.""" + """Request a `GPUDevice` from the adapter. + + Arguments: + label (str): A human readable label. Optional. + required_features (list of str): the features (extensions) that you need. Default []. + required_limits (dict): the various limits that you need. Default {}. + default_queue (structs.QueueDescriptor): Descriptor for the default queue. Optional. + """ raise NotImplementedError() def _release(self): @@ -665,7 +681,7 @@ class GPUDevice(GPUObjectBase): from it: when the device is lost, all objects created from it become invalid. - Create a device using `GPUAdapter.request_device()` or + Create a device using `GPUAdapter.request_device_sync()` or `GPUAdapter.request_device_async()`. """ @@ -709,18 +725,40 @@ def adapter(self): # IDL: readonly attribute Promise lost; @apidiff.hide("Not a Pythonic API") @property - def lost(self): + def lost_sync(self): + """Sync version of `lost`. + + Provided by wgpu-py, but not compatible with WebGPU. + """ + return self._get_lost_sync() + + # IDL: readonly attribute Promise lost; + @apidiff.hide("Not a Pythonic API") + @property + async def lost_async(self): """Provides information about why the device is lost.""" # In JS you can device.lost.then ... to handle lost devices. # We may want to eventually support something similar async-like? # at some point + + # Properties don't get repeated at _api.py, so we use a proxy method. 
+ return await self._get_lost_async() + + def _get_lost_sync(self): + raise NotImplementedError() + + async def _get_lost_async(self): raise NotImplementedError() # IDL: attribute EventHandler onuncapturederror; @apidiff.hide("Specific to browsers") @property def onuncapturederror(self): - """Method called when an error is capured?""" + """Event handler. + + In JS you'd do ``gpuDevice.addEventListener('uncapturederror', ...)``. We'd need + to figure out how to do this in Python. + """ raise NotImplementedError() # IDL: undefined destroy(); @@ -1002,7 +1040,9 @@ async def create_compute_pipeline_async( layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", compute: "structs.ProgrammableStage", ): - """Async version of create_compute_pipeline().""" + """Async version of `create_compute_pipeline()`. + + Both versions are compatible with WebGPU.""" raise NotImplementedError() # IDL: GPURenderPipeline createRenderPipeline(GPURenderPipelineDescriptor descriptor); @@ -1161,7 +1201,9 @@ async def create_render_pipeline_async( multisample: "structs.MultisampleState" = {}, fragment: "structs.FragmentState" = None, ): - """Async version of create_render_pipeline().""" + """Async version of `create_render_pipeline()`. + + Both versions are compatible with WebGPU.""" raise NotImplementedError() # IDL: GPUCommandEncoder createCommandEncoder(optional GPUCommandEncoderDescriptor descriptor = {}); @@ -1215,7 +1257,16 @@ def push_error_scope(self, filter): # IDL: Promise popErrorScope(); @apidiff.hide - def pop_error_scope(self): + def pop_error_scope_sync(self): + """Sync version of `pop_error_scope_async()`. + + Provided by wgpu-py, but not compatible with WebGPU. + """ + raise NotImplementedError() + + # IDL: Promise popErrorScope(); + @apidiff.hide + async def pop_error_scope_async(self): """Pops a GPU error scope from the stack.""" raise NotImplementedError() @@ -1290,7 +1341,15 @@ def map_state(self): # an array-like object that exposes the shared memory.
# IDL: Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); - def map(self, mode, offset=0, size=None): + def map_sync(self, mode, offset=0, size=None): + """Sync version of `map_async()`. + + Provided by wgpu-py, but not compatible with WebGPU. + """ + raise NotImplementedError() + + # IDL: Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); + async def map_async(self, mode, offset=0, size=None): """Maps the given range of the GPUBuffer. When this call returns, the buffer content is ready to be @@ -1307,11 +1366,6 @@ def map(self, mode, offset=0, size=None): """ raise NotImplementedError() - # IDL: Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); - async def map_async(self, mode, offset=0, size=None): - """Alternative version of map().""" - raise NotImplementedError() - # IDL: undefined unmap(); def unmap(self): """Unmaps the buffer. @@ -1617,7 +1671,15 @@ class GPUShaderModule(GPUObjectBase): """ # IDL: Promise getCompilationInfo(); - def get_compilation_info(self): + def get_compilation_info_sync(self): + """Sync version of `get_compilation_info_async()`. + + Provided by wgpu-py, but not compatible with WebGPU. + """ + raise NotImplementedError() + + # IDL: Promise getCompilationInfo(); + async def get_compilation_info_async(self): """Get shader compilation info. Always returns empty list at the moment.""" # How can this return shader errors if one cannot create a # shader module when the shader source has errors? @@ -2159,7 +2221,7 @@ def write_buffer(self, buffer, buffer_offset, data, data_offset=0, size=None): Alignment: the buffer offset must be a multiple of 4, the total size to write must be a multiple of 4 bytes. - Also see `GPUBuffer.map()`. + Also see `GPUBuffer.map_sync()` and `GPUBuffer.map_async()`. 
""" raise NotImplementedError() @@ -2177,7 +2239,7 @@ def read_buffer(self, buffer, buffer_offset=0, size=None): and then maps that buffer to read the data. The given buffer's usage must include COPY_SRC. - Also see `GPUBuffer.map()`. + Also see `GPUBuffer._sync()` and `GPUBuffer._async()`. """ raise NotImplementedError() @@ -2222,16 +2284,24 @@ def read_texture(self, source, data_layout, size): """ raise NotImplementedError() - # IDL: Promise onSubmittedWorkDone(); - def on_submitted_work_done(self): - """TODO""" - raise NotImplementedError() - # IDL: undefined copyExternalImageToTexture( GPUImageCopyExternalImage source, GPUImageCopyTextureTagged destination, GPUExtent3D copySize); @apidiff.hide("Specific to browsers") def copy_external_image_to_texture(self, source, destination, copy_size): raise NotImplementedError() + # IDL: Promise onSubmittedWorkDone(); + def on_submitted_work_done_sync(self): + """Sync version of `on_submitted_work_done_async()`. + + Provided by wgpu-py, but not compatible with WebGPU. 
+ """ + raise NotImplementedError() + + # IDL: Promise onSubmittedWorkDone(); + async def on_submitted_work_done_async(self): + """TODO""" + raise NotImplementedError() + # %% Further non-GPUObject classes @@ -2447,5 +2517,33 @@ def _set_repr_methods(): cls.__repr__ = generic_repr +_async_warnings = {} + + +def _set_compat_methods_for_async_methods(): + def create_new_method(name): + def proxy_method(self, *args, **kwargs): + warning = _async_warnings.pop(name, None) + if warning: + logger.warning(warning) + return getattr(self, name)(*args, **kwargs) + + proxy_method.__name__ = name + "_backwards_compat_proxy" + proxy_method.__doc__ = f"Backwards compatibile method for {name}()" + return proxy_method + + m = globals() + for class_name in __all__: + cls = m[class_name] + for name, func in list(cls.__dict__.items()): + if name.endswith("_sync") and callable(func): + old_name = name[:-5] + setattr(cls, old_name, create_new_method(name)) + _async_warnings[name] = ( + f"WGPU: {old_name}() is deprecated, use {name}() instead." 
+ ) + + _seed_object_counts() _set_repr_methods() +_set_compat_methods_for_async_methods() diff --git a/wgpu/backends/__init__.py b/wgpu/backends/__init__.py index 3e78dc0f..577708cd 100644 --- a/wgpu/backends/__init__.py +++ b/wgpu/backends/__init__.py @@ -14,7 +14,7 @@ def _register_backend(gpu): root_namespace = sys.modules["wgpu"].__dict__ needed_attributes = ( - "request_adapter", + "request_adapter_sync", "request_adapter_async", "wgsl_language_features", ) diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py index d19d6c24..d8842abf 100644 --- a/wgpu/backends/js_webgpu/__init__.py +++ b/wgpu/backends/js_webgpu/__init__.py @@ -12,7 +12,7 @@ class GPU: - def request_adapter(self, **parameters): + def request_adapter_sync(self, **parameters): raise NotImplementedError("Cannot use sync API functions in JS.") async def request_adapter_async(self, **parameters): diff --git a/wgpu/backends/rs.py b/wgpu/backends/rs.py index a2e4a187..cfe3f2aa 100644 --- a/wgpu/backends/rs.py +++ b/wgpu/backends/rs.py @@ -6,7 +6,7 @@ WARNING: wgpu.backends.rs is deprecated. Instead you can use: - import wgpu.backends.wgpu_native to use the backend by its new name. - import wgpu.backends.auto to do the same, but simpler and more future proof. -- simply use wgpu.gpu.request_adapter() to auto-load the backend. +- simply use wgpu.gpu.request_adapter_sync() to auto-load the backend. 
""".strip() print(_deprecation_msg) diff --git a/wgpu/backends/wgpu_native/__init__.py b/wgpu/backends/wgpu_native/__init__.py index 430200dd..0e81c859 100644 --- a/wgpu/backends/wgpu_native/__init__.py +++ b/wgpu/backends/wgpu_native/__init__.py @@ -18,4 +18,5 @@ gpu = GPU() # noqa: F405 _register_backend(gpu) # noqa: F405 -from .extras import enumerate_adapters, request_device # noqa: F401, E402 +from .extras import enumerate_adapters # noqa: F401, E402 +from .extras import request_device_sync, request_device # noqa: F401, E402 diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py index f360aec0..0326ba09 100644 --- a/wgpu/backends/wgpu_native/_api.py +++ b/wgpu/backends/wgpu_native/_api.py @@ -46,6 +46,11 @@ # %% Helper functions and objects +def check_can_use_sync_variants(): + if False: # placeholder, let's implement a little wgpu config thingy + raise RuntimeError("Disallowed use of '_sync' API.") + + # Object to be able to bind the lifetime of objects to other objects _refs_per_struct = WeakKeyDictionary() @@ -304,7 +309,21 @@ def _get_features(id: int, device: bool = False, adapter: bool = False): class GPU(classes.GPU): - def request_adapter( + + def request_adapter_sync( + self, *, power_preference=None, force_fallback_adapter=False, canvas=None + ): + """Async version of ``request_adapter_async()``. + This is the implementation based on wgpu-native. + """ + check_can_use_sync_variants() + return self._request_adapter( + power_preference=power_preference, + force_fallback_adapter=force_fallback_adapter, + canvas=canvas, + ) + + async def request_adapter_async( self, *, power_preference=None, force_fallback_adapter=False, canvas=None ): """Create a `GPUAdapter`, the object that represents an abstract wgpu @@ -319,7 +338,15 @@ def request_adapter( canvas (WgpuCanvasInterface): The canvas that the adapter should be able to render to. This can typically be left to None. 
""" + return self._request_adapter( + power_preference=power_preference, + force_fallback_adapter=force_fallback_adapter, + canvas=canvas, + ) # no-cover + def _request_adapter( + self, *, power_preference=None, force_fallback_adapter=False, canvas=None + ): # ----- Surface ID # Get surface id that the adapter must be compatible with. If we @@ -382,22 +409,20 @@ def callback(status, result, message, userdata): return self._create_adapter(adapter_id) - async def request_adapter_async( - self, *, power_preference=None, force_fallback_adapter=False, canvas=None - ): - """Async version of ``request_adapter()``. + def enumerate_adapters_sync(self): + """Sync version of ``enumerate_adapters_async()``. This is the implementation based on wgpu-native. """ - return self.request_adapter( - power_preference=power_preference, - force_fallback_adapter=force_fallback_adapter, - canvas=canvas, - ) # no-cover + check_can_use_sync_variants() + return self._enumerate_adapters() - def enumerate_adapters(self): + async def enumerate_adapters_async(self): """Get a list of adapter objects available on the current system. This is the implementation based on wgpu-native. """ + return self._enumerate_adapters() + + def _enumerate_adapters(self): # The first call is to get the number of adapters, and the second call # is to get the actual adapters. Note that the second arg (now NULL) can # be a `WGPUInstanceEnumerateAdapterOptions` to filter by backend. 
@@ -807,7 +832,8 @@ class GPUAdapterInfo(classes.GPUAdapterInfo): class GPUAdapter(classes.GPUAdapter): - def request_device( + + def request_device_sync( self, *, label="", @@ -815,12 +841,31 @@ def request_device( required_limits: "Dict[str, int]" = {}, default_queue: "structs.QueueDescriptor" = {}, ): + check_can_use_sync_variants() if default_queue: check_struct("QueueDescriptor", default_queue) return self._request_device( label, required_features, required_limits, default_queue, "" ) + async def request_device_async( + self, + *, + label="", + required_features: "List[enums.FeatureName]" = [], + required_limits: "Dict[str, int]" = {}, + default_queue: "structs.QueueDescriptor" = {}, + ): + if default_queue: + check_struct("QueueDescriptor", default_queue) + return self._request_device( + label, + required_features=required_features, + required_limits=required_limits, + default_queue=default_queue, + trace_path="", + ) + def _request_device( self, label, required_features, required_limits, default_queue, trace_path ): @@ -1007,20 +1051,6 @@ def callback(status, result, message, userdata): return device - async def request_device_async( - self, - *, - label="", - required_features: "List[enums.FeatureName]" = [], - required_limits: "Dict[str, int]" = {}, - default_queue: "structs.QueueDescriptor" = {}, - ): - if default_queue: - check_struct("QueueDescriptor", default_queue) - return self._request_device( - label, required_features, required_limits, default_queue, "" - ) # no-cover - def _release(self): if self._internal is not None and libf is not None: self._internal, internal = None, self._internal @@ -1842,6 +1872,13 @@ def create_query_set(self, *, label="", type: "enums.QueryType", count: int): query_id = libf.wgpuDeviceCreateQuerySet(self._internal, query_set_descriptor) return GPUQuerySet(label, query_id, self._internal, type, count) + def _get_lost_sync(self): + check_can_use_sync_variants() + raise NotImplementedError() + + async def _get_lost_async(self): + raise
NotImplementedError() + def destroy(self): # Note: not yet implemented in wgpu-core, the wgpu-native func is a noop internal = self._internal @@ -1900,7 +1937,14 @@ def _check_range(self, offset, size): raise ValueError("Mapped range must not extend beyond total buffer size.") return offset, size - def map(self, mode, offset=0, size=None): + def map_sync(self, mode, offset=0, size=None): + check_can_use_sync_variants() + return self._map(mode, offset, size) + + async def map_async(self, mode, offset=0, size=None): + return self._map(mode, offset, size) # for now + + def _map(self, mode, offset=0, size=None): sync_on_read = True # Check mode @@ -1950,9 +1994,6 @@ def callback(status_, user_data_p): self._mapped_status = offset, offset + size, mode self._mapped_memoryviews = [] - async def map_async(self, mode, offset=0, size=None): - return self.map(mode, offset, size) # for now - def unmap(self): if self._map_state != enums.BufferMapState.mapped: raise RuntimeError("Can only unmap a buffer if its currently mapped.") @@ -2175,7 +2216,14 @@ class GPUShaderModule(classes.GPUShaderModule, GPUObjectBase): # GPUObjectBaseMixin _release_function = libf.wgpuShaderModuleRelease - def get_compilation_info(self): + def get_compilation_info_sync(self): + check_can_use_sync_variants() + return self._get_compilation_info() + + async def get_compilation_info_async(self): + return self._get_compilation_info() + + def _get_compilation_info(self): # Here's a little setup to implement this method. Unfortunately, # this is not yet implemented in wgpu-native. Another problem # is that if there is an error in the shader source, we raise @@ -3078,7 +3126,7 @@ def read_buffer(self, buffer, buffer_offset=0, size=None): self.submit([command_buffer]) # Download from mappable buffer - tmp_buffer.map("READ_NOSYNC") + tmp_buffer._map("READ_NOSYNC") data = tmp_buffer.read_mapped() # Explicit drop. 
@@ -3182,7 +3230,7 @@ def read_texture(self, source, data_layout, size): self.submit([command_buffer]) # Download from mappable buffer - tmp_buffer.map("READ_NOSYNC") + tmp_buffer._map("READ_NOSYNC") data = tmp_buffer.read_mapped() # Explicit drop. @@ -3206,7 +3254,8 @@ def read_texture(self, source, data_layout, size): return data - def on_submitted_work_done(self): + def on_submitted_work_done_sync(self): + check_can_use_sync_variants() # In JS, this returns a Promise that can be awaited to (async) wait # for the work that is currently in the pipeline. We need to figure out # how to expose these async parts. @@ -3230,6 +3279,9 @@ def callback(status_, user_data_p): if status != 0: raise RuntimeError(f"Queue work done status: {status}") + async def on_submitted_work_done_async(self): + raise NotImplementedError() + class GPURenderBundle(classes.GPURenderBundle, GPUObjectBase): # GPUObjectBaseMixin diff --git a/wgpu/backends/wgpu_native/extras.py b/wgpu/backends/wgpu_native/extras.py index e04196c9..2fd772cc 100644 --- a/wgpu/backends/wgpu_native/extras.py +++ b/wgpu/backends/wgpu_native/extras.py @@ -11,10 +11,10 @@ def enumerate_adapters(): """Deprecated.""" - raise RuntimeError("Deprecated: use wgpu.gpu.enumerate_adapters() instead.") + raise RuntimeError("Deprecated: use wgpu.gpu.enumerate_adapters_sync() instead.") -def request_device( +def request_device_sync( adapter, trace_path, *, @@ -35,6 +35,14 @@ def request_device( ) +# Backwards compat for deprecated function +def request_device(*args, **kwargs): + logger.warning( + "WGPU: wgpu.backends.wgpu_native.request_device() is deprecated, use request_device_sync() instead." 
+ ) + return request_device_sync(*args, **kwargs) + + def create_pipeline_layout( device, *, diff --git a/wgpu/resources/codegen_report.md b/wgpu/resources/codegen_report.md index 6693fa4a..d29d8407 100644 --- a/wgpu/resources/codegen_report.md +++ b/wgpu/resources/codegen_report.md @@ -9,18 +9,18 @@ * Wrote 34 enums to enums.py * Wrote 60 structs to structs.py ### Patching API for _classes.py -* Diffs for GPU: add enumerate_adapters, add enumerate_adapters_async, change get_preferred_canvas_format, change request_adapter, change request_adapter_async +* Diffs for GPU: add enumerate_adapters_async, add enumerate_adapters_sync, change get_preferred_canvas_format, change request_adapter_async, change request_adapter_sync * Diffs for GPUCanvasContext: add get_preferred_format, add present * Diffs for GPUAdapter: add summary -* Diffs for GPUDevice: add adapter, add create_buffer_with_data, hide import_external_texture, hide lost, hide onuncapturederror, hide pop_error_scope, hide push_error_scope +* Diffs for GPUDevice: add adapter, add create_buffer_with_data, hide import_external_texture, hide lost_async, hide lost_sync, hide onuncapturederror, hide pop_error_scope_async, hide pop_error_scope_sync, hide push_error_scope * Diffs for GPUBuffer: add map_read, add map_write, add read_mapped, add write_mapped, hide get_mapped_range * Diffs for GPUTexture: add size * Diffs for GPUTextureView: add size, add texture * Diffs for GPUBindingCommandsMixin: change set_bind_group * Diffs for GPUQueue: add read_buffer, add read_texture, hide copy_external_image_to_texture -* Validated 37 classes, 121 methods, 45 properties +* Validated 37 classes, 126 methods, 46 properties ### Patching API for backends/wgpu_native/_api.py -* Validated 37 classes, 96 methods, 0 properties +* Validated 37 classes, 105 methods, 0 properties ## Validating backends/wgpu_native/_api.py * Enum field FeatureName.texture-compression-bc-sliced-3d missing in wgpu.h * Enum field FeatureName.clip-distances 
missing in wgpu.h diff --git a/wgpu/utils/device.py b/wgpu/utils/device.py index 1a42076e..c50dbbae 100644 --- a/wgpu/utils/device.py +++ b/wgpu/utils/device.py @@ -12,6 +12,6 @@ def get_default_device(): if _default_device is None: import wgpu.backends.auto # noqa - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - _default_device = adapter.request_device() + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + _default_device = adapter.request_device_sync() return _default_device